diff --git a/.codespellrc b/.codespellrc
deleted file mode 100644
index 771985af191..00000000000
--- a/.codespellrc
+++ /dev/null
@@ -1,3 +0,0 @@
-[codespell]
-skip = .git,target,./crates/storage/libmdbx-rs/mdbx-sys/libmdbx,Cargo.toml,Cargo.lock
-ignore-words-list = crate,ser,ratatui
diff --git a/.config/nextest.toml b/.config/nextest.toml
index e107857a351..94d55bf0311 100644
--- a/.config/nextest.toml
+++ b/.config/nextest.toml
@@ -5,3 +5,9 @@ slow-timeout = { period = "30s", terminate-after = 4 }
 [[profile.default.overrides]]
 filter = "test(general_state_tests)"
 slow-timeout = { period = "1m", terminate-after = 10 }
+
+# E2E tests using the testsuite framework from crates/e2e-test-utils
+# These tests are located in tests/e2e-testsuite/ directories across various crates
+[[profile.default.overrides]]
+filter = "binary(e2e_testsuite)"
+slow-timeout = { period = "2m", terminate-after = 3 }
diff --git a/.gitattributes b/.gitattributes
index 52ee28d3ba9..17286acb516 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -2,3 +2,5 @@
 book/cli/**/*.md linguist-vendored
 book/cli/cli.md -linguist-vendored
 crates/storage/libmdbx-rs/mdbx-sys/libmdbx/** linguist-vendored
+
+bun.lock linguist-language=JSON-with-Comments
diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh
index 1a94c0d4c33..8f32c00f17d 100755
--- a/.github/assets/check_wasm.sh
+++ b/.github/assets/check_wasm.sh
@@ -48,6 +48,8 @@ exclude_crates=(
   reth-rpc-api
   reth-rpc-api-testing-util
   reth-rpc-builder
+  reth-rpc-convert
+  reth-rpc-e2e-tests
   reth-rpc-engine-api
   reth-rpc-eth-api
   reth-rpc-eth-types
@@ -58,7 +60,7 @@ exclude_crates=(
   reth-ress-provider
   # The following are not supposed to be working
   reth # all of the crates below
-  reth-alloy-provider
+  reth-storage-rpc-provider
   reth-invalid-block-hooks # reth-provider
   reth-libmdbx # mdbx
   reth-mdbx-sys # mdbx
@@ -76,6 +78,7 @@ exclude_crates=(
   reth-era-downloader # tokio
   reth-era-utils # tokio
   reth-tracing-otlp
+  reth-node-ethstats
   reth-scroll-cli # tokio
   reth-scroll-node # tokio
   reth-scroll # tokio
diff --git a/.github/assets/hive/expected_failures.yaml b/.github/assets/hive/expected_failures.yaml
index f155a3478c6..a4dd3376efd 100644
--- a/.github/assets/hive/expected_failures.yaml
+++ b/.github/assets/hive/expected_failures.yaml
@@ -6,7 +6,6 @@
 rpc-compat:
   - debug_getRawReceipts/get-block-n (reth)
   - debug_getRawTransaction/get-invalid-hash (reth)
-  - eth_call/call-callenv (reth)
   - eth_getStorageAt/get-storage-invalid-key-too-large (reth)
   - eth_getStorageAt/get-storage-invalid-key (reth)
   - eth_getTransactionReceipt/get-access-list (reth)
@@ -60,7 +59,6 @@ engine-auth:
 # worth re-visiting when more of these related tests are passing
 eest/consume-engine:
   - tests/prague/eip7702_set_code_tx/test_set_code_txs.py::test_set_code_to_non_empty_storage[fork_Prague-blockchain_test_engine-zero_nonce]-reth
-  - tests/prague/eip7251_consolidations/test_modified_consolidation_contract.py::test_system_contract_errors[fork_Prague-blockchain_test_engine-system_contract_reaches_gas_limit-system_contract_0x0000bbddc7ce488642fb579f8b00f3a590007251]-reth
   - tests/prague/eip7251_consolidations/test_contract_deployment.py::test_system_contract_deployment[fork_CancunToPragueAtTime15k-blockchain_test_engine-deploy_after_fork-nonzero_balance]-reth
   - tests/prague/eip7251_consolidations/test_contract_deployment.py::test_system_contract_deployment[fork_CancunToPragueAtTime15k-blockchain_test_engine-deploy_after_fork-zero_balance]-reth
   - tests/prague/eip6110_deposits/test_modified_contract.py::test_invalid_layout[fork_Prague-blockchain_test_engine-log_argument_amount_offset-value_zero]-reth
@@ -68,7 +66,6 @@ eest/consume-engine:
   - tests/prague/eip6110_deposits/test_modified_contract.py::test_invalid_layout[fork_Prague-blockchain_test_engine-log_argument_index_offset-value_zero]-reth
   - tests/prague/eip6110_deposits/test_modified_contract.py::test_invalid_layout[fork_Prague-blockchain_test_engine-log_argument_index_size-value_zero]-reth
   - tests/prague/eip6110_deposits/test_modified_contract.py::test_invalid_layout[fork_Prague-blockchain_test_engine-log_argument_pubkey_offset-value_zero]-reth
-  - tests/prague/eip7002_el_triggerable_withdrawals/test_modified_withdrawal_contract.py::test_system_contract_errors[fork_Prague-blockchain_test_engine-system_contract_reaches_gas_limit-system_contract_0x00000961ef480eb55e80d19ad83579a64c007002]-reth
   - tests/prague/eip6110_deposits/test_modified_contract.py::test_invalid_layout[fork_Prague-blockchain_test_engine-log_argument_pubkey_size-value_zero]-reth
   - tests/prague/eip6110_deposits/test_modified_contract.py::test_invalid_layout[fork_Prague-blockchain_test_engine-log_argument_signature_offset-value_zero]-reth
   - tests/prague/eip6110_deposits/test_modified_contract.py::test_invalid_layout[fork_Prague-blockchain_test_engine-log_argument_signature_size-value_zero]-reth
@@ -102,9 +99,7 @@ eest/consume-rlp:
   - tests/prague/eip7002_el_triggerable_withdrawals/test_contract_deployment.py::test_system_contract_deployment[fork_CancunToPragueAtTime15k-blockchain_test_engine-deploy_after_fork-zero_balance]-reth
   - tests/prague/eip6110_deposits/test_modified_contract.py::test_invalid_log_length[fork_Prague-blockchain_test_engine-slice_bytes_False]-reth
   - tests/prague/eip6110_deposits/test_modified_contract.py::test_invalid_log_length[fork_Prague-blockchain_test_engine-slice_bytes_True]-reth
-  - tests/prague/eip7251_consolidations/test_modified_consolidation_contract.py::test_system_contract_errors[fork_Prague-blockchain_test-system_contract_reaches_gas_limit-system_contract_0x0000bbddc7ce488642fb579f8b00f3a590007251]-reth
   - tests/prague/eip7251_consolidations/test_contract_deployment.py::test_system_contract_deployment[fork_CancunToPragueAtTime15k-blockchain_test-deploy_after_fork-nonzero_balance]-reth
   - tests/prague/eip7251_consolidations/test_contract_deployment.py::test_system_contract_deployment[fork_CancunToPragueAtTime15k-blockchain_test-deploy_after_fork-zero_balance]-reth
-  - tests/prague/eip7002_el_triggerable_withdrawals/test_modified_withdrawal_contract.py::test_system_contract_errors[fork_Prague-blockchain_test-system_contract_reaches_gas_limit-system_contract_0x00000961ef480eb55e80d19ad83579a64c007002]-reth
   - tests/prague/eip7002_el_triggerable_withdrawals/test_contract_deployment.py::test_system_contract_deployment[fork_CancunToPragueAtTime15k-blockchain_test-deploy_after_fork-nonzero_balance]-reth
   - tests/prague/eip7002_el_triggerable_withdrawals/test_contract_deployment.py::test_system_contract_deployment[fork_CancunToPragueAtTime15k-blockchain_test-deploy_after_fork-zero_balance]-reth
diff --git a/.github/assets/hive/ignored_tests.yaml b/.github/assets/hive/ignored_tests.yaml
new file mode 100644
index 00000000000..43021de8420
--- /dev/null
+++ b/.github/assets/hive/ignored_tests.yaml
@@ -0,0 +1,17 @@
+# Ignored Tests Configuration
+#
+# This file contains tests that should be ignored for various reasons (flaky, known issues, etc.).
+# These tests will be IGNORED in the CI results - they won't cause the build to fail
+# regardless of whether they pass or fail.
+#
+# Format
+# test_suite:
+#   - "test name 1"
+#   - "test name 2"
+#
+# When a test should no longer be ignored, remove it from this list.
+
+engine-withdrawals:
+  # flaky
+  - Withdrawals Fork on Block 1 - 8 Block Re-Org NewPayload (Paris) (reth)
+
diff --git a/.github/assets/hive/parse.py b/.github/assets/hive/parse.py
index c408a4d1336..11a30ae095b 100644
--- a/.github/assets/hive/parse.py
+++ b/.github/assets/hive/parse.py
@@ -7,6 +7,7 @@
 parser = argparse.ArgumentParser(description="Check for unexpected test results based on an exclusion list.")
 parser.add_argument("report_json", help="Path to the hive report JSON file.")
 parser.add_argument("--exclusion", required=True, help="Path to the exclusion YAML file.")
+parser.add_argument("--ignored", required=True, help="Path to the ignored tests YAML file.")
 args = parser.parse_args()
 
 # Load hive JSON
@@ -18,13 +19,30 @@
     exclusion_data = yaml.safe_load(file)
     exclusions = exclusion_data.get(report['name'], [])
 
+# Load ignored tests YAML
+with open(args.ignored, 'r') as file:
+    ignored_data = yaml.safe_load(file)
+    ignored_tests = ignored_data.get(report['name'], [])
+
 # Collect unexpected failures and passes
 unexpected_failures = []
 unexpected_passes = []
+ignored_results = {'passed': [], 'failed': []}
 
 for test in report['testCases'].values():
     test_name = test['name']
     test_pass = test['summaryResult']['pass']
+
+    # Check if this is an ignored test
+    if test_name in ignored_tests:
+        # Track ignored test results for informational purposes
+        if test_pass:
+            ignored_results['passed'].append(test_name)
+        else:
+            ignored_results['failed'].append(test_name)
+        continue  # Skip this test - don't count it as unexpected
+
+    # Check against expected failures
     if test_name in exclusions:
         if test_pass:
             unexpected_passes.append(test_name)
@@ -32,6 +50,19 @@
         if not test_pass:
             unexpected_failures.append(test_name)
 
+# Print summary of ignored tests if any were ignored
+if ignored_results['passed'] or ignored_results['failed']:
+    print("Ignored Tests:")
+    if ignored_results['passed']:
+        print(f"  Passed ({len(ignored_results['passed'])} tests):")
+        for test in ignored_results['passed']:
+            print(f"    {test}")
+    if ignored_results['failed']:
+        print(f"  Failed ({len(ignored_results['failed'])} tests):")
+        for test in ignored_results['failed']:
+            print(f"    {test}")
+    print()
+
 # Check if there are any unexpected failures or passes and exit with error
 if unexpected_failures or unexpected_passes:
     if unexpected_failures:
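The parse.py change above leaves each hive test with one of three dispositions: ignored (reported for information only), expected failure, or must-pass. A condensed sketch of that decision logic follows; the classify helper is hypothetical and not part of the patch, and only the two "unexpected" outcomes make the script exit non-zero.

    def classify(test_name, passed, exclusions, ignored_tests):
        # Ignored tests are summarized but never affect the CI status.
        if test_name in ignored_tests:
            return 'ignored-pass' if passed else 'ignored-fail'
        # Tests on the exclusion list are expected to fail; a pass is flagged
        # so the stale entry can be removed from expected_failures.yaml.
        if test_name in exclusions:
            return 'unexpected-pass' if passed else 'expected-fail'
        # Everything else must pass.
        return 'pass' if passed else 'unexpected-failure'

    assert classify('flaky test', False, [], ['flaky test']) == 'ignored-fail'
    assert classify('known bad', True, ['known bad'], []) == 'unexpected-pass'
    assert classify('healthy', True, [], []) == 'pass'

Note that both YAML files are keyed by suite name and read with .get(report['name'], []), so a suite with no entry simply gets an empty list and is unaffected.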
diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml
new file mode 100644
index 00000000000..53199542c9c
--- /dev/null
+++ b/.github/workflows/e2e.yml
@@ -0,0 +1,46 @@
+# Runs e2e tests using the testsuite framework
+
+name: e2e
+
+on:
+  pull_request:
+  merge_group:
+  push:
+    branches: [main]
+
+env:
+  CARGO_TERM_COLOR: always
+  SEED: rustethereumethereumrust
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true
+
+jobs:
+  test:
+    name: e2e-testsuite
+    runs-on: ubuntu-latest
+    env:
+      RUST_BACKTRACE: 1
+      RUST_MIN_STACK: 4194304
+    timeout-minutes: 90
+    steps:
+      - uses: actions/checkout@v4
+      - uses: dtolnay/rust-toolchain@stable
+      - uses: taiki-e/install-action@nextest
+      - uses: Swatinem/rust-cache@v2
+        with:
+          cache-on-failure: true
+      - name: Run e2e tests
+        run: |
+          cargo nextest run \
+            --locked --features "asm-keccak" \
+            --workspace \
+            --exclude 'example-*' \
+            --exclude 'exex-subscription' \
+            --exclude 'reth-bench' \
+            --exclude 'ef-tests' \
+            --exclude 'op-reth' \
+            --exclude 'reth' \
+            -E 'binary(e2e_testsuite)'
+
diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml
index af8c5d5c2a1..a833a96e622 100644
--- a/.github/workflows/hive.yml
+++ b/.github/workflows/hive.yml
@@ -31,8 +31,6 @@ jobs:
         uses: actions/checkout@v4
         with:
           repository: ethereum/hive
-          # TODO: unpin when https://github.com/ethereum/hive/issues/1306 is fixed
-          ref: edd9969338dd1798ba2e61f049c7e3a15cef53e6
           path: hivetests
 
       - uses: actions/setup-go@v5
@@ -200,7 +198,7 @@
 
       - name: Parse hive output
         run: |
-          find hivetests/workspace/logs -type f -name "*.json" ! -name "hive.json" | xargs -I {} python .github/assets/hive/parse.py {} --exclusion .github/assets/hive/expected_failures.yaml
+          find hivetests/workspace/logs -type f -name "*.json" ! -name "hive.json" | xargs -I {} python .github/assets/hive/parse.py {} --exclusion .github/assets/hive/expected_failures.yaml --ignored .github/assets/hive/ignored_tests.yaml
 
       - name: Print simulator output
         if: ${{ failure() }}
diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml
index b94701c8592..2f21680a470 100644
--- a/.github/workflows/integration.yml
+++ b/.github/workflows/integration.yml
@@ -42,7 +42,7 @@ jobs:
           cargo nextest run \
             --locked --features "asm-keccak ${{ matrix.network }}" \
             --workspace --exclude ef-tests \
-            -E "kind(test)"
+            -E "kind(test) and not binary(e2e_testsuite)"
       - if: matrix.network == 'optimism'
         name: Run tests
         run: |
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 961a435ea70..b7ad4160b0c 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -192,14 +192,12 @@
       - name: Check docs changes
         run: git diff --exit-code
 
-  codespell:
+  typos:
     runs-on: ubuntu-latest
     timeout-minutes: 30
     steps:
      - uses: actions/checkout@v4
-      - uses: codespell-project/actions-codespell@v2
-        with:
-          skip: "*.json"
+      - uses: crate-ci/typos@v1
 
   check-toml:
     runs-on: ubuntu-latest
@@ -297,7 +295,7 @@
       - fmt
       - udeps
       - book
-      - codespell
+      - typos
       - grafana
       - no-test-deps
       - features
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 87c85561478..8d3cfc950cc 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -20,8 +20,8 @@ env:
   OP_IMAGE_NAME: ${{ github.repository_owner }}/op-reth
   REPRODUCIBLE_IMAGE_NAME: ${{ github.repository_owner }}/reth-reproducible
   CARGO_TERM_COLOR: always
-  DOCKER_IMAGE_NAME_URL: ghcr.io/${{ github.repository_owner }}/reth
-  DOCKER_OP_IMAGE_NAME_URL: ghcr.io/${{ github.repository_owner }}/op-reth
+  DOCKER_IMAGE_NAME_URL: https://ghcr.io/${{ github.repository_owner }}/reth
+  DOCKER_OP_IMAGE_NAME_URL: https://ghcr.io/${{ github.repository_owner }}/op-reth
 
 jobs:
   dry-run:
diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml
index c7afa2eab09..cb317b7ed86 100644
--- a/.github/workflows/unit.yml
+++ b/.github/workflows/unit.yml
@@ -67,7 +67,7 @@ jobs:
             ${{ matrix.args }} --workspace \
             --exclude ef-tests --no-tests=warn \
             --partition hash:${{ matrix.partition }}/${{ matrix.total_partitions }} \
-            -E "!kind(test)"
+            -E "!kind(test) and not binary(e2e_testsuite)"
 
   state:
     name: Ethereum state tests
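The filtersets in e2e.yml, integration.yml, and unit.yml above are complementary: binary(e2e_testsuite), kind(test) and not binary(e2e_testsuite), and !kind(test) and not binary(e2e_testsuite), so every test in the workspace should be claimed by exactly one workflow. A small self-contained check of that partition; modeling a test as a (kind, binary) pair is an illustrative assumption for the sketch, not nextest's API.

    # Mirror the three CI filtersets over a (kind, binary) model of a test,
    # where kind "test" denotes an integration-test target in nextest terms.
    e2e         = lambda kind, binary: binary == 'e2e_testsuite'
    integration = lambda kind, binary: kind == 'test' and binary != 'e2e_testsuite'
    unit        = lambda kind, binary: kind != 'test' and binary != 'e2e_testsuite'

    for kind in ('test', 'lib', 'bench'):
        for binary in ('e2e_testsuite', 'anything_else'):
            # Every combination is matched by exactly one of the three filters.
            assert sum(f(kind, binary) for f in (e2e, integration, unit)) == 1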
diff --git a/.gitignore b/.gitignore
index a057ce1c628..54821e04d07 100644
--- a/.gitignore
+++ b/.gitignore
@@ -60,3 +60,12 @@ docs/vocs/node_modules
 
 # Cargo chef recipe file
 recipe.json
+
+_
+# broken links report
+links-report.json
+
+# Python cache
+__pycache__/
+*.py[cod]
+*$py.class
diff --git a/Cargo.lock b/Cargo.lock
index 8198c5e287e..042209bfcf6 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -13,9 +13,9 @@ dependencies = [
 
 [[package]]
 name = "adler2"
-version = "2.0.0"
+version = "2.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627"
+checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa"
 
 [[package]]
 name = "aead"
@@ -97,9 +97,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923"
 
 [[package]]
 name = "alloy-chains"
-version = "0.2.1"
+version = "0.2.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5848366a4f08dca1caca0a6151294a4799fe2e59ba25df100491d92e0b921b1c"
+checksum = "4195a29a4b87137b2bb02105e746102873bc03561805cf45c0e510c961f160e6"
 dependencies = [
  "alloy-primitives",
  "alloy-rlp",
@@ -107,14 +107,14 @@ dependencies = [
  "num_enum",
  "proptest",
  "serde",
- "strum 0.27.1",
+ "strum 0.27.2",
 ]
 
 [[package]]
name = "alloy-consensus"
-version = "1.0.16"
+version = "1.0.23"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d8b77018eec2154eb158869f9f2914a3ea577adf87b11be2764d4795d5ccccf7"
+checksum = "1b6093bc69509849435a2d68237a2e9fea79d27390c8e62f1e4012c460aabad8"
 dependencies = [
  "alloy-eips",
  "alloy-primitives",
@@ -138,9 +138,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-consensus-any"
-version = "1.0.7"
+version = "1.0.23"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f31b286aeef04a32720c10defd21c3aa6c626154ac442b55f6d472caeb1c6741"
+checksum = "8d1cfed4fefd13b5620cb81cdb6ba397866ff0de514c1b24806e6e79cdff5570"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -153,9 +153,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-contract"
-version = "1.0.16"
+version = "1.0.23"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "049ed4836d368929d7c5e206bab2e8d92f00524222edc0026c6bf2a3cb8a02d5"
+checksum = "f28074a21cd4f7c3a7ab218c4f38fae6be73944e1feae3b670c68b60bf85ca40"
 dependencies = [
  "alloy-consensus",
  "alloy-dyn-abi",
@@ -175,9 +175,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-dyn-abi"
-version = "1.2.1"
+version = "1.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7b95b3deca680efc7e9cba781f1a1db352fa1ea50e6384a514944dcf4419e652"
+checksum = "d9e8a436f0aad7df8bb47f144095fba61202265d9f5f09a70b0e3227881a668e"
 dependencies = [
  "alloy-json-abi",
  "alloy-primitives",
@@ -236,9 +236,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-eips"
-version = "1.0.16"
+version = "1.0.23"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "33d134f3ac4926124eaf521a1031d11ea98816df3d39fc446fcfd6b36884603f"
+checksum = "5937e2d544e9b71000942d875cbc57965b32859a666ea543cc57aae5a06d602d"
 dependencies = [
  "alloy-eip2124",
  "alloy-eip2930",
@@ -259,9 +259,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-evm"
-version = "0.12.3"
+version = "0.16.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ff5aae4c6dc600734b206b175f3200085ee82dcdaa388760358830a984ca9869"
+checksum = "b2a3c4a8d217f8ac0d0e5f890979646037d59a85fd3fc8f5b03d2f7a59b8d134"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -279,22 +279,23 @@ dependencies = [
 
 [[package]]
 name = "alloy-genesis"
-version = "1.0.16"
+version = "1.0.23"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum =
"fb1c2792605e648bdd1fddcfed8ce0d39d3db495c71d2240cb53df8aee8aea1f" +checksum = "c51b4c13e02a8104170a4de02ccf006d7c233e6c10ab290ee16e7041e6ac221d" dependencies = [ "alloy-eips", "alloy-primitives", "alloy-serde", "alloy-trie", "serde", + "serde_with", ] [[package]] name = "alloy-hardforks" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ce138b29a2f8e7ed97c064af8359dfa6559c12cba5e821ae4eb93081a56557e" +checksum = "819a3620fe125e0fff365363315ee5e24c23169173b19747dfd6deba33db8990" dependencies = [ "alloy-chains", "alloy-eip2124", @@ -306,9 +307,9 @@ dependencies = [ [[package]] name = "alloy-json-abi" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15516116086325c157c18261d768a20677f0f699348000ed391d4ad0dcb82530" +checksum = "459f98c6843f208856f338bfb25e65325467f7aff35dfeb0484d0a76e059134b" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -318,9 +319,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "1.0.16" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31cfdacfeb6b6b40bf6becf92e69e575c68c9f80311c3961d019e29c0b8d6be2" +checksum = "b590caa6b6d8bc10e6e7a7696c59b1e550e89f27f50d1ee13071150d3a3e3f66" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -333,9 +334,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "1.0.16" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de68a3f09cd9ab029cf87d08630e1336ca9a530969689fd151d505fa888a2603" +checksum = "36fe5af1fca03277daa56ad4ce5f6d623d3f4c2273ea30b9ee8674d18cefc1fa" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -359,9 +360,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "1.0.16" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcc2689c8addfc43461544d07a6f5f3a3e1f5f4efae61206cb5783dc383cfc8f" +checksum = "793df1e3457573877fbde8872e4906638fde565ee2d3bd16d04aad17d43dbf0e" dependencies = [ "alloy-consensus", "alloy-eips", @@ -372,9 +373,9 @@ dependencies = [ [[package]] name = "alloy-op-evm" -version = "0.12.3" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "588a87b77b30452991151667522d2f2f724cec9c2ec6602e4187bc97f66d8095" +checksum = "e0286cb45e87871995815db4ce8bc560ba35f7db4cc084e48a79b355db3342bd" dependencies = [ "alloy-consensus", "alloy-eips", @@ -389,10 +390,11 @@ dependencies = [ [[package]] name = "alloy-op-hardforks" -version = "0.2.3" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08043c9284e597f9b5cf741cc6d906fdb26c195a01d88423c84c00ffda835713" +checksum = "2090f21bb6df43e147d976e754bc9a007ca851badbfc6685377aa679b5f151d9" dependencies = [ + "alloy-chains", "alloy-hardforks", "auto_impl", "serde", @@ -400,9 +402,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6177ed26655d4e84e00b65cb494d4e0b8830e7cae7ef5d63087d445a2600fb55" +checksum = "3cfebde8c581a5d37b678d0a48a32decb51efd7a63a08ce2517ddec26db705c8" dependencies = [ "alloy-rlp", "arbitrary", @@ -413,15 +415,15 @@ dependencies = [ "derive_more", "foldhash", "getrandom 0.3.3", - "hashbrown 0.15.3", - "indexmap 2.9.0", + "hashbrown 0.15.4", + "indexmap 2.10.0", "itoa", "k256", 
"keccak-asm", "paste", "proptest", "proptest-derive", - "rand 0.9.1", + "rand 0.9.2", "ruint", "rustc-hash 2.1.1", "serde", @@ -431,9 +433,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "1.0.16" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ced931220f547d30313530ad315654b7862ef52631c90ab857d792865f84a7d" +checksum = "d59879a772ebdcde9dc4eb38b2535d32e8503d3175687cc09e763a625c5fcf32" dependencies = [ "alloy-chains", "alloy-consensus", @@ -444,8 +446,10 @@ dependencies = [ "alloy-primitives", "alloy-pubsub", "alloy-rpc-client", + "alloy-rpc-types-debug", "alloy-rpc-types-engine", "alloy-rpc-types-eth", + "alloy-rpc-types-trace", "alloy-signer", "alloy-sol-types", "alloy-transport", @@ -475,9 +479,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "1.0.16" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e37d6cf286fd30bacac525ab1491f9d1030d39ecce237821f2a5d5922eb9a37" +checksum = "fbdfb2899b54b7cb0063fa8e61938320f9be6b81b681be69c203abf130a87baa" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -513,14 +517,14 @@ checksum = "64b728d511962dda67c1bc7ea7c03736ec275ed2cf4c35d9585298ac9ccf3b73" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "alloy-rpc-client" -version = "1.0.16" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d1d1eac6e48b772c7290f0f79211a0e822a38b057535b514cc119abd857d5b6" +checksum = "7f060e3bb9f319eb01867a2d6d1ff9e0114e8877f5ca8f5db447724136106cae" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -529,7 +533,6 @@ dependencies = [ "alloy-transport-http", "alloy-transport-ipc", "alloy-transport-ws", - "async-stream", "futures", "pin-project", "reqwest", @@ -539,16 +542,15 @@ dependencies = [ "tokio-stream", "tower", "tracing", - "tracing-futures", "url", "wasmtimer", ] [[package]] name = "alloy-rpc-types" -version = "1.0.16" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8589c6ae318fcc9624d42e9166f7f82b630d9ad13e180c52addf20b93a8af266" +checksum = "d47b637369245d2dafef84b223b1ff5ea59e6cd3a98d2d3516e32788a0b216df" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -559,9 +561,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "1.0.16" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0182187bcbe47f3a737f5eced007b7788d4ed37aba19d43fd3df123169b3b05e" +checksum = "db29bf8f7c961533b017f383122cab6517c8da95712cf832e23c60415d520a58" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -571,9 +573,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "1.0.16" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "754d5062b594ed300a3bb0df615acb7bacdbd7bd1cd1a6e5b59fb936c5025a13" +checksum = "c0b1f499acb3fc729615147bc113b8b798b17379f19d43058a687edc5792c102" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -583,9 +585,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-any" -version = "1.0.7" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67971a228100ac65bd86e90439028853435f21796330ef08f00a70a918a84126" +checksum = "1e26b4dd90b33bd158975307fb9cf5fafa737a0e33cbb772a8648bf8be13c104" dependencies = [ "alloy-consensus-any", "alloy-rpc-types-eth", @@ -594,9 +596,9 @@ 
dependencies = [ [[package]] name = "alloy-rpc-types-beacon" -version = "1.0.16" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32c1ddf8fb2e41fa49316185d7826ed034f55819e0017e65dc6715f911b8a1ee" +checksum = "9196cbbf4b82a3cc0c471a8e68ccb30102170d930948ac940d2bceadc1b1346b" dependencies = [ "alloy-eips", "alloy-primitives", @@ -612,9 +614,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-debug" -version = "1.0.16" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c81ae89a04859751bac72e5e73459bceb3e6a4d2541f2f1374e35be358fd171" +checksum = "71841e6fc8e221892035a74f7d5b279c0a2bf27a7e1c93e7476c64ce9056624e" dependencies = [ "alloy-primitives", "serde", @@ -622,9 +624,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "1.0.16" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "662b720c498883427ffb9f5e38c7f02b56ac5c0cdd60b457e88ce6b6a20b9ce9" +checksum = "f2f9cbf5f781b9ee39cfdddea078fdef6015424f4c8282ef0e5416d15ca352c4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -638,14 +640,14 @@ dependencies = [ "jsonwebtoken", "rand 0.8.5", "serde", - "strum 0.27.1", + "strum 0.27.2", ] [[package]] name = "alloy-rpc-types-eth" -version = "1.0.16" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb082c325bdfd05a7c71f52cd1060e62491fbf6edf55962720bdc380847b0784" +checksum = "46586ec3c278639fc0e129f0eb73dbfa3d57f683c44b2ff5e066fab7ba63fa1f" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -659,14 +661,15 @@ dependencies = [ "itertools 0.14.0", "serde", "serde_json", + "serde_with", "thiserror 2.0.12", ] [[package]] name = "alloy-rpc-types-mev" -version = "1.0.16" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84c1b50012f55de4a6d58ee9512944089fa61a835e6fe3669844075bb6e0312e" +checksum = "79b6e80b501842c3f5803dd5752ae41b61f43bf6d2e1b8d29999d3312d67a8a5" dependencies = [ "alloy-consensus", "alloy-eips", @@ -679,9 +682,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "1.0.16" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaf52c884c7114c5d1f1f2735634ba0f6579911427281fb02cbd5cb8147723ca" +checksum = "bc9a2184493c374ca1dbba9569d37215c23e489970f8c3994f731cb3ed6b0b7d" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -693,9 +696,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "1.0.16" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e4fd0df1af2ed62d02e7acbc408a162a06f30cb91550c2ec34b11c760cdc0ba" +checksum = "a3aaf142f4f6c0bdd06839c422179bae135024407d731e6f365380f88cd4730e" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -705,9 +708,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.0.16" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7f26c17270c2ac1bd555c4304fe067639f0ddafdd3c8d07a200b2bb5a326e03" +checksum = "1e1722bc30feef87cc0fa824e43c9013f9639cc6c037be7be28a31361c788be2" dependencies = [ "alloy-primitives", "arbitrary", @@ -717,9 +720,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "1.0.16" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5d9fd649d6ed5b8d7e5014e01758efb937e8407124b182a7f711bf487a1a2697" +checksum = "d3674beb29e68fbbc7be302b611cf35fe07b736e308012a280861df5a2361395" dependencies = [ "alloy-primitives", "async-trait", @@ -732,9 +735,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "1.0.16" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c288c5b38be486bb84986701608f5d815183de990e884bb747f004622783e125" +checksum = "ad7094c39cd41b03ed642145b0bd37251e31a9cf2ed19e1ce761f089867356a6" dependencies = [ "alloy-consensus", "alloy-network", @@ -750,41 +753,41 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a14f21d053aea4c6630687c2f4ad614bed4c81e14737a9b904798b24f30ea849" +checksum = "aedac07a10d4c2027817a43cc1f038313fc53c7ac866f7363239971fd01f9f18" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "alloy-sol-macro-expander" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34d99282e7c9ef14eb62727981a985a01869e586d1dec729d3bb33679094c100" +checksum = "24f9a598f010f048d8b8226492b6401104f5a5c1273c2869b72af29b48bb4ba9" dependencies = [ "alloy-sol-macro-input", "const-hex", "heck", - "indexmap 2.9.0", + "indexmap 2.10.0", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", "syn-solidity", "tiny-keccak", ] [[package]] name = "alloy-sol-macro-input" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eda029f955b78e493360ee1d7bd11e1ab9f2a220a5715449babc79d6d0a01105" +checksum = "f494adf9d60e49aa6ce26dfd42c7417aa6d4343cf2ae621f20e4d92a5ad07d85" dependencies = [ "const-hex", "dunce", @@ -792,15 +795,15 @@ dependencies = [ "macro-string", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", "syn-solidity", ] [[package]] name = "alloy-sol-type-parser" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10db1bd7baa35bc8d4a1b07efbf734e73e5ba09f2580fb8cee3483a36087ceb2" +checksum = "52db32fbd35a9c0c0e538b58b81ebbae08a51be029e7ad60e08b60481c2ec6c3" dependencies = [ "serde", "winnow", @@ -808,9 +811,9 @@ dependencies = [ [[package]] name = "alloy-sol-types" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58377025a47d8b8426b3e4846a251f2c1991033b27f517aade368146f6ab1dfe" +checksum = "a285b46e3e0c177887028278f04cc8262b76fd3b8e0e20e93cea0a58c35f5ac5" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -820,9 +823,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "1.0.16" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1b790b89e31e183ae36ac0a1419942e21e94d745066f5281417c3e4299ea39e" +checksum = "f89bec2f59a41c0e259b6fe92f78dfc49862c17d10f938db9c33150d5a7f42b6" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -843,9 +846,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "1.0.16" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f643645a33a681d09ac1ca2112014c2ca09c68aad301da4400484d59c746bc70" +checksum = "0d3615ec64d775fec840f4e9d5c8e1f739eb1854d8d28db093fb3d4805e0cb53" dependencies = 
[ "alloy-json-rpc", "alloy-rpc-types-engine", @@ -864,9 +867,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "1.0.16" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c2d843199d0bdb4cbed8f1b6f2da7f68bcb9c5da7f57e789009e4e7e76d1bec" +checksum = "374db72669d8ee09063b9aa1a316e812d5cdfce7fc9a99a3eceaa0e5512300d2" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -884,9 +887,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "1.0.16" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d27aae8c7a6403d3d3e874ad2eeeadbf46267b614bac2d4d82786b9b8496464" +checksum = "f5dbaa6851875d59c8803088f4b6ec72eaeddf7667547ae8995c1a19fbca6303" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -922,15 +925,15 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "1.0.16" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4ef40a046b9bf141afc440cef596c79292708aade57c450dc74e843270fd8e7" +checksum = "9f916ff6d52f219c44a9684aea764ce2c7e1d53bd4a724c9b127863aeacc30bb" dependencies = [ "alloy-primitives", - "darling", + "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -956,9 +959,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.18" +version = "0.6.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" +checksum = "301af1932e46185686725e0fad2f8f2aa7da69dd70bf6ecc44d6b703844a3933" dependencies = [ "anstyle", "anstyle-parse", @@ -971,33 +974,33 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" +checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd" [[package]] name = "anstyle-parse" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" +checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" +checksum = "6c8bdeb6047d8983be085bab0ba1472e6dc604e7041dbf6fcd5e71523014fae9" dependencies = [ "windows-sys 0.59.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.8" +version = "3.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6680de5231bd6ee4c6191b8a1325daa282b415391ec9d3a37bd34f2060dc73fa" +checksum = "403f75924867bb1033c59fbf0797484329750cfbe3c4325cd33127941fabc882" dependencies = [ "anstyle", "once_cell_polyfill", @@ -1021,7 +1024,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -1070,7 +1073,7 @@ dependencies = [ "ark-std 0.5.0", "educe", "fnv", - "hashbrown 0.15.3", + "hashbrown 0.15.4", "itertools 0.13.0", "num-bigint", "num-integer", @@ -1163,7 +1166,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" 
dependencies = [ "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -1201,7 +1204,7 @@ dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -1216,7 +1219,7 @@ dependencies = [ "ark-std 0.5.0", "educe", "fnv", - "hashbrown 0.15.3", + "hashbrown 0.15.4", ] [[package]] @@ -1290,7 +1293,7 @@ checksum = "213888f660fddcca0d257e88e54ac05bca01885f258ccdf695bafd77031bb69d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -1363,9 +1366,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.23" +version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b37fc50485c4f3f736a4fb14199f6d5f5ba008d7f28fe710306c92780f004c07" +checksum = "ddb939d66e4ae03cee6091612804ba446b12878410cfa17f785f4dd67d4014e8" dependencies = [ "brotli", "flate2", @@ -1410,7 +1413,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -1421,7 +1424,7 @@ checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -1459,14 +1462,14 @@ checksum = "ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "autocfg" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "az" @@ -1476,9 +1479,9 @@ checksum = "7b7e4c2464d97fe331d41de9d5db0def0a96f4d823b8b32a2efd503578988973" [[package]] name = "backon" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd0b50b1b78dbadd44ab18b3c794e496f3a139abb9fbc27d9c94c4eebbb96496" +checksum = "302eaff5357a264a2c42f127ecb8bac761cf99749fc3dc95677e2743991f99e7" dependencies = [ "fastrand 2.3.0", "tokio", @@ -1531,9 +1534,9 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" -version = "1.7.3" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89e25b6adfb930f02d1981565a6e5d9c547ac15a96606256d3b59040e5cd4ca3" +checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" [[package]] name = "bech32" @@ -1591,7 +1594,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -1716,7 +1719,7 @@ dependencies = [ "boa_interner", "boa_macros", "boa_string", - "indexmap 2.9.0", + "indexmap 2.10.0", "num-bigint", "rustc-hash 2.1.1", ] @@ -1740,9 +1743,9 @@ dependencies = [ "cfg-if", "dashmap 6.1.0", "fast-float2", - "hashbrown 0.15.3", + "hashbrown 0.15.4", "icu_normalizer 1.5.0", - "indexmap 2.9.0", + "indexmap 2.10.0", "intrusive-collections", "itertools 0.13.0", "num-bigint", @@ -1775,7 +1778,7 @@ dependencies = [ "boa_macros", "boa_profiler", "boa_string", - "hashbrown 0.15.3", + "hashbrown 0.15.4", "thin-vec", ] @@ -1787,8 +1790,8 @@ checksum = "42407a3b724cfaecde8f7d4af566df4b56af32a2f11f0956f5570bb974e7f749" dependencies = [ "boa_gc", "boa_macros", - "hashbrown 0.15.3", - "indexmap 2.9.0", + "hashbrown 0.15.4", + "indexmap 2.10.0", "once_cell", "phf", "rustc-hash 
2.1.1", @@ -1803,7 +1806,7 @@ checksum = "9fd3f870829131332587f607a7ff909f1af5fc523fd1b192db55fbbdf52e8d3c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", "synstructure", ] @@ -1898,9 +1901,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.17.0" +version = "3.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" +checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" [[package]] name = "byte-slice-cast" @@ -1910,28 +1913,28 @@ checksum = "7575182f7272186991736b70173b0ea045398f984bf5ebbb3804736ce1330c9d" [[package]] name = "bytecount" -version = "0.6.8" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ce89b21cab1437276d2650d57e971f9d548a2d9037cc231abdc0562b97498ce" +checksum = "175812e0be2bccb6abe50bb8d566126198344f707e304f45c648fd8f2cc0365e" [[package]] name = "bytemuck" -version = "1.23.0" +version = "1.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9134a6ef01ce4b366b50689c94f82c14bc72bc5d0386829828a2e2752ef7958c" +checksum = "5c76a5792e44e4abe34d3abf15636779261d45a7450612059293d1d2cfc63422" dependencies = [ "bytemuck_derive", ] [[package]] name = "bytemuck_derive" -version = "1.9.3" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ecc273b49b3205b83d648f0690daa588925572cc5063745bfe547fe7ec8e1a1" +checksum = "441473f2b4b0459a68628c744bc61d23e730fb00128b841d30fa4bb3972257e4" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -1967,9 +1970,9 @@ dependencies = [ [[package]] name = "camino" -version = "1.1.9" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b96ec4966b5813e2c0507c1f86115c8c5abaadc3980879c3424042a02fd1ad3" +checksum = "0da45bc31171d8d6960122e222a67740df867c1dd53b4d51caa297084c185cab" dependencies = [ "serde", ] @@ -2024,9 +2027,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "castaway" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0abae9be0aaf9ea96a3b1b8b1b55c602ca751eba1b1500220cea4ecbafe7c0d5" +checksum = "dec551ab6e7578819132c713a93c022a05d60159dc86e7a7050223577484c55a" dependencies = [ "rustversion", ] @@ -2059,9 +2062,9 @@ dependencies = [ [[package]] name = "cfg-if" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" [[package]] name = "cfg_aliases" @@ -2134,9 +2137,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.38" +version = "4.5.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed93b9805f8ba930df42c2590f05453d5ec36cbb85d018868a5b24d31f6ac000" +checksum = "be92d32e80243a54711e5d7ce823c35c41c9d929dc4ab58e1276f625841aadf9" dependencies = [ "clap_builder", "clap_derive", @@ -2144,9 +2147,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.38" +version = "4.5.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "379026ff283facf611b0ea629334361c4211d1b12ee01024eec1591133b04120" +checksum = "707eab41e9622f9139419d573eca0900137718000c517d47da73045f54331c3d" dependencies = [ 
"anstream", "anstyle", @@ -2156,21 +2159,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.32" +version = "4.5.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09176aae279615badda0765c0c0b3f6ed53f4709118af73cf4655d85d1530cd7" +checksum = "ef4f52386a59ca4c860f7393bcf8abd8dfd91ecccc0f774635ff68e92eeef491" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "clap_lex" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" +checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675" [[package]] name = "cmake" @@ -2289,9 +2292,9 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" +checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" [[package]] name = "colored" @@ -2300,7 +2303,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "117725a109d387c937a1533ce01b450cbde6b88abceea8473c4d7a85853cda3c" dependencies = [ "lazy_static", - "windows-sys 0.59.0", + "windows-sys 0.48.0", ] [[package]] @@ -2434,9 +2437,9 @@ dependencies = [ [[package]] name = "core-foundation" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b55271e5c8c478ad3f38ad24ef34923091e0548492a266d19b3c0b4d82574c63" +checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6" dependencies = [ "core-foundation-sys", "libc", @@ -2483,9 +2486,9 @@ checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" [[package]] name = "crc32fast" -version = "1.4.2" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" +checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" dependencies = [ "cfg-if", ] @@ -2648,7 +2651,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -2657,8 +2660,18 @@ version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" dependencies = [ - "darling_core", - "darling_macro", + "darling_core 0.20.11", + "darling_macro 0.20.11", +] + +[[package]] +name = "darling" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a79c4acb1fd5fa3d9304be4c76e031c54d2e92d172a393e24b19a14fe8532fe9" +dependencies = [ + "darling_core 0.21.0", + "darling_macro 0.21.0", ] [[package]] @@ -2672,7 +2685,21 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.101", + "syn 2.0.104", +] + +[[package]] +name = "darling_core" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74875de90daf30eb59609910b84d4d368103aaec4c924824c6799b28f77d6a1d" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn 2.0.104", ] [[package]] @@ -2681,9 +2708,20 @@ version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ - "darling_core", + "darling_core 0.20.11", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "darling_macro" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e79f8e61677d5df9167cd85265f8e5f64b215cdea3fb55eebc3e622e44c7a146" +dependencies = [ + "darling_core 0.21.0", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -2736,7 +2774,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d162beedaa69905488a8da94f5ac3edb4dd4788b732fadb7bd120b2625c1976" dependencies = [ "data-encoding", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -2795,7 +2833,7 @@ checksum = "510c292c8cf384b1a340b816a9a6cf2599eb8f566a44949024af88418000c50b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -2806,7 +2844,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -2824,10 +2862,10 @@ version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8" dependencies = [ - "darling", + "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -2837,7 +2875,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" dependencies = [ "derive_builder_core", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -2858,7 +2896,7 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", "unicode-xid", ] @@ -2917,7 +2955,7 @@ dependencies = [ "libc", "option-ext", "redox_users 0.5.0", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] @@ -2957,7 +2995,7 @@ dependencies = [ "parking_lot", "rand 0.8.5", "smallvec", - "socket2", + "socket2 0.5.10", "tokio", "tracing", "uint 0.10.0", @@ -2972,7 +3010,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -3020,9 +3058,9 @@ dependencies = [ [[package]] name = "ed25519-dalek" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" +checksum = "70e796c081cee67dc755e1a36a0a172b897fab85fc3f6bc48307991f64e4eca9" dependencies = [ "curve25519-dalek", "ed25519", @@ -3042,12 +3080,12 @@ dependencies = [ "enum-ordinalize", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "ef-tests" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -3144,7 +3182,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -3164,7 +3202,7 @@ checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -3175,7 +3213,7 @@ checksum = "2f9ed6b3789237c8a0c1c505af1c7eb2c560df6186f01b098c3a1064ea532f38" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -3186,12 +3224,12 @@ checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" -version = "0.3.12" +version = "0.3.13" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cea14ef9355e3beab063703aa9dab15afd25f0667c341310c1e5274bb1d0da18" +checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" dependencies = [ "libc", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] @@ -3248,10 +3286,10 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dd55d08012b4e0dfcc92b8d6081234df65f2986ad34cc76eeed69c5e2ce7506" dependencies = [ - "darling", + "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -3360,7 +3398,6 @@ dependencies = [ "alloy-rpc-types", "eyre", "reth-basic-payload-builder", - "reth-engine-local", "reth-ethereum", "reth-ethereum-payload-builder", "reth-payload-builder", @@ -3405,10 +3442,12 @@ dependencies = [ "alloy-eips", "alloy-evm", "alloy-genesis", + "alloy-network", "alloy-op-evm", "alloy-primitives", "alloy-rlp", "alloy-rpc-types-engine", + "alloy-rpc-types-eth", "alloy-serde", "async-trait", "derive_more", @@ -3416,11 +3455,13 @@ dependencies = [ "jsonrpsee", "modular-bitfield", "op-alloy-consensus", + "op-alloy-rpc-types", "op-alloy-rpc-types-engine", "op-revm", "reth-chain-state", "reth-codecs", "reth-db-api", + "reth-engine-primitives", "reth-ethereum", "reth-network-peers", "reth-node-builder", @@ -3638,16 +3679,18 @@ dependencies = [ name = "example-txpool-tracing" version = "0.0.0" dependencies = [ + "alloy-network", "alloy-primitives", "alloy-rpc-types-trace", "clap", + "eyre", "futures-util", "reth-ethereum", ] [[package]] name = "exex-subscription" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-primitives", "clap", @@ -3766,9 +3809,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ced92e76e966ca2fd84c8f7aa01a4aea65b0eb6648d72f7c8f3e2764a67fece" +checksum = "4a3d7db9596fecd151c5f638c0ee5d5bd487b6e0ea232e5dc96d5250f6f94b1d" dependencies = [ "crc32fast", "miniz_oxide", @@ -3896,7 +3939,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -3956,7 +3999,7 @@ dependencies = [ "libc", "log", "rustversion", - "windows 0.61.1", + "windows 0.61.3", ] [[package]] @@ -3991,7 +4034,7 @@ dependencies = [ "cfg-if", "js-sys", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi 0.11.1+wasi-snapshot-preview1", "wasm-bindgen", ] @@ -4113,9 +4156,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9421a676d1b147b16b82c9225157dc629087ef8ec4d5e2960f9437a90dac0a5" +checksum = "17da50a276f1e01e0ba6c029e47b7100754904ee8a278f886546e98575380785" dependencies = [ "atomic-waker", "bytes", @@ -4123,7 +4166,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.9.0", + "indexmap 2.10.0", "slab", "tokio", "tokio-util", @@ -4169,9 +4212,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.15.3" +version = "0.15.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84b26c544d002229e640969970a2e74021aadf6e2f96372b9c58eff97de08eb3" +checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" dependencies = [ "allocator-api2", "equivalent", @@ -4206,15 +4249,9 @@ checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" 
[[package]] name = "hermit-abi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" - -[[package]] -name = "hermit-abi" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f154ce46856750ed433c8649605bf7ed2de3bc35fd9d2a9f30cddd873c80cb08" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" [[package]] name = "hex" @@ -4250,7 +4287,7 @@ dependencies = [ "idna", "ipnet", "once_cell", - "rand 0.9.1", + "rand 0.9.2", "ring", "serde", "thiserror 2.0.12", @@ -4273,7 +4310,7 @@ dependencies = [ "moka", "once_cell", "parking_lot", - "rand 0.9.1", + "rand 0.9.2", "resolv-conf", "serde", "smallvec", @@ -4417,9 +4454,9 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.6" +version = "0.27.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03a01595e11bdcec50946522c32dde3fc6914743000a68b93000965f2f02406d" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" dependencies = [ "http", "hyper", @@ -4431,7 +4468,7 @@ dependencies = [ "tokio", "tokio-rustls", "tower-service", - "webpki-roots 1.0.0", + "webpki-roots 1.0.2", ] [[package]] @@ -4452,22 +4489,28 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.12" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9f1e950e0d9d1d3c47184416723cf29c0d1f93bd8cccf37e4beb6b44f31710" +checksum = "8d9b05277c7e8da2c93a568989bb6207bef0112e8d17df7a6eda4a3cf143bc5e" dependencies = [ + "base64 0.22.1", "bytes", "futures-channel", + "futures-core", "futures-util", "http", "http-body", "hyper", + "ipnet", "libc", + "percent-encoding", "pin-project-lite", - "socket2", + "socket2 0.6.0", + "system-configuration", "tokio", "tower-service", "tracing", + "windows-registry", ] [[package]] @@ -4482,7 +4525,7 @@ dependencies = [ "js-sys", "log", "wasm-bindgen", - "windows-core 0.61.2", + "windows-core 0.58.0", ] [[package]] @@ -4695,7 +4738,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -4752,7 +4795,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -4793,13 +4836,13 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" +checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" dependencies = [ "arbitrary", "equivalent", - "hashbrown 0.15.3", + "hashbrown 0.15.4", "serde", ] @@ -4847,15 +4890,15 @@ dependencies = [ [[package]] name = "instability" -version = "0.3.7" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf9fed6d91cfb734e7476a06bde8300a1b94e217e1b523b6f0cd1a01998c71d" +checksum = "435d80800b936787d62688c927b6490e887c7ef5ff9ce922c6c6050fca75eb9a" dependencies = [ - "darling", + "darling 0.20.11", "indoc", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -4891,13 +4934,24 @@ dependencies = [ "memoffset", ] +[[package]] +name = "io-uring" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4" +dependencies = [ + "bitflags 2.9.1", + "cfg-if", + "libc", +] + [[package]] name = "ipconfig" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2", + "socket2 0.5.10", "widestring", "windows-sys 0.48.0", "winreg", @@ -4925,7 +4979,7 @@ version = "0.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" dependencies = [ - "hermit-abi 0.5.1", + "hermit-abi", "libc", "windows-sys 0.59.0", ] @@ -5070,7 +5124,7 @@ dependencies = [ "jsonrpsee-types", "parking_lot", "pin-project", - "rand 0.9.1", + "rand 0.9.2", "rustc-hash 2.1.1", "serde", "serde_json", @@ -5115,7 +5169,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -5260,15 +5314,15 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.172" +version = "0.2.174" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" +checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" [[package]] name = "libgit2-sys" -version = "0.18.1+1.9.0" +version = "0.18.2+1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1dcb20f84ffcdd825c7a311ae347cce604a6f084a767dec4a4929829645290e" +checksum = "1c42fe03df2bd3c53a3a9c7317ad91d80c81cd1fb0caec8d7cc4cd2bfa10c222" dependencies = [ "cc", "libc", @@ -5278,12 +5332,12 @@ dependencies = [ [[package]] name = "libloading" -version = "0.8.7" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a793df0d7afeac54f95b471d3af7f0d4fb975699f972341a4b76988d49cdf0c" +checksum = "07033963ba89ebaf1584d767badaa2e8fcec21aedea6b8c0346d487d49c28667" dependencies = [ "cfg-if", - "windows-targets 0.53.0", + "windows-targets 0.48.5", ] [[package]] @@ -5294,9 +5348,9 @@ checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" [[package]] name = "libp2p-identity" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbb68ea10844211a59ce46230909fd0ea040e8a192454d4cc2ee0d53e12280eb" +checksum = "3104e13b51e4711ff5738caa1fb54467c8604c2e94d607e27745bcf709068774" dependencies = [ "asn1_der", "bs58", @@ -5324,9 +5378,9 @@ dependencies = [ [[package]] name = "libredox" -version = "0.1.3" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +checksum = "4488594b9328dee448adb906d8b126d9b7deb7cf5c22161ee591610bb1be83c0" dependencies = [ "bitflags 2.9.1", "libc", @@ -5433,9 +5487,9 @@ checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" [[package]] name = "lock_api" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" dependencies = [ "autocfg", "scopeguard", @@ -5467,7 +5521,7 @@ version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.15.3", + "hashbrown 0.15.4", ] [[package]] @@ -5476,7 +5530,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "227748d55f2f0ab4735d87fd623798cb6b664512fe979705f829c9f81c934465" dependencies = [ - "hashbrown 0.15.3", + "hashbrown 0.15.4", ] [[package]] @@ -5506,15 +5560,15 @@ dependencies = [ [[package]] name = "lz4_flex" -version = "0.11.3" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75761162ae2b0e580d7e7c390558127e5f01b4194debd6221fd8c207fc80e3f5" +checksum = "08ab2867e3eeeca90e844d1940eab391c9dc5228783db2ed999acbc0a9ed375a" [[package]] name = "mach2" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b955cdeb2a02b9117f121ce63aa52d08ade45de53e48fe6a38b39c10f6f709" +checksum = "d640282b302c0bb0a2a8e0233ead9035e3bed871f0b7e81fe4a1ec829765db44" dependencies = [ "libc", ] @@ -5527,7 +5581,7 @@ checksum = "1b27834086c65ec3f9387b096d66e99f221cf081c2b738042aa252bcd41204e3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -5541,15 +5595,15 @@ dependencies = [ [[package]] name = "memchr" -version = "2.7.4" +version = "2.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" [[package]] name = "memmap2" -version = "0.9.5" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd3f7eed9d3848f8b98834af67102b720745c4ec028fcd0aa0239277e7de374f" +checksum = "483758ad303d734cec05e5c12b41d7e93e6a6390c5e9dae6bdeb7c1259012d28" dependencies = [ "libc", ] @@ -5582,7 +5636,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -5592,7 +5646,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd7399781913e5393588a8d8c6a2867bf85fb38eaf2502fdce465aad2dc6f034" dependencies = [ "base64 0.22.1", - "indexmap 2.9.0", + "indexmap 2.10.0", "metrics", "metrics-util", "quanta", @@ -5623,12 +5677,12 @@ checksum = "b8496cc523d1f94c1385dd8f0f0c2c480b2b8aeccb5b7e4485ad6365523ae376" dependencies = [ "crossbeam-epoch", "crossbeam-utils", - "hashbrown 0.15.3", - "indexmap 2.9.0", + "hashbrown 0.15.4", + "indexmap 2.10.0", "metrics", "ordered-float", "quanta", - "rand 0.9.1", + "rand 0.9.2", "rand_xoshiro", "sketches-ddsketch", ] @@ -5692,9 +5746,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be647b768db090acb35d5ec5db2b0e1f1de11133ca123b9eacf5137868f892a" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", "serde", @@ -5708,7 +5762,7 @@ checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" dependencies = [ "libc", "log", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi 0.11.1+wasi-snapshot-preview1", "windows-sys 0.59.0", ] @@ -5827,12 +5881,11 @@ dependencies = [ [[package]] name = "notify" -version = "8.0.0" +version = "8.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fee8403b3d66ac7b26aee6e40a897d85dc5ce26f44da36b8b73e987cc52e943" 
+checksum = "3163f59cd3fa0e9ef8c32f242966a7b9994fd7378366099593e0e73077cd8c97" dependencies = [ "bitflags 2.9.1", - "filetime", "fsevent-sys", "inotify", "kqueue", @@ -5841,7 +5894,7 @@ dependencies = [ "mio", "notify-types", "walkdir", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] @@ -5952,33 +6005,34 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" dependencies = [ - "hermit-abi 0.3.9", + "hermit-abi", "libc", ] [[package]] name = "num_enum" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179" +checksum = "a973b4e44ce6cad84ce69d797acf9a044532e4184c4f267913d1b546a0727b7a" dependencies = [ "num_enum_derive", + "rustversion", ] [[package]] name = "num_enum_derive" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" +checksum = "77e878c846a8abae00dd069496dbe8751b16ac1c3d6bd2a7283a938e8228f90d" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -5992,12 +6046,13 @@ dependencies = [ [[package]] name = "nybbles" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d51b0175c49668a033fe7cc69080110d9833b291566cdf332905f3ad9c68a0" +checksum = "675b3a54e5b12af997abc8b6638b0aee51a28caedab70d4967e0d5db3a3f1d06" dependencies = [ "alloy-rlp", "arbitrary", + "cfg-if", "proptest", "ruint", "serde", @@ -6037,9 +6092,9 @@ checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" [[package]] name = "op-alloy-consensus" -version = "0.18.9" +version = "0.18.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8719d9b783b29cfa1cf8d591b894805786b9ab4940adc700a57fd0d5b721cf5" +checksum = "d3c719b26da6d9cac18c3a35634d6ab27a74a304ed9b403b43749c22e57a389f" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6063,9 +6118,9 @@ checksum = "a79f352fc3893dcd670172e615afef993a41798a1d3fc0db88a3e60ef2e70ecc" [[package]] name = "op-alloy-network" -version = "0.18.9" +version = "0.18.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "839a7a1826dc1d38fdf9c6d30d1f4ed8182c63816c97054e5815206f1ebf08c7" +checksum = "66be312d3446099f1c46b3bb4bbaccdd4b3d6fb3668921158e3d47dff0a8d4a0" dependencies = [ "alloy-consensus", "alloy-network", @@ -6079,9 +6134,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-jsonrpsee" -version = "0.18.9" +version = "0.18.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b9d3de5348e2b34366413412f1f1534dc6b10d2cf6e8e1d97c451749c0c81c0" +checksum = "3833995acfc568fdac3684f037c4ed3f1f2bd2ef5deeb3f46ecee32aafa34c8e" dependencies = [ "alloy-primitives", "jsonrpsee", @@ -6089,9 +6144,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types" -version = "0.18.9" +version = "0.18.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9640f9e78751e13963762a4a44c846e9ec7974b130c29a51706f40503fe49152" +checksum = "99911fa02e717a96ba24de59874b20cf31c9d116ce79ed4e0253267260b6922f" dependencies = [ "alloy-consensus", 
"alloy-eips", @@ -6099,6 +6154,7 @@ dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", "alloy-serde", + "arbitrary", "derive_more", "op-alloy-consensus", "serde", @@ -6108,9 +6164,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.18.9" +version = "0.18.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a4559d84f079b3fdfd01e4ee0bb118025e92105fbb89736f5d77ab3ca261698" +checksum = "50cf45d43a3d548fdc39d9bfab6ba13cc06b3214ef4b9c36d3efbf3faea1b9f1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6130,7 +6186,7 @@ dependencies = [ [[package]] name = "op-reth" -version = "1.5.0" +version = "1.6.0" dependencies = [ "clap", "reth-cli-util", @@ -6148,8 +6204,8 @@ dependencies = [ [[package]] name = "op-revm" -version = "7.0.1" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "8.1.0" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "auto_impl", "once_cell", @@ -6165,9 +6221,9 @@ checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" [[package]] name = "openssl" -version = "0.10.72" +version = "0.10.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fedfea7d58a1f73118430a55da6a286e7b044961736ce96a16a17068ea25e5da" +checksum = "8505734d46c8ab1e19a1dce3aef597ad87dcb4c37e7188231769bd6bd51cebf8" dependencies = [ "bitflags 2.9.1", "cfg-if", @@ -6186,7 +6242,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -6197,9 +6253,9 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "openssl-sys" -version = "0.9.108" +version = "0.9.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e145e1651e858e820e4860f7b9c5e169bc1d8ce1c86043be79fa7b7634821847" +checksum = "90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571" dependencies = [ "cc", "libc", @@ -6283,7 +6339,7 @@ dependencies = [ "glob", "opentelemetry", "percent-encoding", - "rand 0.9.1", + "rand 0.9.2", "serde_json", "thiserror 2.0.12", "tracing", @@ -6359,7 +6415,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -6370,9 +6426,9 @@ checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" dependencies = [ "lock_api", "parking_lot_core", @@ -6380,9 +6436,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.10" +version = "0.9.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" dependencies = [ "cfg-if", "libc", @@ -6425,9 +6481,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.8.0" +version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "198db74531d58c70a361c42201efde7e2591e976d518caf7662a47dc5720e7b6" 
+checksum = "1db05f56d34358a8b1066f67cbb203ee3e7ed2ba674a6263a1d5ec6db2204323" dependencies = [ "memchr", "thiserror 2.0.12", @@ -6475,7 +6531,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -6504,7 +6560,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -6592,9 +6648,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" +checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" [[package]] name = "potential_utf" @@ -6632,12 +6688,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.32" +version = "0.2.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "664ec5419c51e34154eec046ebcba56312d5a2fc3b09a06da188e1ad21afadf6" +checksum = "ff24dfcda44452b9816fff4cd4227e1bb73ff5a2f1bc1105aa92fb8565ce44d2" dependencies = [ "proc-macro2", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -6688,7 +6744,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -6736,7 +6792,7 @@ dependencies = [ "bitflags 2.9.1", "lazy_static", "num-traits", - "rand 0.9.1", + "rand 0.9.2", "rand_chacha 0.9.0", "rand_xorshift", "regex-syntax 0.8.5", @@ -6763,7 +6819,7 @@ checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -6786,7 +6842,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -6802,15 +6858,15 @@ dependencies = [ [[package]] name = "quanta" -version = "0.12.5" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bd1fe6824cea6538803de3ff1bc0cf3949024db3d43c9643024bfb33a807c0e" +checksum = "f3ab5a9d756f0d97bdc89019bd2e4ea098cf9cde50ee7564dde6b81ccc8f06c7" dependencies = [ "crossbeam-utils", "libc", "once_cell", "raw-cpuid", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi 0.11.1+wasi-snapshot-preview1", "web-sys", "winapi", ] @@ -6843,7 +6899,7 @@ dependencies = [ "quinn-udp", "rustc-hash 2.1.1", "rustls", - "socket2", + "socket2 0.5.10", "thiserror 2.0.12", "tokio", "tracing", @@ -6859,7 +6915,7 @@ dependencies = [ "bytes", "getrandom 0.3.3", "lru-slab", - "rand 0.9.1", + "rand 0.9.2", "ring", "rustc-hash 2.1.1", "rustls", @@ -6873,14 +6929,14 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.12" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee4e529991f949c5e25755532370b8af5d114acae52326361d68d47af64aa842" +checksum = "fcebb1209ee276352ef14ff8732e24cc2b02bbac986cd74a4c81bcb2f9881970" dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2", + "socket2 0.5.10", "tracing", "windows-sys 0.59.0", ] @@ -6896,9 +6952,9 @@ dependencies = [ [[package]] name = "r-efi" -version = "5.2.0" +version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" [[package]] name = "radium" @@ -6933,9 +6989,9 @@ dependencies = [ [[package]] name = "rand" 
-version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fbfd9d094a40bf3ae768db9361049ace4c0e04a4fd6b359518bd7b73a73dd97" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ "rand_chacha 0.9.0", "rand_core 0.9.3", @@ -7085,9 +7141,9 @@ checksum = "d3edd4d5d42c92f0a659926464d4cce56b562761267ecf0f469d85b7de384175" [[package]] name = "redox_syscall" -version = "0.5.12" +version = "0.5.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "928fca9cf2aa042393a8325b9ead81d2f0df4cb12e1e24cef072922ccd99c5af" +checksum = "7251471db004e509f4e75a62cca9435365b5ec7bcdff530d612ac7c87c44a792" dependencies = [ "bitflags 2.9.1", ] @@ -7114,6 +7170,26 @@ dependencies = [ "thiserror 2.0.12", ] +[[package]] +name = "ref-cast" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a0ae411dbe946a674d89546582cea4ba2bb8defac896622d6496f14c23ba5cf" +dependencies = [ + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1165225c21bff1f3bbce98f5a1f889949bc902d3575308cc7b0de30b4f6d27c7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + [[package]] name = "regex" version = "1.11.1" @@ -7160,11 +7236,11 @@ checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "regress" -version = "0.10.3" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ef7fa9ed0256d64a688a3747d0fef7a88851c18a5e1d57f115f38ec2e09366" +checksum = "145bb27393fe455dd64d6cbc8d059adfa392590a45eadf079c01b11857e7b010" dependencies = [ - "hashbrown 0.15.3", + "hashbrown 0.15.4", "memchr", ] @@ -7176,9 +7252,9 @@ checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2" [[package]] name = "reqwest" -version = "0.12.15" +version = "0.12.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d19c46a6fdd48bc4dab94b6103fccc55d34c67cc0ad04653aad4ea2a07cd7bbb" +checksum = "cbc931937e6ca3a06e3b6c0aa7841849b160a90351d6ab467a8b9b9959767531" dependencies = [ "base64 0.22.1", "bytes", @@ -7191,17 +7267,13 @@ dependencies = [ "hyper", "hyper-rustls", "hyper-util", - "ipnet", "js-sys", "log", - "mime", - "once_cell", "percent-encoding", "pin-project-lite", "quinn", "rustls", "rustls-native-certs", - "rustls-pemfile", "rustls-pki-types", "serde", "serde_json", @@ -7211,14 +7283,14 @@ dependencies = [ "tokio-rustls", "tokio-util", "tower", + "tower-http", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots 0.26.11", - "windows-registry", + "webpki-roots 1.0.2", ] [[package]] @@ -7229,7 +7301,7 @@ checksum = "95325155c684b1c89f7765e30bc1c42e4a6da51ca513615660cb8a62ef9a88e3" [[package]] name = "reth" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-rpc-types", "aquamarine", @@ -7269,44 +7341,14 @@ dependencies = [ "reth-tasks", "reth-tokio-util", "reth-transaction-pool", - "similar-asserts", "tempfile", "tokio", "tracing", ] -[[package]] -name = "reth-alloy-provider" -version = "1.5.0" -dependencies = [ - "alloy-consensus", - "alloy-eips", - "alloy-network", - "alloy-primitives", - "alloy-provider", - "alloy-rpc-types", - "alloy-rpc-types-engine", - "reth-chainspec", - "reth-db-api", - "reth-errors", - "reth-execution-types", - "reth-node-types", - "reth-primitives", - 
"reth-provider", - "reth-prune-types", - "reth-rpc-convert", - "reth-stages-types", - "reth-storage-api", - "reth-trie", - "revm", - "revm-primitives", - "tokio", - "tracing", -] - [[package]] name = "reth-basic-payload-builder" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7329,7 +7371,7 @@ dependencies = [ [[package]] name = "reth-bench" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -7349,6 +7391,7 @@ dependencies = [ "futures", "humantime", "op-alloy-consensus", + "op-alloy-rpc-types-engine", "reqwest", "reth-cli-runner", "reth-cli-util", @@ -7367,7 +7410,7 @@ dependencies = [ [[package]] name = "reth-chain-state" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7378,7 +7421,7 @@ dependencies = [ "metrics", "parking_lot", "pin-project", - "rand 0.9.1", + "rand 0.9.2", "reth-chainspec", "reth-errors", "reth-ethereum-primitives", @@ -7398,7 +7441,7 @@ dependencies = [ [[package]] name = "reth-chainspec" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7418,7 +7461,7 @@ dependencies = [ [[package]] name = "reth-cli" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-genesis", "clap", @@ -7431,7 +7474,7 @@ dependencies = [ [[package]] name = "reth-cli-commands" -version = "1.5.0" +version = "1.6.0" dependencies = [ "ahash", "alloy-chains", @@ -7468,6 +7511,7 @@ dependencies = [ "reth-discv5", "reth-downloaders", "reth-ecies", + "reth-era", "reth-era-downloader", "reth-era-utils", "reth-eth-wire", @@ -7490,6 +7534,7 @@ dependencies = [ "reth-provider", "reth-prune", "reth-prune-types", + "reth-revm", "reth-stages", "reth-stages-types", "reth-static-file", @@ -7509,7 +7554,7 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "1.5.0" +version = "1.6.0" dependencies = [ "reth-tasks", "tokio", @@ -7518,7 +7563,7 @@ dependencies = [ [[package]] name = "reth-cli-util" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7526,7 +7571,7 @@ dependencies = [ "eyre", "libc", "rand 0.8.5", - "rand 0.9.1", + "rand 0.9.2", "reth-fs-util", "secp256k1 0.30.0", "serde", @@ -7538,7 +7583,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7562,18 +7607,18 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "1.5.0" +version = "1.6.0" dependencies = [ "convert_case", "proc-macro2", "quote", "similar-asserts", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "reth-config" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-primitives", "eyre", @@ -7590,7 +7635,7 @@ dependencies = [ [[package]] name = "reth-consensus" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7602,12 +7647,12 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-primitives", - "rand 0.9.1", + "rand 0.9.2", "reth-chainspec", "reth-consensus", "reth-ethereum-primitives", @@ -7616,7 +7661,7 @@ dependencies = [ [[package]] name = "reth-consensus-debug-client" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7634,12 +7679,13 @@ dependencies = [ "reth-tracing", "ringbuffer", "serde", + "serde_json", "tokio", ] [[package]] name = "reth-db" -version = "1.5.0" +version = "1.6.0" 
dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7664,7 +7710,7 @@ dependencies = [ "rustc-hash 2.1.1", "serde", "serde_json", - "strum 0.27.1", + "strum 0.27.2", "sysinfo", "tempfile", "thiserror 2.0.12", @@ -7672,7 +7718,7 @@ dependencies = [ [[package]] name = "reth-db-api" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -7685,7 +7731,7 @@ dependencies = [ "parity-scale-codec", "proptest", "proptest-arbitrary-interop", - "rand 0.9.1", + "rand 0.9.2", "reth-codecs", "reth-db-models", "reth-ethereum-primitives", @@ -7703,7 +7749,7 @@ dependencies = [ [[package]] name = "reth-db-common" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -7732,7 +7778,7 @@ dependencies = [ [[package]] name = "reth-db-models" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7749,7 +7795,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7776,7 +7822,7 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7787,7 +7833,7 @@ dependencies = [ "itertools 0.14.0", "metrics", "rand 0.8.5", - "rand 0.9.1", + "rand 0.9.2", "reth-chainspec", "reth-ethereum-forks", "reth-metrics", @@ -7801,7 +7847,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-chains", "alloy-primitives", @@ -7811,7 +7857,7 @@ dependencies = [ "hickory-resolver", "linked_hash_set", "parking_lot", - "rand 0.9.1", + "rand 0.9.2", "reth-chainspec", "reth-ethereum-forks", "reth-network-peers", @@ -7829,7 +7875,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7841,7 +7887,7 @@ dependencies = [ "itertools 0.14.0", "metrics", "pin-project", - "rand 0.9.1", + "rand 0.9.2", "rayon", "reth-chainspec", "reth-config", @@ -7868,12 +7914,15 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", + "alloy-genesis", "alloy-network", "alloy-primitives", + "alloy-provider", + "alloy-rlp", "alloy-rpc-types-engine", "alloy-rpc-types-eth", "alloy-signer", @@ -7883,9 +7932,15 @@ dependencies = [ "futures-util", "jsonrpsee", "reth-chainspec", + "reth-cli-commands", + "reth-config", + "reth-consensus", "reth-db", + "reth-db-common", "reth-engine-local", + "reth-ethereum-consensus", "reth-ethereum-primitives", + "reth-evm", "reth-network-api", "reth-network-peers", "reth-node-api", @@ -7895,18 +7950,23 @@ dependencies = [ "reth-payload-builder", "reth-payload-builder-primitives", "reth-payload-primitives", + "reth-primitives", + "reth-primitives-traits", "reth-provider", + "reth-prune-types", "reth-rpc-api", "reth-rpc-builder", "reth-rpc-eth-api", "reth-rpc-layer", "reth-rpc-server-types", "reth-stages-types", + "reth-static-file", "reth-tasks", "reth-tokio-util", "reth-tracing", "revm", "serde_json", + "tempfile", "tokio", "tokio-stream", "tracing", @@ -7915,7 +7975,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "1.5.0" +version = "1.6.0" dependencies = [ "aes", "alloy-primitives", @@ -7945,7 +8005,7 @@ dependencies = [ [[package]] name = "reth-engine-local" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7969,7 
+8029,7 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7993,7 +8053,7 @@ dependencies = [ [[package]] name = "reth-engine-service" -version = "1.5.0" +version = "1.6.0" dependencies = [ "futures", "pin-project", @@ -8023,7 +8083,7 @@ dependencies = [ [[package]] name = "reth-engine-tree" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8043,7 +8103,7 @@ dependencies = [ "parking_lot", "proptest", "rand 0.8.5", - "rand 0.9.1", + "rand 0.9.2", "rayon", "reth-chain-state", "reth-chainspec", @@ -8080,6 +8140,7 @@ dependencies = [ "reth-trie-db", "reth-trie-parallel", "reth-trie-sparse", + "reth-trie-sparse-parallel", "revm", "revm-primitives", "revm-state", @@ -8092,7 +8153,7 @@ dependencies = [ [[package]] name = "reth-engine-util" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -8118,7 +8179,7 @@ dependencies = [ [[package]] name = "reth-era" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8127,7 +8188,7 @@ dependencies = [ "ethereum_ssz", "ethereum_ssz_derive", "eyre", - "rand 0.9.1", + "rand 0.9.2", "reqwest", "reth-era-downloader", "reth-ethereum-primitives", @@ -8140,7 +8201,7 @@ dependencies = [ [[package]] name = "reth-era-downloader" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-primitives", "bytes", @@ -8157,7 +8218,7 @@ dependencies = [ [[package]] name = "reth-era-utils" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8186,7 +8247,7 @@ dependencies = [ [[package]] name = "reth-errors" -version = "1.5.0" +version = "1.6.0" dependencies = [ "reth-consensus", "reth-execution-errors", @@ -8196,7 +8257,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8212,7 +8273,7 @@ dependencies = [ "proptest", "proptest-arbitrary-interop", "rand 0.8.5", - "rand 0.9.1", + "rand 0.9.2", "reth-codecs", "reth-ecies", "reth-eth-wire-types", @@ -8234,7 +8295,7 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8248,7 +8309,7 @@ dependencies = [ "derive_more", "proptest", "proptest-arbitrary-interop", - "rand 0.9.1", + "rand 0.9.2", "reth-chainspec", "reth-codecs-derive", "reth-ethereum-primitives", @@ -8259,7 +8320,7 @@ dependencies = [ [[package]] name = "reth-ethereum" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-rpc-types-engine", "alloy-rpc-types-eth", @@ -8269,6 +8330,7 @@ dependencies = [ "reth-consensus", "reth-consensus-common", "reth-db", + "reth-engine-local", "reth-eth-wire", "reth-ethereum-cli", "reth-ethereum-consensus", @@ -8298,66 +8360,29 @@ dependencies = [ [[package]] name = "reth-ethereum-cli" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", - "alloy-eips", - "alloy-primitives", - "alloy-rlp", - "alloy-rpc-types", - "backon", "clap", "eyre", - "futures", - "reth-basic-payload-builder", "reth-chainspec", "reth-cli", "reth-cli-commands", "reth-cli-runner", - "reth-cli-util", - "reth-config", - "reth-consensus", "reth-db", - "reth-db-api", - "reth-downloaders", - "reth-errors", - "reth-ethereum-payload-builder", - "reth-ethereum-primitives", - "reth-evm", - "reth-execution-types", - "reth-exex", - "reth-fs-util", - "reth-network", - 
"reth-network-api", - "reth-network-p2p", "reth-node-api", "reth-node-builder", "reth-node-core", "reth-node-ethereum", - "reth-node-events", "reth-node-metrics", - "reth-payload-builder", - "reth-primitives-traits", - "reth-provider", - "reth-prune", - "reth-revm", - "reth-stages", - "reth-static-file", - "reth-tasks", "reth-tracing", - "reth-transaction-pool", - "reth-trie", - "reth-trie-db", - "serde_json", - "similar-asserts", "tempfile", - "tokio", "tracing", ] [[package]] name = "reth-ethereum-consensus" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8373,7 +8398,7 @@ dependencies = [ [[package]] name = "reth-ethereum-engine-primitives" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8391,7 +8416,7 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-eip2124", "alloy-hardforks", @@ -8404,7 +8429,7 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8430,7 +8455,7 @@ dependencies = [ [[package]] name = "reth-ethereum-primitives" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8443,19 +8468,18 @@ dependencies = [ "proptest", "proptest-arbitrary-interop", "rand 0.8.5", - "rand 0.9.1", + "rand 0.9.2", "reth-codecs", "reth-primitives-traits", "reth-zstd-compressors", "secp256k1 0.30.0", "serde", "serde_with", - "test-fuzz", ] [[package]] name = "reth-etl" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-primitives", "rayon", @@ -8465,7 +8489,7 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8491,7 +8515,7 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8513,7 +8537,7 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-evm", "alloy-primitives", @@ -8525,7 +8549,7 @@ dependencies = [ [[package]] name = "reth-execution-types" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8534,7 +8558,7 @@ dependencies = [ "arbitrary", "bincode 1.3.3", "derive_more", - "rand 0.9.1", + "rand 0.9.2", "reth-ethereum-primitives", "reth-primitives-traits", "reth-trie-common", @@ -8545,7 +8569,7 @@ dependencies = [ [[package]] name = "reth-exex" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8556,7 +8580,7 @@ dependencies = [ "itertools 0.14.0", "metrics", "parking_lot", - "rand 0.9.1", + "rand 0.9.2", "reth-chain-state", "reth-chainspec", "reth-config", @@ -8589,7 +8613,7 @@ dependencies = [ [[package]] name = "reth-exex-test-utils" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-eips", "eyre", @@ -8621,13 +8645,13 @@ dependencies = [ [[package]] name = "reth-exex-types" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-eips", "alloy-primitives", "arbitrary", "bincode 1.3.3", - "rand 0.9.1", + "rand 0.9.2", "reth-chain-state", "reth-ethereum-primitives", "reth-execution-types", @@ -8638,7 +8662,7 @@ dependencies = [ [[package]] name = "reth-fs-util" -version = "1.5.0" +version = "1.6.0" dependencies = [ "serde", "serde_json", @@ -8647,7 +8671,7 @@ dependencies = [ [[package]] name = "reth-invalid-block-hooks" 
-version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8674,7 +8698,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "1.5.0" +version = "1.6.0" dependencies = [ "bytes", "futures", @@ -8682,7 +8706,7 @@ dependencies = [ "interprocess", "jsonrpsee", "pin-project", - "rand 0.9.1", + "rand 0.9.2", "reth-tracing", "serde", "serde_json", @@ -8696,16 +8720,16 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "1.5.0" +version = "1.6.0" dependencies = [ "bitflags 2.9.1", "byteorder", "codspeed-criterion-compat", "dashmap 6.1.0", "derive_more", - "indexmap 2.9.0", + "indexmap 2.10.0", "parking_lot", - "rand 0.9.1", + "rand 0.9.2", "reth-mdbx-sys", "smallvec", "tempfile", @@ -8715,7 +8739,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "1.5.0" +version = "1.6.0" dependencies = [ "bindgen", "cc", @@ -8723,7 +8747,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "1.5.0" +version = "1.6.0" dependencies = [ "futures", "metrics", @@ -8734,14 +8758,14 @@ dependencies = [ [[package]] name = "reth-net-banlist" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-primitives", ] [[package]] name = "reth-net-nat" -version = "1.5.0" +version = "1.6.0" dependencies = [ "futures-util", "if-addrs", @@ -8755,7 +8779,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8774,7 +8798,7 @@ dependencies = [ "parking_lot", "pin-project", "rand 0.8.5", - "rand 0.9.1", + "rand 0.9.2", "reth-chainspec", "reth-consensus", "reth-discv4", @@ -8816,11 +8840,12 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "1.5.0" +version = "1.6.0" dependencies = [ + "alloy-consensus", "alloy-primitives", "alloy-rpc-types-admin", - "async-trait", + "alloy-rpc-types-eth", "auto_impl", "derive_more", "enr", @@ -8840,7 +8865,7 @@ dependencies = [ [[package]] name = "reth-network-p2p" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8862,13 +8887,13 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-primitives", "alloy-rlp", "enr", "rand 0.8.5", - "rand 0.9.1", + "rand 0.9.2", "secp256k1 0.30.0", "serde_json", "serde_with", @@ -8879,7 +8904,7 @@ dependencies = [ [[package]] name = "reth-network-types" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-eip2124", "humantime-serde", @@ -8892,14 +8917,14 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "1.5.0" +version = "1.6.0" dependencies = [ "anyhow", "bincode 1.3.3", "derive_more", "lz4_flex", "memmap2", - "rand 0.9.1", + "rand 0.9.2", "reth-fs-util", "serde", "tempfile", @@ -8910,7 +8935,7 @@ dependencies = [ [[package]] name = "reth-node-api" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-rpc-types-engine", "eyre", @@ -8933,7 +8958,7 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8962,7 +8987,9 @@ dependencies = [ "reth-engine-service", "reth-engine-tree", "reth-engine-util", + "reth-ethereum-engine-primitives", "reth-evm", + "reth-evm-ethereum", "reth-exex", "reth-fs-util", "reth-invalid-block-hooks", @@ -8971,6 +8998,8 @@ dependencies = [ "reth-network-p2p", "reth-node-api", "reth-node-core", + "reth-node-ethereum", + "reth-node-ethstats", "reth-node-events", "reth-node-metrics", "reth-payload-builder", 
@@ -8998,7 +9027,7 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9011,7 +9040,7 @@ dependencies = [ "futures", "humantime", "proptest", - "rand 0.9.1", + "rand 0.9.2", "reth-chainspec", "reth-cli-util", "reth-config", @@ -9019,6 +9048,7 @@ dependencies = [ "reth-db", "reth-discv4", "reth-discv5", + "reth-engine-local", "reth-engine-primitives", "reth-ethereum-forks", "reth-net-nat", @@ -9038,7 +9068,7 @@ dependencies = [ "secp256k1 0.30.0", "serde", "shellexpand", - "strum 0.27.1", + "strum 0.27.2", "thiserror 2.0.12", "tokio", "toml", @@ -9050,12 +9080,13 @@ dependencies = [ [[package]] name = "reth-node-ethereum" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-contract", "alloy-eips", "alloy-genesis", + "alloy-network", "alloy-primitives", "alloy-provider", "alloy-rpc-types-beacon", @@ -9065,11 +9096,11 @@ dependencies = [ "alloy-sol-types", "eyre", "futures", - "rand 0.9.1", + "rand 0.9.2", "reth-chainspec", - "reth-consensus", "reth-db", "reth-e2e-test-utils", + "reth-engine-local", "reth-engine-primitives", "reth-ethereum-consensus", "reth-ethereum-engine-primitives", @@ -9101,9 +9132,32 @@ dependencies = [ "tokio", ] +[[package]] +name = "reth-node-ethstats" +version = "1.6.0" +dependencies = [ + "alloy-consensus", + "alloy-primitives", + "chrono", + "futures-util", + "reth-chain-state", + "reth-network-api", + "reth-primitives-traits", + "reth-storage-api", + "reth-transaction-pool", + "serde", + "serde_json", + "thiserror 2.0.12", + "tokio", + "tokio-stream", + "tokio-tungstenite", + "tracing", + "url", +] + [[package]] name = "reth-node-events" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9126,7 +9180,7 @@ dependencies = [ [[package]] name = "reth-node-metrics" -version = "1.5.0" +version = "1.6.0" dependencies = [ "eyre", "http", @@ -9139,7 +9193,7 @@ dependencies = [ "reqwest", "reth-metrics", "reth-tasks", - "socket2", + "socket2 0.5.10", "tikv-jemalloc-ctl", "tokio", "tower", @@ -9148,7 +9202,7 @@ dependencies = [ [[package]] name = "reth-node-types" -version = "1.5.0" +version = "1.6.0" dependencies = [ "reth-chainspec", "reth-db-api", @@ -9160,7 +9214,7 @@ dependencies = [ [[package]] name = "reth-op" -version = "1.5.0" +version = "1.6.0" dependencies = [ "reth-chainspec", "reth-cli-util", @@ -9168,6 +9222,7 @@ dependencies = [ "reth-consensus", "reth-consensus-common", "reth-db", + "reth-engine-local", "reth-eth-wire", "reth-evm", "reth-exex", @@ -9199,7 +9254,7 @@ dependencies = [ [[package]] name = "reth-optimism-chainspec" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -9209,6 +9264,7 @@ dependencies = [ "alloy-primitives", "derive_more", "miniz_oxide", + "op-alloy-consensus", "op-alloy-rpc-types", "paste", "reth-chainspec", @@ -9225,7 +9281,7 @@ dependencies = [ [[package]] name = "reth-optimism-cli" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9273,7 +9329,7 @@ dependencies = [ [[package]] name = "reth-optimism-consensus" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -9305,7 +9361,7 @@ dependencies = [ [[package]] name = "reth-optimism-evm" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9325,13 +9381,14 @@ dependencies = [ "reth-optimism-primitives", "reth-primitives-traits", "reth-revm", + 
"reth-rpc-eth-api", "revm", "thiserror 2.0.12", ] [[package]] name = "reth-optimism-forks" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-op-hardforks", "alloy-primitives", @@ -9341,7 +9398,7 @@ dependencies = [ [[package]] name = "reth-optimism-node" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9354,7 +9411,6 @@ dependencies = [ "eyre", "futures", "op-alloy-consensus", - "op-alloy-network", "op-alloy-rpc-types-engine", "op-revm", "reth-chainspec", @@ -9362,6 +9418,7 @@ dependencies = [ "reth-db", "reth-e2e-test-utils", "reth-engine-local", + "reth-engine-primitives", "reth-evm", "reth-network", "reth-node-api", @@ -9385,8 +9442,6 @@ dependencies = [ "reth-revm", "reth-rpc-api", "reth-rpc-engine-api", - "reth-rpc-eth-api", - "reth-rpc-eth-types", "reth-rpc-server-types", "reth-tasks", "reth-tracing", @@ -9401,7 +9456,7 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9439,7 +9494,7 @@ dependencies = [ [[package]] name = "reth-optimism-primitives" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9453,7 +9508,7 @@ dependencies = [ "proptest", "proptest-arbitrary-interop", "rand 0.8.5", - "rand 0.9.1", + "rand 0.9.2", "reth-codecs", "reth-primitives-traits", "reth-zstd-compressors", @@ -9466,7 +9521,7 @@ dependencies = [ [[package]] name = "reth-optimism-rpc" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9491,13 +9546,10 @@ dependencies = [ "op-alloy-rpc-types", "op-alloy-rpc-types-engine", "op-revm", - "parking_lot", "reqwest", - "reth-chain-state", "reth-chainspec", "reth-evm", "reth-metrics", - "reth-network-api", "reth-node-api", "reth-node-builder", "reth-optimism-chainspec", @@ -9526,7 +9578,7 @@ dependencies = [ [[package]] name = "reth-optimism-storage" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9544,7 +9596,7 @@ dependencies = [ [[package]] name = "reth-optimism-txpool" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9581,7 +9633,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9601,7 +9653,7 @@ dependencies = [ [[package]] name = "reth-payload-builder-primitives" -version = "1.5.0" +version = "1.6.0" dependencies = [ "pin-project", "reth-payload-primitives", @@ -9612,7 +9664,7 @@ dependencies = [ [[package]] name = "reth-payload-primitives" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9632,7 +9684,7 @@ dependencies = [ [[package]] name = "reth-payload-util" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9641,7 +9693,7 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -9650,7 +9702,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9672,7 +9724,7 @@ dependencies = [ [[package]] name = "reth-primitives-traits" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9693,7 +9745,7 @@ dependencies = [ "proptest", "proptest-arbitrary-interop", "rand 0.8.5", - "rand 0.9.1", + 
"rand 0.9.2", "rayon", "reth-chainspec", "reth-codecs", @@ -9711,7 +9763,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9724,7 +9776,7 @@ dependencies = [ "metrics", "notify", "parking_lot", - "rand 0.9.1", + "rand 0.9.2", "rayon", "reth-chain-state", "reth-chainspec", @@ -9752,7 +9804,7 @@ dependencies = [ "revm-database", "revm-database-interface", "revm-state", - "strum 0.27.1", + "strum 0.27.2", "tempfile", "tokio", "tracing", @@ -9760,7 +9812,7 @@ dependencies = [ [[package]] name = "reth-prune" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9792,7 +9844,7 @@ dependencies = [ [[package]] name = "reth-prune-types" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-primitives", "arbitrary", @@ -9811,7 +9863,7 @@ dependencies = [ [[package]] name = "reth-ress-protocol" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9828,8 +9880,8 @@ dependencies = [ "reth-ress-protocol", "reth-storage-errors", "reth-tracing", - "strum 0.27.1", - "strum_macros 0.27.1", + "strum 0.27.2", + "strum_macros 0.27.2", "tokio", "tokio-stream", "tracing", @@ -9837,7 +9889,7 @@ dependencies = [ [[package]] name = "reth-ress-provider" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9863,7 +9915,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9877,7 +9929,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -9905,15 +9957,17 @@ dependencies = [ "http", "http-body", "hyper", + "itertools 0.14.0", "jsonrpsee", "jsonrpsee-types", "jsonwebtoken", "parking_lot", "pin-project", - "rand 0.9.1", + "rand 0.9.2", "reth-chain-state", "reth-chainspec", "reth-consensus", + "reth-db-api", "reth-engine-primitives", "reth-errors", "reth-ethereum-primitives", @@ -9955,7 +10009,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-eips", "alloy-genesis", @@ -9982,7 +10036,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10001,7 +10055,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-eips", "alloy-network", @@ -10019,6 +10073,7 @@ dependencies = [ "reth-chainspec", "reth-consensus", "reth-engine-primitives", + "reth-engine-tree", "reth-ethereum-engine-primitives", "reth-ethereum-primitives", "reth-evm", @@ -10056,17 +10111,20 @@ dependencies = [ [[package]] name = "reth-rpc-convert" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-json-rpc", "alloy-network", "alloy-primitives", "alloy-rpc-types-eth", + "alloy-signer", "jsonrpsee-types", "op-alloy-consensus", + "op-alloy-network", "op-alloy-rpc-types", "op-revm", + "reth-ethereum-primitives", "reth-evm", "reth-optimism-primitives", "reth-primitives-traits", @@ -10080,9 +10138,29 @@ dependencies = [ "thiserror 2.0.12", ] +[[package]] +name = "reth-rpc-e2e-tests" +version = "1.6.0" +dependencies = [ + "alloy-genesis", + "alloy-rpc-types-engine", + "eyre", + "futures-util", + "jsonrpsee", + "reth-chainspec", + "reth-e2e-test-utils", + "reth-node-api", + 
"reth-node-ethereum", + "reth-rpc-api", + "reth-tracing", + "serde_json", + "tokio", + "tracing", +] + [[package]] name = "reth-rpc-engine-api" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10118,7 +10196,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-api" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -10144,12 +10222,12 @@ dependencies = [ "reth-evm", "reth-network-api", "reth-node-api", - "reth-payload-builder", "reth-primitives-traits", "reth-revm", "reth-rpc-convert", "reth-rpc-eth-types", "reth-rpc-server-types", + "reth-scroll-evm", "reth-storage-api", "reth-tasks", "reth-transaction-pool", @@ -10162,11 +10240,12 @@ dependencies = [ [[package]] name = "reth-rpc-eth-types" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-evm", + "alloy-network", "alloy-primitives", "alloy-rpc-types-eth", "alloy-sol-types", @@ -10176,7 +10255,7 @@ dependencies = [ "jsonrpsee-core", "jsonrpsee-types", "metrics", - "rand 0.9.1", + "rand 0.9.2", "reth-chain-state", "reth-chainspec", "reth-errors", @@ -10205,7 +10284,7 @@ dependencies = [ [[package]] name = "reth-rpc-layer" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-rpc-types-engine", "http", @@ -10222,7 +10301,7 @@ dependencies = [ [[package]] name = "reth-rpc-server-types" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10232,12 +10311,12 @@ dependencies = [ "reth-errors", "reth-network-api", "serde", - "strum 0.27.1", + "strum 0.27.2", ] [[package]] name = "reth-scroll-chainspec" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -10261,7 +10340,7 @@ dependencies = [ [[package]] name = "reth-scroll-cli" -version = "1.5.0" +version = "1.6.0" dependencies = [ "clap", "eyre", @@ -10275,7 +10354,9 @@ dependencies = [ "reth-node-core", "reth-node-metrics", "reth-scroll-chainspec", + "reth-scroll-consensus", "reth-scroll-evm", + "reth-scroll-node", "reth-scroll-primitives", "reth-tracing", "scroll-alloy-consensus", @@ -10284,7 +10365,7 @@ dependencies = [ [[package]] name = "reth-scroll-consensus" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10302,7 +10383,7 @@ dependencies = [ [[package]] name = "reth-scroll-engine-primitives" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10311,7 +10392,7 @@ dependencies = [ "alloy-rpc-types-engine", "arbitrary", "eyre", - "rand 0.9.1", + "rand 0.9.2", "reth-chain-state", "reth-chainspec", "reth-engine-primitives", @@ -10329,7 +10410,7 @@ dependencies = [ [[package]] name = "reth-scroll-evm" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10358,7 +10439,7 @@ dependencies = [ [[package]] name = "reth-scroll-forks" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-chains", "alloy-primitives", @@ -10371,13 +10452,14 @@ dependencies = [ [[package]] name = "reth-scroll-node" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-genesis", "alloy-primitives", "alloy-rpc-types-engine", "alloy-rpc-types-eth", + "clap", "eyre", "reth-chainspec", "reth-db", @@ -10414,6 +10496,7 @@ dependencies = [ "scroll-alloy-consensus", "scroll-alloy-evm", "scroll-alloy-hardforks", + "scroll-alloy-network", "scroll-alloy-rpc-types-engine", "serde_json", "tokio", @@ -10422,7 +10505,7 @@ dependencies = [ [[package]] name = 
"reth-scroll-payload" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10452,7 +10535,7 @@ dependencies = [ [[package]] name = "reth-scroll-primitives" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10462,7 +10545,7 @@ dependencies = [ "bytes", "modular-bitfield", "once_cell", - "rand 0.9.1", + "rand 0.9.2", "reth-codecs", "reth-primitives-traits", "reth-zstd-compressors", @@ -10473,7 +10556,7 @@ dependencies = [ [[package]] name = "reth-scroll-rpc" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10485,14 +10568,11 @@ dependencies = [ "alloy-transport-http", "eyre", "jsonrpsee-types", - "parking_lot", "reqwest", "reth-chainspec", "reth-evm", - "reth-network-api", "reth-node-api", "reth-node-builder", - "reth-primitives", "reth-primitives-traits", "reth-provider", "reth-rpc", @@ -10506,7 +10586,6 @@ dependencies = [ "reth-transaction-pool", "revm", "scroll-alloy-consensus", - "scroll-alloy-evm", "scroll-alloy-hardforks", "scroll-alloy-network", "scroll-alloy-rpc-types", @@ -10517,7 +10596,7 @@ dependencies = [ [[package]] name = "reth-scroll-txpool" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10541,7 +10620,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10556,7 +10635,7 @@ dependencies = [ "itertools 0.14.0", "num-traits", "paste", - "rand 0.9.1", + "rand 0.9.2", "rayon", "reqwest", "reth-chainspec", @@ -10602,7 +10681,7 @@ dependencies = [ [[package]] name = "reth-stages-api" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10631,7 +10710,7 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-primitives", "arbitrary", @@ -10639,7 +10718,7 @@ dependencies = [ "modular-bitfield", "proptest", "proptest-arbitrary-interop", - "rand 0.9.1", + "rand 0.9.2", "reth-codecs", "reth-trie-common", "serde", @@ -10648,7 +10727,7 @@ dependencies = [ [[package]] name = "reth-stateless" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10673,7 +10752,7 @@ dependencies = [ [[package]] name = "reth-static-file" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-primitives", "assert_matches", @@ -10697,19 +10776,19 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-primitives", "clap", "derive_more", "reth-nippy-jar", "serde", - "strum 0.27.1", + "strum 0.27.2", ] [[package]] name = "reth-storage-api" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10732,7 +10811,7 @@ dependencies = [ [[package]] name = "reth-storage-errors" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10745,9 +10824,38 @@ dependencies = [ "thiserror 2.0.12", ] +[[package]] +name = "reth-storage-rpc-provider" +version = "1.6.0" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-network", + "alloy-primitives", + "alloy-provider", + "alloy-rpc-types", + "alloy-rpc-types-engine", + "parking_lot", + "reth-chainspec", + "reth-db-api", + "reth-errors", + "reth-execution-types", + "reth-node-types", + "reth-primitives", + "reth-provider", + "reth-prune-types", + "reth-rpc-convert", + "reth-stages-types", + "reth-storage-api", 
+ "reth-trie", + "revm", + "tokio", + "tracing", +] + [[package]] name = "reth-tasks" -version = "1.5.0" +version = "1.6.0" dependencies = [ "auto_impl", "dyn-clone", @@ -10764,14 +10872,14 @@ dependencies = [ [[package]] name = "reth-testing-utils" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-genesis", "alloy-primitives", "rand 0.8.5", - "rand 0.9.1", + "rand 0.9.2", "reth-ethereum-primitives", "reth-primitives-traits", "secp256k1 0.30.0", @@ -10779,7 +10887,7 @@ dependencies = [ [[package]] name = "reth-tokio-util" -version = "1.5.0" +version = "1.6.0" dependencies = [ "tokio", "tokio-stream", @@ -10788,7 +10896,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "1.5.0" +version = "1.6.0" dependencies = [ "clap", "eyre", @@ -10802,7 +10910,7 @@ dependencies = [ [[package]] name = "reth-tracing-otlp" -version = "1.5.0" +version = "1.6.0" dependencies = [ "opentelemetry", "opentelemetry-otlp", @@ -10815,7 +10923,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10832,7 +10940,7 @@ dependencies = [ "paste", "proptest", "proptest-arbitrary-interop", - "rand 0.9.1", + "rand 0.9.2", "reth-chain-state", "reth-chainspec", "reth-eth-wire-types", @@ -10861,7 +10969,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10893,7 +11001,7 @@ dependencies = [ [[package]] name = "reth-trie-common" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -10925,7 +11033,7 @@ dependencies = [ [[package]] name = "reth-trie-db" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10951,7 +11059,7 @@ dependencies = [ [[package]] name = "reth-trie-parallel" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -10961,7 +11069,7 @@ dependencies = [ "metrics", "proptest", "proptest-arbitrary-interop", - "rand 0.9.1", + "rand 0.9.2", "rayon", "reth-db-api", "reth-execution-errors", @@ -10980,7 +11088,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -10995,7 +11103,7 @@ dependencies = [ "proptest", "proptest-arbitrary-interop", "rand 0.8.5", - "rand 0.9.1", + "rand 0.9.2", "reth-execution-errors", "reth-metrics", "reth-primitives-traits", @@ -11012,7 +11120,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse-parallel" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -11020,14 +11128,18 @@ dependencies = [ "arbitrary", "assert_matches", "itertools 0.14.0", + "pretty_assertions", "proptest", "proptest-arbitrary-interop", "rand 0.8.5", - "rand 0.9.1", + "rand 0.9.2", + "rayon", "reth-execution-errors", "reth-primitives-traits", + "reth-provider", "reth-trie", "reth-trie-common", + "reth-trie-db", "reth-trie-sparse", "smallvec", "tracing", @@ -11035,15 +11147,15 @@ dependencies = [ [[package]] name = "reth-zstd-compressors" -version = "1.5.0" +version = "1.6.0" dependencies = [ "zstd", ] [[package]] name = "revm" -version = "26.0.1" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "27.1.0" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "revm-bytecode", 
"revm-context", @@ -11060,8 +11172,8 @@ dependencies = [ [[package]] name = "revm-bytecode" -version = "5.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "6.1.0" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "bitvec", "once_cell", @@ -11072,8 +11184,8 @@ dependencies = [ [[package]] name = "revm-context" -version = "7.0.1" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "8.0.4" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "cfg-if", "derive-where", @@ -11087,8 +11199,8 @@ dependencies = [ [[package]] name = "revm-context-interface" -version = "7.0.1" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "9.0.0" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -11102,8 +11214,8 @@ dependencies = [ [[package]] name = "revm-database" -version = "6.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "7.0.2" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "alloy-eips", "revm-bytecode", @@ -11115,10 +11227,11 @@ dependencies = [ [[package]] name = "revm-database-interface" -version = "6.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "7.0.2" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "auto_impl", + "either", "revm-primitives", "revm-state", "serde", @@ -11126,8 +11239,8 @@ dependencies = [ [[package]] name = "revm-handler" -version = "7.0.1" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "8.1.0" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "auto_impl", "derive-where", @@ -11144,8 +11257,8 @@ dependencies = [ [[package]] name = "revm-inspector" -version = "7.0.1" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "8.1.0" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "auto_impl", "either", @@ -11161,9 +11274,9 @@ dependencies = [ [[package]] name = "revm-inspectors" -version = "0.25.0" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2aabdffc06bdb434d9163e2d63b6fae843559afd300ea3fbeb113b8a0d8ec728" +checksum = "aad27cab355b0aa905d0744f3222e716b40ad48b32276ac4b0a615f2c3364c97" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -11181,8 +11294,8 @@ dependencies = [ [[package]] name = "revm-interpreter" -version = "22.0.1" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "24.0.0" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "revm-bytecode", "revm-context-interface", @@ -11192,14 +11305,15 @@ dependencies = [ [[package]] name = "revm-precompile" -version = "23.0.0" -source = 
"git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "25.0.0" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "ark-bls12-381", "ark-bn254", "ark-ec", "ark-ff 0.5.0", "ark-serialize 0.5.0", + "arrayref", "aurora-engine-modexp", "blst", "c-kzg", @@ -11217,8 +11331,8 @@ dependencies = [ [[package]] name = "revm-primitives" -version = "20.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "20.1.0" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "alloy-primitives", "num_enum", @@ -11228,7 +11342,7 @@ dependencies = [ [[package]] name = "revm-scroll" version = "0.1.0" -source = "git+https://github.com/scroll-tech/scroll-revm?branch=main#6a1e33df5b9e5ad6585a8469faaba9e6ea5e3f3d" +source = "git+https://github.com/scroll-tech/scroll-revm#720ee7802e5ad695ac1f8699bbab9c9f2424417f" dependencies = [ "auto_impl", "enumn", @@ -11241,8 +11355,8 @@ dependencies = [ [[package]] name = "revm-state" -version = "6.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "7.0.2" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "bitflags 2.9.1", "revm-bytecode", @@ -11381,7 +11495,7 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.1", - "syn 2.0.101", + "syn 2.0.104", "unicode-ident", ] @@ -11417,7 +11531,7 @@ dependencies = [ "primitive-types", "proptest", "rand 0.8.5", - "rand 0.9.1", + "rand 0.9.2", "rlp", "ruint-macro", "serde", @@ -11433,9 +11547,9 @@ checksum = "48fd7bd8a6377e15ad9d42a8ec25371b94ddc67abe7c8b9127bec79bebaaae18" [[package]] name = "rustc-demangle" -version = "0.1.24" +version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" +checksum = "989e6739f80c4ad5b13e0fd7fe89531180375b18520cc8c82080e4dc4035b84f" [[package]] name = "rustc-hash" @@ -11491,22 +11605,22 @@ dependencies = [ [[package]] name = "rustix" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" +checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8" dependencies = [ "bitflags 2.9.1", "errno", "libc", "linux-raw-sys 0.9.4", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] name = "rustls" -version = "0.23.27" +version = "0.23.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "730944ca083c1c233a75c09f199e973ca499344a2b7ba9e755c457e86fb4a321" +checksum = "2491382039b29b9b11ff08b76ff6c97cf287671dbb74f0be44bda389fffe9bd1" dependencies = [ "log", "once_cell", @@ -11529,15 +11643,6 @@ dependencies = [ "security-framework 3.2.0", ] -[[package]] -name = "rustls-pemfile" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" -dependencies = [ - "rustls-pki-types", -] - [[package]] name = "rustls-pki-types" version = "1.12.0" @@ -11554,7 +11659,7 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19787cda76408ec5404443dc8b31795c87cd8fec49762dc75fa727740d34acc1" dependencies = [ - 
"core-foundation 0.10.0", + "core-foundation 0.10.1", "core-foundation-sys", "jni", "log", @@ -11577,9 +11682,9 @@ checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" [[package]] name = "rustls-webpki" -version = "0.103.3" +version = "0.103.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4a72fe2bcf7a6ac6fd7d0b9e5cb68aeb7d4c0a0271730218b3e92d43b4eb435" +checksum = "0a17884ae0c1b773f1ccd2bd4a8c72f16da897310a98b0e84bf349ad5ead92fc" dependencies = [ "ring", "rustls-pki-types", @@ -11634,6 +11739,30 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "schemars" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd191f9397d57d581cddd31014772520aa448f65ef991055d7f61582c65165f" +dependencies = [ + "dyn-clone", + "ref-cast", + "serde", + "serde_json", +] + +[[package]] +name = "schemars" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82d20c4491bc164fa2f6c5d44565947a52ad80b9505d8e36f8d54c27c739fcd0" +dependencies = [ + "dyn-clone", + "ref-cast", + "serde", + "serde_json", +] + [[package]] name = "schnellru" version = "0.2.4" @@ -11659,7 +11788,7 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "scroll-alloy-consensus" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11672,7 +11801,7 @@ dependencies = [ "modular-bitfield", "proptest", "proptest-arbitrary-interop", - "rand 0.9.1", + "rand 0.9.2", "reth-codecs", "reth-codecs-derive", "serde", @@ -11683,7 +11812,7 @@ dependencies = [ [[package]] name = "scroll-alloy-evm" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11705,7 +11834,7 @@ dependencies = [ [[package]] name = "scroll-alloy-hardforks" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-hardforks", "auto_impl", @@ -11714,7 +11843,7 @@ dependencies = [ [[package]] name = "scroll-alloy-network" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-network", @@ -11728,7 +11857,7 @@ dependencies = [ [[package]] name = "scroll-alloy-provider" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-primitives", "alloy-provider", @@ -11768,7 +11897,7 @@ dependencies = [ [[package]] name = "scroll-alloy-rpc-types" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11786,7 +11915,7 @@ dependencies = [ [[package]] name = "scroll-alloy-rpc-types-engine" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -11797,7 +11926,7 @@ dependencies = [ [[package]] name = "scroll-reth" -version = "1.5.0" +version = "1.6.0" dependencies = [ "clap", "reth-cli-util", @@ -11840,7 +11969,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c3c81b43dc2d8877c216a3fccf76677ee1ebccd429566d3e67447290d0c42b2" dependencies = [ "bitcoin_hashes", - "rand 0.9.1", + "rand 0.9.2", "secp256k1-sys 0.11.0", ] @@ -11882,7 +12011,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" dependencies = [ "bitflags 2.9.1", - "core-foundation 0.10.0", + "core-foundation 0.10.1", "core-foundation-sys", "libc", "security-framework-sys", @@ -11963,16 +12092,16 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.101", + "syn 2.0.104", ] [[package]] name = "serde_json" -version = "1.0.140" +version = "1.0.141" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +checksum = "30b9eff21ebe718216c6ec64e1d9ac57087aad11efc64e32002bce4a0d4c03d3" dependencies = [ - "indexmap 2.9.0", + "indexmap 2.10.0", "itoa", "memchr", "ryu", @@ -11992,9 +12121,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.8" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" +checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" dependencies = [ "serde", ] @@ -12013,15 +12142,17 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.12.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6b6f7f2fcb69f747921f79f3926bd1e203fce4fef62c268dd3abfb6d86029aa" +checksum = "f2c45cd61fefa9db6f254525d46e392b852e0e61d9a1fd36e5bd183450a556d5" dependencies = [ "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.9.0", + "indexmap 2.10.0", + "schemars 0.9.0", + "schemars 1.0.4", "serde", "serde_derive", "serde_json", @@ -12031,14 +12162,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.12.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d00caa5193a3c8362ac2b73be6b9e768aa5a4b2f721d8f4b339600c3cb51f8e" +checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f" dependencies = [ - "darling", + "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -12232,18 +12363,15 @@ checksum = "c1e9a774a6c28142ac54bb25d25562e6bcf957493a184f15ad4eebccb23e410a" [[package]] name = "slab" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg", -] +checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d" [[package]] name = "smallvec" -version = "1.15.0" +version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" dependencies = [ "arbitrary", "serde", @@ -12276,14 +12404,24 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.9" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f5fd57c80058a56cf5c777ab8a126398ece8e442983605d280a44ce79d0edef" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" dependencies = [ "libc", "windows-sys 0.52.0", ] +[[package]] +name = "socket2" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "233504af464074f9d066d7b5416c5f9b894a5862a6506e306f7b816cdd6f1807" +dependencies = [ + "libc", + "windows-sys 0.59.0", +] + [[package]] name = "soketto" version = "0.8.1" @@ -12345,11 +12483,11 @@ dependencies = [ [[package]] name = "strum" -version = "0.27.1" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f64def088c51c9510a8579e3c5d67c65349dcf755e5479ad3d010aa6454e2c32" +checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf" dependencies 
= [ - "strum_macros 0.27.1", + "strum_macros 0.27.2", ] [[package]] @@ -12362,20 +12500,19 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "strum_macros" -version = "0.27.1" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c77a8c5abcaf0f9ce05d62342b7d298c346515365c36b673df4ebe3ced01fde8" +checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7" dependencies = [ "heck", "proc-macro2", "quote", - "rustversion", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -12397,9 +12534,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.101" +version = "2.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf" +checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" dependencies = [ "proc-macro2", "quote", @@ -12408,14 +12545,14 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ac494e7266fcdd2ad80bf4375d55d27a117ea5c866c26d0e97fe5b3caeeb75" +checksum = "a7a985ff4ffd7373e10e0fb048110fb11a162e5a4c47f92ddb8787a6f766b769" dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -12435,7 +12572,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -12451,6 +12588,27 @@ dependencies = [ "windows 0.57.0", ] +[[package]] +name = "system-configuration" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +dependencies = [ + "bitflags 2.9.1", + "core-foundation 0.9.4", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "tagptr" version = "0.2.0" @@ -12495,7 +12653,7 @@ dependencies = [ "fastrand 2.3.0", "getrandom 0.3.3", "once_cell", - "rustix 1.0.7", + "rustix 1.0.8", "windows-sys 0.59.0", ] @@ -12517,7 +12675,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -12528,15 +12686,15 @@ checksum = "5c89e72a01ed4c579669add59014b9a524d609c0c88c6a585ce37485879f6ffb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", "test-case-core", ] [[package]] name = "test-fuzz" -version = "7.2.0" +version = "7.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae2f06b1ae65cbf4dc1f4975279cee7dbf70fcca269bdbdd8aabd20a79e6785c" +checksum = "bb4eb3ad07d6df1b12c23bc2d034e35a80c25d2e1232d083b42c081fd01c1c63" dependencies = [ "serde", "serde_combinators", @@ -12547,9 +12705,9 @@ dependencies = [ [[package]] name = "test-fuzz-internal" -version = "7.2.0" +version = "7.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac6a1dc2074c20c6410ac75687be17808a22abfd449e28301a95d72974b91768" +checksum = "53b853a8b27e0c335dd114f182fc808b917ced20dbc1bcdab79cc3e023b38762" dependencies = [ "bincode 2.0.1", "cargo_metadata 0.19.2", @@ -12558,24 +12716,24 @@ dependencies = [ [[package]] name 
= "test-fuzz-macro" -version = "7.2.0" +version = "7.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "190423aabaca6cec8392cf45e471777e036d424a14d979db8033f25cc417f1ad" +checksum = "eb25760cf823885b202e5cc8ef8dc385e80ef913537656129ea8b34470280601" dependencies = [ - "darling", + "darling 0.21.0", "heck", "itertools 0.14.0", "prettyplease", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "test-fuzz-runtime" -version = "7.2.0" +version = "7.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f05723662ca81651b49dd87b50cae65a0a38523aa1c0cb6b049a8c4f5c2c7836" +checksum = "c9b807e6d99cb6157a3f591ccf9f02187730a5774b9b1f066ff7dffba329495e" dependencies = [ "hex", "num-traits", @@ -12616,7 +12774,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -12627,17 +12785,16 @@ checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "thread_local" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" dependencies = [ "cfg-if", - "once_cell", ] [[package]] @@ -12770,20 +12927,22 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.45.1" +version = "1.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75ef51a33ef1da925cea3e4eb122833cb377c61439ca401b770f54902b806779" +checksum = "43864ed400b6043a4757a25c7a64a8efde741aed79a056a2fb348a406701bb35" dependencies = [ "backtrace", "bytes", + "io-uring", "libc", "mio", "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2", + "slab", + "socket2 0.6.0", "tokio-macros", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -12794,7 +12953,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -12838,6 +12997,7 @@ dependencies = [ "futures-util", "log", "rustls", + "rustls-native-certs", "rustls-pki-types", "tokio", "tokio-rustls", @@ -12862,9 +13022,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.22" +version = "0.8.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ae329d1f08c4d17a59bed7ff5b5a769d062e64a62d34a3261b219e62cd5aae" +checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" dependencies = [ "serde", "serde_spanned", @@ -12874,20 +13034,20 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.9" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3da5db5a963e24bc68be8b17b6fa82814bb22ee8660f192bb182771d498f09a3" +checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.22.26" +version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "310068873db2c5b3e7659d2cc35d21855dbafa50d1ce336397c666e3cb08137e" +checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.9.0", + 
"indexmap 2.10.0", "serde", "serde_spanned", "toml_datetime", @@ -12897,9 +13057,9 @@ dependencies = [ [[package]] name = "toml_write" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfb942dfe1d8e29a7ee7fcbde5bd2b9a25fb89aa70caea2eba3bee836ff41076" +checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" [[package]] name = "tonic" @@ -12931,7 +13091,7 @@ dependencies = [ "futures-core", "futures-util", "hdrhistogram", - "indexmap 2.9.0", + "indexmap 2.10.0", "pin-project-lite", "slab", "sync_wrapper", @@ -12944,9 +13104,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.4" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fdb0c213ca27a9f57ab69ddb290fd80d970922355b83ae380b395d3986b8a2e" +checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" dependencies = [ "async-compression", "base64 0.22.1", @@ -13011,20 +13171,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.28" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" +checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "tracing-core" -version = "0.1.33" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" dependencies = [ "once_cell", "valuable", @@ -13036,8 +13196,6 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" dependencies = [ - "futures", - "futures-task", "pin-project", "tracing", ] @@ -13136,9 +13294,9 @@ dependencies = [ [[package]] name = "tracy-client" -version = "0.18.0" +version = "0.18.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d90a2c01305b02b76fdd89ac8608bae27e173c829a35f7d76a345ab5d33836db" +checksum = "ef54005d3d760186fd662dad4b7bb27ecd5531cdef54d1573ebd3f20a9205ed7" dependencies = [ "loom", "once_cell", @@ -13148,12 +13306,12 @@ dependencies = [ [[package]] name = "tracy-client-sys" -version = "0.24.3" +version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69fff37da548239c3bf9e64a12193d261e8b22b660991c6fd2df057c168f435f" +checksum = "319c70195101a93f56db4c74733e272d720768e13471f400c78406a326b172b0" dependencies = [ "cc", - "windows-targets 0.52.6", + "windows-targets 0.48.5", ] [[package]] @@ -13175,10 +13333,10 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0bee2ea1551f90040ab0e34b6fb7f2fa3bad8acc925837ac654f2c78a13e3089" dependencies = [ - "darling", + "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -13214,7 +13372,7 @@ dependencies = [ "http", "httparse", "log", - "rand 0.9.1", + "rand 0.9.2", "rustls", "rustls-pki-types", "sha1", @@ -13459,7 +13617,7 @@ checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -13504,9 +13662,9 @@ checksum = 
"cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" +version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasi" @@ -13539,7 +13697,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", "wasm-bindgen-shared", ] @@ -13574,7 +13732,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -13603,9 +13761,9 @@ dependencies = [ [[package]] name = "wasmtimer" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0048ad49a55b9deb3953841fa1fc5858f0efbcb7a18868c899a360269fac1b23" +checksum = "d8d49b5d6c64e8558d9b1b065014426f35c18de636895d24893dbbd329743446" dependencies = [ "futures", "js-sys", @@ -13641,14 +13799,14 @@ version = "0.26.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75c7f0ef91146ebfb530314f5f1d24528d7f0767efbfd31dce919275413e393e" dependencies = [ - "webpki-root-certs 1.0.0", + "webpki-root-certs 1.0.2", ] [[package]] name = "webpki-root-certs" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01a83f7e1a9f8712695c03eabe9ed3fbca0feff0152f33f12593e5a6303cb1a4" +checksum = "4e4ffd8df1c57e87c325000a3d6ef93db75279dc3a231125aac571650f22b12a" dependencies = [ "rustls-pki-types", ] @@ -13659,14 +13817,14 @@ version = "0.26.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" dependencies = [ - "webpki-roots 1.0.0", + "webpki-roots 1.0.2", ] [[package]] name = "webpki-roots" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2853738d1cc4f2da3a225c18ec6c3721abb31961096e9dbf5ab35fa88b19cfdb" +checksum = "7e8983c3ab33d6fb807cfcdad2491c4ea8cbc8ed839181c7dfd9c67c83e261b2" dependencies = [ "rustls-pki-types", ] @@ -13699,7 +13857,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.48.0", ] [[package]] @@ -13730,9 +13888,9 @@ dependencies = [ [[package]] name = "windows" -version = "0.61.1" +version = "0.61.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5ee8f3d025738cb02bad7868bbb5f8a6327501e870bf51f1b455b0a2454a419" +checksum = "9babd3a767a4c1aef6900409f85f5d53ce2544ccdfaa86dad48c91782c6d6893" dependencies = [ "windows-collections", "windows-core 0.61.2", @@ -13807,7 +13965,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -13818,7 +13976,7 @@ checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -13829,7 +13987,7 @@ checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + 
"syn 2.0.104", ] [[package]] @@ -13840,7 +13998,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -13851,7 +14009,7 @@ checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -13862,14 +14020,14 @@ checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "windows-link" -version = "0.1.1" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" +checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" [[package]] name = "windows-numerics" @@ -13883,13 +14041,13 @@ dependencies = [ [[package]] name = "windows-registry" -version = "0.4.0" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4286ad90ddb45071efd1a66dfa43eb02dd0dfbae1545ad6cc3c51cf34d7e8ba3" +checksum = "5b8a9ed28765efc97bbc954883f4e6796c33a06546ebafacbabee9696967499e" dependencies = [ + "windows-link", "windows-result 0.3.4", - "windows-strings 0.3.1", - "windows-targets 0.53.0", + "windows-strings 0.4.2", ] [[package]] @@ -13929,15 +14087,6 @@ dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "windows-strings" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87fa48cc5d406560701792be122a10132491cff9d0aeb23583cc2dcafc847319" -dependencies = [ - "windows-link", -] - [[package]] name = "windows-strings" version = "0.4.2" @@ -13983,6 +14132,15 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.2", +] + [[package]] name = "windows-targets" version = "0.42.2" @@ -14031,9 +14189,9 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.53.0" +version = "0.53.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1e4c7e8ceaaf9cb7d7507c974735728ab453b67ef8f18febdd7c11fe59dca8b" +checksum = "c66f69fcc9ce11da9966ddb31a40968cad001c5bedeb5c2b82ede4253ab48aef" dependencies = [ "windows_aarch64_gnullvm 0.53.0", "windows_aarch64_msvc 0.53.0", @@ -14236,9 +14394,9 @@ checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" [[package]] name = "winnow" -version = "0.7.10" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06928c8748d81b05c9be96aad92e1b6ff01833332f281e8cfca3be4b35fc9ec" +checksum = "f3edebf492c8125044983378ecb5766203ad3b4c2f7a922bd7dd207f6d443e95" dependencies = [ "memchr", ] @@ -14282,9 +14440,9 @@ checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" [[package]] name = "ws_stream_wasm" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7999f5f4217fe3818726b66257a4475f71e74ffd190776ad053fa159e50737f5" +checksum = "6c173014acad22e83f16403ee360115b38846fe754e735c5d9d3803fe70c6abc" dependencies = [ "async_io_stream", "futures", @@ -14293,7 +14451,7 @@ dependencies = [ "pharos", "rustc_version 0.4.1", "send_wrapper 0.6.0", 
- "thiserror 1.0.69", + "thiserror 2.0.12", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -14310,12 +14468,12 @@ dependencies = [ [[package]] name = "xattr" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d65cbf2f12c15564212d48f4e3dfb87923d25d611f2aed18f4cb23f0413d89e" +checksum = "af3a19837351dc82ba89f8a125e22a3c475f05aba604acc023d62b2739ae2909" dependencies = [ "libc", - "rustix 1.0.7", + "rustix 1.0.8", ] [[package]] @@ -14356,7 +14514,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", "synstructure", ] @@ -14368,28 +14526,28 @@ checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", "synstructure", ] [[package]] name = "zerocopy" -version = "0.8.25" +version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb" +checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.25" +version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef" +checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -14409,7 +14567,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", "synstructure", ] @@ -14430,7 +14588,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -14474,7 +14632,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -14485,7 +14643,7 @@ checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index c4eb0fc6cd3..7db7095d16f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "1.5.0" +version = "1.6.0" edition = "2021" rust-version = "1.86" license = "MIT OR Apache-2.0" @@ -11,7 +11,7 @@ exclude = [".github/"] members = [ "bin/reth-bench/", "bin/reth/", - "crates/alloy-provider/", + "crates/storage/rpc-provider/", "crates/chain-state/", "crates/chainspec/", "crates/cli/cli/", @@ -67,6 +67,7 @@ members = [ "crates/node/api/", "crates/node/builder/", "crates/node/core/", + "crates/node/ethstats", "crates/node/events/", "crates/node/metrics", "crates/node/types", @@ -105,6 +106,7 @@ members = [ "crates/rpc/rpc-layer", "crates/rpc/rpc-server-types/", "crates/rpc/rpc-testing-util/", + "crates/rpc/rpc-e2e-tests/", "crates/rpc/rpc-convert/", "crates/rpc/rpc/", "crates/scroll/alloy/consensus", @@ -188,7 +190,7 @@ members = [ "crates/tracing-otlp", ] default-members = ["bin/reth"] -exclude = ["book/sources", "book/cli"] +exclude = ["docs/cli"] # Explicitly set the resolver to version 2, which is the default for packages with edition >= 2021 # 
https://doc.rust-lang.org/edition-guide/rust-2021/default-cargo-resolver.html @@ -340,7 +342,7 @@ codegen-units = 1 # reth op-reth = { path = "crates/optimism/bin" } reth = { path = "bin/reth" } -reth-alloy-provider = { path = "crates/alloy-provider" } +reth-storage-rpc-provider = { path = "crates/storage/rpc-provider" } reth-basic-payload-builder = { path = "crates/payload/basic" } reth-bench = { path = "bin/reth-bench" } reth-chain-state = { path = "crates/chain-state" } @@ -410,6 +412,7 @@ reth-node-api = { path = "crates/node/api" } reth-node-builder = { path = "crates/node/builder" } reth-node-core = { path = "crates/node/core" } reth-node-ethereum = { path = "crates/ethereum/node" } +reth-node-ethstats = { path = "crates/node/ethstats" } reth-node-events = { path = "crates/node/events" } reth-node-metrics = { path = "crates/node/metrics" } reth-optimism-node = { path = "crates/optimism/node" } @@ -439,6 +442,7 @@ reth-rpc = { path = "crates/rpc/rpc" } reth-rpc-api = { path = "crates/rpc/rpc-api" } reth-rpc-api-testing-util = { path = "crates/rpc/rpc-testing-util" } reth-rpc-builder = { path = "crates/rpc/rpc-builder" } +reth-rpc-e2e-tests = { path = "crates/rpc/rpc-e2e-tests" } reth-rpc-engine-api = { path = "crates/rpc/rpc-engine-api" } reth-rpc-eth-api = { path = "crates/rpc/rpc-eth-api" } reth-rpc-eth-types = { path = "crates/rpc/rpc-eth-types", default-features = false } @@ -469,60 +473,60 @@ reth-ress-protocol = { path = "crates/ress/protocol" } reth-ress-provider = { path = "crates/ress/provider" } # revm -revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false, features = ["enable_eip7702"] } -revm-bytecode = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false } -revm-database = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false } -revm-state = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false } -revm-primitives = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false } -revm-interpreter = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false } -revm-inspector = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false } -revm-context = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false } -revm-context-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false } -revm-database-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false } -op-revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false } -revm-scroll = { git = "https://github.com/scroll-tech/scroll-revm", branch = "main", default-features = false } -revm-inspectors = "0.25.0" +revm = { git = "https://github.com/scroll-tech/revm", default-features = false, features = ["enable_eip7702", "enable_eip7623"] } +revm-bytecode = { git = "https://github.com/scroll-tech/revm", default-features = false } +revm-database = { git = "https://github.com/scroll-tech/revm", default-features = false } +revm-state = { git = "https://github.com/scroll-tech/revm", default-features = false } +revm-primitives = { git = "https://github.com/scroll-tech/revm", default-features = false } +revm-interpreter = { git = 
"https://github.com/scroll-tech/revm", default-features = false } +revm-inspector = { git = "https://github.com/scroll-tech/revm", default-features = false } +revm-context = { git = "https://github.com/scroll-tech/revm", default-features = false } +revm-context-interface = { git = "https://github.com/scroll-tech/revm", default-features = false } +revm-database-interface = { git = "https://github.com/scroll-tech/revm", default-features = false } +op-revm = { git = "https://github.com/scroll-tech/revm", default-features = false } +revm-scroll = { git = "https://github.com/scroll-tech/scroll-revm", default-features = false } +revm-inspectors = "0.27.1" # eth -alloy-chains = { version = "0.2.0", default-features = false } -alloy-dyn-abi = "1.2.0" +alloy-chains = { version = "0.2.5", default-features = false } +alloy-dyn-abi = "1.3.0" alloy-eip2124 = { version = "0.2.0", default-features = false } -alloy-evm = { version = "0.12", default-features = false } -alloy-primitives = { version = "1.2.0", default-features = false, features = ["map-foldhash"] } +alloy-evm = { version = "0.16", default-features = false } +alloy-primitives = { version = "1.3.0", default-features = false, features = ["map-foldhash"] } alloy-rlp = { version = "0.3.10", default-features = false, features = ["core-net"] } -alloy-sol-macro = "1.2.0" -alloy-sol-types = { version = "1.2.0", default-features = false } +alloy-sol-macro = "1.3.0" +alloy-sol-types = { version = "1.3.0", default-features = false } alloy-trie = { version = "0.9.0", default-features = false } alloy-hardforks = "0.2.7" -alloy-consensus = { version = "1.0.13", default-features = false } -alloy-contract = { version = "1.0.13", default-features = false } -alloy-eips = { version = "1.0.13", default-features = false } -alloy-genesis = { version = "1.0.13", default-features = false } -alloy-json-rpc = { version = "1.0.13", default-features = false } -alloy-network = { version = "1.0.13", default-features = false } -alloy-network-primitives = { version = "1.0.13", default-features = false } -alloy-provider = { version = "1.0.13", features = ["reqwest"], default-features = false } -alloy-pubsub = { version = "1.0.13", default-features = false } -alloy-rpc-client = { version = "1.0.13", default-features = false } -alloy-rpc-types = { version = "1.0.13", features = ["eth"], default-features = false } -alloy-rpc-types-admin = { version = "1.0.13", default-features = false } -alloy-rpc-types-anvil = { version = "1.0.13", default-features = false } -alloy-rpc-types-beacon = { version = "1.0.13", default-features = false } -alloy-rpc-types-debug = { version = "1.0.13", default-features = false } -alloy-rpc-types-engine = { version = "1.0.13", default-features = false } -alloy-rpc-types-eth = { version = "1.0.13", default-features = false } -alloy-rpc-types-mev = { version = "1.0.13", default-features = false } -alloy-rpc-types-trace = { version = "1.0.13", default-features = false } -alloy-rpc-types-txpool = { version = "1.0.13", default-features = false } -alloy-serde = { version = "1.0.13", default-features = false } -alloy-signer = { version = "1.0.13", default-features = false } -alloy-signer-local = { version = "1.0.13", default-features = false } -alloy-transport = { version = "1.0.13" } -alloy-transport-http = { version = "1.0.13", features = ["reqwest-rustls-tls"], default-features = false } -alloy-transport-ipc = { version = "1.0.13", default-features = false } -alloy-transport-ws = { version = "1.0.13", default-features = false } +alloy-consensus = { version 
= "1.0.23", default-features = false } +alloy-contract = { version = "1.0.23", default-features = false } +alloy-eips = { version = "1.0.23", default-features = false } +alloy-genesis = { version = "1.0.23", default-features = false } +alloy-json-rpc = { version = "1.0.23", default-features = false } +alloy-network = { version = "1.0.23", default-features = false } +alloy-network-primitives = { version = "1.0.23", default-features = false } +alloy-provider = { version = "1.0.23", features = ["reqwest"], default-features = false } +alloy-pubsub = { version = "1.0.23", default-features = false } +alloy-rpc-client = { version = "1.0.23", default-features = false } +alloy-rpc-types = { version = "1.0.23", features = ["eth"], default-features = false } +alloy-rpc-types-admin = { version = "1.0.23", default-features = false } +alloy-rpc-types-anvil = { version = "1.0.23", default-features = false } +alloy-rpc-types-beacon = { version = "1.0.23", default-features = false } +alloy-rpc-types-debug = { version = "1.0.23", default-features = false } +alloy-rpc-types-engine = { version = "1.0.23", default-features = false } +alloy-rpc-types-eth = { version = "1.0.23", default-features = false } +alloy-rpc-types-mev = { version = "1.0.23", default-features = false } +alloy-rpc-types-trace = { version = "1.0.23", default-features = false } +alloy-rpc-types-txpool = { version = "1.0.23", default-features = false } +alloy-serde = { version = "1.0.23", default-features = false } +alloy-signer = { version = "1.0.23", default-features = false } +alloy-signer-local = { version = "1.0.23", default-features = false } +alloy-transport = { version = "1.0.23" } +alloy-transport-http = { version = "1.0.23", features = ["reqwest-rustls-tls"], default-features = false } +alloy-transport-ipc = { version = "1.0.23", default-features = false } +alloy-transport-ws = { version = "1.0.23", default-features = false } # scroll scroll-alloy-consensus = { path = "crates/scroll/alloy/consensus", default-features = false } @@ -544,20 +548,19 @@ reth-scroll-primitives = { path = "crates/scroll/primitives", default-features = reth-scroll-rpc = { path = "crates/scroll/rpc" } reth-scroll-trie = { path = "crates/scroll/trie" } reth-scroll-txpool = { path = "crates/scroll/txpool" } -# TODO (scroll): point to crates.io/tag once the crate is published/a tag is created. 
-poseidon-bn254 = { git = "https://github.com/scroll-tech/poseidon-bn254", rev = "526a64a", features = ["bn254"] } # op -alloy-op-evm = { version = "0.12", default-features = false } +alloy-op-evm = { version = "0.16", default-features = false } alloy-op-hardforks = "0.2.2" -op-alloy-rpc-types = { version = "0.18.7", default-features = false } -op-alloy-rpc-types-engine = { version = "0.18.7", default-features = false } -op-alloy-network = { version = "0.18.7", default-features = false } -op-alloy-consensus = { version = "0.18.7", default-features = false } -op-alloy-rpc-jsonrpsee = { version = "0.18.7", default-features = false } +op-alloy-rpc-types = { version = "0.18.12", default-features = false } +op-alloy-rpc-types-engine = { version = "0.18.12", default-features = false } +op-alloy-network = { version = "0.18.12", default-features = false } +op-alloy-consensus = { version = "0.18.12", default-features = false } +op-alloy-rpc-jsonrpsee = { version = "0.18.12", default-features = false } op-alloy-flz = { version = "0.13.1", default-features = false } # misc +either = { version = "1.15.0", default-features = false } aquamarine = "0.6" auto_impl = { version = "1", default-features = false } backon = { version = "1.2", default-features = false, features = ["std-blocking-sleep", "tokio-sleep"] } @@ -609,6 +612,7 @@ byteorder = "1" mini-moka = "0.10" tar-no-std = { version = "0.3.2", default-features = false } miniz_oxide = { version = "0.8.4", default-features = false } +chrono = "0.4.41" # metrics metrics = "0.24.0" @@ -624,6 +628,7 @@ quote = "1.0" # tokio tokio = { version = "1.44.2", default-features = false } tokio-stream = "0.1.11" +tokio-tungstenite = "0.26.2" tokio-util = { version = "0.7.4", features = ["codec"] } # async @@ -749,8 +754,8 @@ walkdir = "2.3.3" vergen-git2 = "1.0.5" [patch.crates-io] -revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" } -op-revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" } +revm = { git = "https://github.com/scroll-tech/revm" } +op-revm = { git = "https://github.com/scroll-tech/revm" } # alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } # alloy-contract = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } # alloy-eips = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } diff --git a/HARDFORK-CHECKLIST.md b/HARDFORK-CHECKLIST.md index 3e3628f0b4c..0b6361221bb 100644 --- a/HARDFORK-CHECKLIST.md +++ b/HARDFORK-CHECKLIST.md @@ -30,7 +30,7 @@ Opstack tries to be as close to the L1 engine API as much as possible. Isthmus (Prague equivalent) introduced the first deviation from the L1 engine API with an additional field in the `ExecutionPayload`. For this reason the op engine API -has it's own server traits `OpEngineApi`. +has its own server traits `OpEngineApi`. Adding a new versioned endpoint requires the same changes as for L1 just for the dedicated OP types. ### Hardforks diff --git a/Makefile b/Makefile index e7c73d44837..010c1897cfd 100644 --- a/Makefile +++ b/Makefile @@ -452,12 +452,12 @@ lint-udeps: --exclude reth-scroll-node --exclude "scroll-reth" --exclude reth-scroll-rpc \ --exclude reth-scroll-trie -lint-codespell: ensure-codespell - codespell --skip "*.json" --skip "./testing/ef-tests/ethereum-tests" +lint-typos: ensure-typos + typos -ensure-codespell: - @if ! command -v codespell &> /dev/null; then \ - echo "codespell not found. 
Please install it by running the command `pip install codespell` or refer to the following link for more information: https://github.com/codespell-project/codespell" \ +ensure-typos: + @if ! command -v typos &> /dev/null; then \ + echo "typos not found. Please install it by running the command 'cargo install typos-cli' or refer to the following link for more information: https://github.com/crate-ci/typos"; \ + exit 1; \ fi @@ -483,7 +483,7 @@ ensure-dprint: lint: make fmt && \ make clippy && \ - make lint-codespell && \ + make lint-typos && \ make lint-toml clippy-fix: diff --git a/bin/reth-bench/Cargo.toml b/bin/reth-bench/Cargo.toml index 640c582b7f4..891fa4f9780 100644 --- a/bin/reth-bench/Cargo.toml +++ b/bin/reth-bench/Cargo.toml @@ -35,6 +35,7 @@ alloy-transport-ipc.workspace = true alloy-transport-ws.workspace = true alloy-transport.workspace = true op-alloy-consensus = { workspace = true, features = ["alloy-compat"] } +op-alloy-rpc-types-engine = { workspace = true, features = ["serde"] } # reqwest reqwest = { workspace = true, default-features = false, features = ["rustls-tls-native-roots"] } @@ -64,7 +65,6 @@ humantime.workspace = true csv.workspace = true [dev-dependencies] -reth-tracing.workspace = true [features] default = ["jemalloc"] diff --git a/bin/reth-bench/README.md b/bin/reth-bench/README.md index 3f7ae7f0377..9d8a04f8deb 100644 --- a/bin/reth-bench/README.md +++ b/bin/reth-bench/README.md @@ -49,7 +49,7 @@ reth stage unwind to-block 21000000 The following `reth-bench` command would then start the benchmark at block 21,000,000: ```bash -reth-bench new-payload-fcu --rpc-url <rpc-url> --from 21000000 --to <to-block> --jwtsecret <jwt-secret> +reth-bench new-payload-fcu --rpc-url <rpc-url> --from 21000000 --to <to-block> --jwt-secret <jwt-secret> ``` Finally, make sure that reth is built using a build profile suitable for what you are trying to measure. @@ -80,11 +80,11 @@ RUSTFLAGS="-C target-cpu=native" cargo build --profile profiling --no-default-fe ### Run the Benchmark: First, start the reth node. Here is an example that runs `reth` compiled with the `profiling` profile, runs `samply`, and configures `reth` to run with metrics enabled: ```bash -samply record -p 3001 target/profiling/reth node --metrics localhost:9001 --authrpc.jwtsecret <jwt-secret> +samply record -p 3001 target/profiling/reth node --metrics localhost:9001 --authrpc.jwt-secret <jwt-secret> ``` ```bash -reth-bench new-payload-fcu --rpc-url <rpc-url> --from <from-block> --to <to-block> --jwtsecret <jwt-secret> +reth-bench new-payload-fcu --rpc-url <rpc-url> --from <from-block> --to <to-block> --jwt-secret <jwt-secret> ``` Replace `<rpc-url>`, `<from-block>`, and `<to-block>` with the appropriate values for your testing environment. `<rpc-url>` should be the URL of an RPC endpoint that can provide the blocks that will be used during the execution. diff --git a/bin/reth-bench/scripts/compare_newpayload_latency.py b/bin/reth-bench/scripts/compare_newpayload_latency.py index ff9cdad5262..f434d034b9a 100755 --- a/bin/reth-bench/scripts/compare_newpayload_latency.py +++ b/bin/reth-bench/scripts/compare_newpayload_latency.py @@ -3,7 +3,7 @@ # requires-python = ">=3.8" # dependencies = [ # "pandas", -# "matplotlib", +# "matplotlib", # "numpy", # ] # /// @@ -16,6 +16,8 @@ # # - A simple line graph plotting the latencies of the two files against each # other. + +# - A gas per second (gas/s) chart showing throughput over time.
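+#
+# Example invocation (a sketch; the CSV file names here are hypothetical,
+# while the flags are the ones defined by the argument parser below):
+#
+#   ./compare_newpayload_latency.py baseline.csv comparison.csv \
+#       --graphs line,gas --average 50 -o bench.png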
import argparse @@ -23,15 +25,82 @@ import matplotlib.pyplot as plt import numpy as np import sys +import os +from matplotlib.ticker import FuncFormatter + +def get_output_filename(base_path, suffix=None): + """Generate output filename with optional suffix.""" + if suffix is None: + return base_path + + # Split the base path into directory, name, and extension + dir_name = os.path.dirname(base_path) + base_name = os.path.basename(base_path) + name, ext = os.path.splitext(base_name) + + # Create new filename with suffix + new_name = f"{name}_{suffix}{ext}" + return os.path.join(dir_name, new_name) if dir_name else new_name + +def format_gas_units(value, pos): + """Format gas values with appropriate units (gas, Kgas, Mgas, Ggas, Tgas).""" + if value == 0: + return '0' + + # Define unit thresholds and labels + units = [ + (1e12, 'Tgas'), # Teragas + (1e9, 'Ggas'), # Gigagas + (1e6, 'Mgas'), # Megagas + (1e3, 'Kgas'), # Kilogas + (1, 'gas') # gas + ] + + abs_value = abs(value) + for threshold, unit in units: + if abs_value >= threshold: + scaled_value = value / threshold + # Format with appropriate precision + if scaled_value >= 100: + return f'{scaled_value:.0f}{unit}/s' + elif scaled_value >= 10: + return f'{scaled_value:.1f}{unit}/s' + else: + return f'{scaled_value:.2f}{unit}/s' + + return f'{value:.0f}gas/s' + +def moving_average(data, window_size): + """Calculate moving average with given window size.""" + if window_size <= 1: + return data + + # Use pandas for efficient rolling mean calculation + series = pd.Series(data) + return series.rolling(window=window_size, center=True, min_periods=1).mean().values def main(): parser = argparse.ArgumentParser(description='Generate histogram of total_latency percent differences between two CSV files') parser.add_argument('baseline_csv', help='First CSV file, used as the baseline/control') parser.add_argument('comparison_csv', help='Second CSV file, which is being compared to the baseline') parser.add_argument('-o', '--output', default='latency.png', help='Output image file (default: latency.png)') + parser.add_argument('--graphs', default='all', help='Comma-separated list of graphs to plot: histogram, line, gas, all (default: all)') + parser.add_argument('--average', type=int, metavar='N', help='Apply moving average over N blocks to smooth line and gas charts') + parser.add_argument('--separate', action='store_true', help='Output each chart as a separate file') args = parser.parse_args() + # Parse graph selection + if args.graphs.lower() == 'all': + selected_graphs = {'histogram', 'line', 'gas'} + else: + selected_graphs = set(graph.strip().lower() for graph in args.graphs.split(',')) + valid_graphs = {'histogram', 'line', 'gas'} + invalid_graphs = selected_graphs - valid_graphs + if invalid_graphs: + print(f"Error: Invalid graph types: {', '.join(invalid_graphs)}. 
Valid options are: histogram, line, gas, all", file=sys.stderr) + sys.exit(1) + try: df1 = pd.read_csv(args.baseline_csv) df2 = pd.read_csv(args.comparison_csv) @@ -50,14 +119,24 @@ def main(): print(f"Error: 'total_latency' column not found in {args.comparison_csv}", file=sys.stderr) sys.exit(1) + # Check for gas_used column if gas graph is selected + if 'gas' in selected_graphs: + if 'gas_used' not in df1.columns: + print(f"Error: 'gas_used' column not found in {args.baseline_csv} (required for gas graph)", file=sys.stderr) + sys.exit(1) + if 'gas_used' not in df2.columns: + print(f"Error: 'gas_used' column not found in {args.comparison_csv} (required for gas graph)", file=sys.stderr) + sys.exit(1) + if len(df1) != len(df2): print("Warning: CSV files have different number of rows. Using minimum length.", file=sys.stderr) min_len = min(len(df1), len(df2)) df1 = df1.head(min_len) df2 = df2.head(min_len) - latency1 = df1['total_latency'].values - latency2 = df2['total_latency'].values + # Convert from microseconds to milliseconds for better readability + latency1 = df1['total_latency'].values / 1000.0 + latency2 = df2['total_latency'].values / 1000.0 # Handle division by zero with np.errstate(divide='ignore', invalid='ignore'): @@ -70,54 +149,220 @@ def main(): print("Error: No valid percent differences could be calculated", file=sys.stderr) sys.exit(1) - # Create histogram with 1% buckets - min_diff = np.floor(percent_diff.min()) - max_diff = np.ceil(percent_diff.max()) + # Calculate statistics once for use in graphs and output + mean_diff = np.mean(percent_diff) + median_diff = np.median(percent_diff) - bins = np.arange(min_diff, max_diff + 1, 1) + # Determine number of subplots and create figure + num_plots = len(selected_graphs) + if num_plots == 0: + print("Error: No valid graphs selected", file=sys.stderr) + sys.exit(1) - # Create figure with two subplots - fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 12)) + # Store output filenames + output_files = [] + + if args.separate: + # We'll create individual figures for each graph + pass + else: + # Create combined figure + if num_plots == 1: + fig, ax = plt.subplots(1, 1, figsize=(12, 6)) + axes = [ax] + else: + fig, axes = plt.subplots(num_plots, 1, figsize=(12, 6 * num_plots)) - # Top subplot: Histogram - ax1.hist(percent_diff, bins=bins, edgecolor='black', alpha=0.7) - ax1.set_xlabel('Percent Difference (%)') - ax1.set_ylabel('Number of Blocks') - ax1.set_title(f'Total Latency Percent Difference Histogram\n({args.baseline_csv} vs {args.comparison_csv})') - ax1.grid(True, alpha=0.3) + plot_idx = 0 - # Add statistics to the histogram - mean_diff = np.mean(percent_diff) - median_diff = np.median(percent_diff) - ax1.axvline(mean_diff, color='red', linestyle='--', label=f'Mean: {mean_diff:.2f}%') - ax1.axvline(median_diff, color='orange', linestyle='--', label=f'Median: {median_diff:.2f}%') - ax1.legend() - - # Bottom subplot: Latency vs Block Number - if 'block_number' in df1.columns and 'block_number' in df2.columns: - block_numbers = df1['block_number'].values[:len(percent_diff)] - ax2.plot(block_numbers, latency1[:len(percent_diff)], 'b-', alpha=0.7, label=f'Baseline ({args.baseline_csv})') - ax2.plot(block_numbers, latency2[:len(percent_diff)], 'r-', alpha=0.7, label=f'Comparison ({args.comparison_csv})') - ax2.set_xlabel('Block Number') - ax2.set_ylabel('Total Latency (ms)') - ax2.set_title('Total Latency vs Block Number') - ax2.grid(True, alpha=0.3) - ax2.legend() + # Plot histogram if selected + if 'histogram' in selected_graphs: 
+ if args.separate: + fig, ax = plt.subplots(1, 1, figsize=(12, 6)) + else: + ax = axes[plot_idx] + + min_diff = np.floor(percent_diff.min()) + max_diff = np.ceil(percent_diff.max()) + + # Create histogram with 1% buckets + bins = np.arange(min_diff, max_diff + 1, 1) + + ax.hist(percent_diff, bins=bins, edgecolor='black', alpha=0.7) + ax.set_xlabel('Percent Difference (%)') + ax.set_ylabel('Number of Blocks') + ax.set_title(f'Total Latency Percent Difference Histogram\n({args.baseline_csv} vs {args.comparison_csv})') + ax.grid(True, alpha=0.3) + + # Add statistics to the histogram + ax.axvline(mean_diff, color='red', linestyle='--', label=f'Mean: {mean_diff:.2f}%') + ax.axvline(median_diff, color='orange', linestyle='--', label=f'Median: {median_diff:.2f}%') + ax.legend() + + if args.separate: + plt.tight_layout() + output_file = get_output_filename(args.output, 'histogram') + plt.savefig(output_file, dpi=300, bbox_inches='tight') + output_files.append(output_file) + plt.close(fig) + else: + plot_idx += 1 + + # Plot line graph if selected + if 'line' in selected_graphs: + if args.separate: + fig, ax = plt.subplots(1, 1, figsize=(12, 6)) + else: + ax = axes[plot_idx] + + # Determine comparison color based on median change. The median being + # negative means processing time got faster, so that becomes green. + comparison_color = 'green' if median_diff < 0 else 'red' + + # Apply moving average if requested + plot_latency1 = latency1[:len(percent_diff)] + plot_latency2 = latency2[:len(percent_diff)] + + if args.average: + plot_latency1 = moving_average(plot_latency1, args.average) + plot_latency2 = moving_average(plot_latency2, args.average) + if 'block_number' in df1.columns and 'block_number' in df2.columns: + block_numbers = df1['block_number'].values[:len(percent_diff)] + ax.plot(block_numbers, plot_latency1, 'orange', alpha=0.7, label=f'Baseline ({args.baseline_csv})') + ax.plot(block_numbers, plot_latency2, comparison_color, alpha=0.7, label=f'Comparison ({args.comparison_csv})') + ax.set_xlabel('Block Number') + ax.set_ylabel('Total Latency (ms)') + title = 'Total Latency vs Block Number' + if args.average: + title += f' ({args.average}-block moving average)' + ax.set_title(title) + ax.grid(True, alpha=0.3) + ax.legend() + else: + # If no block_number column, use index + indices = np.arange(len(percent_diff)) + ax.plot(indices, plot_latency1, 'orange', alpha=0.7, label=f'Baseline ({args.baseline_csv})') + ax.plot(indices, plot_latency2, comparison_color, alpha=0.7, label=f'Comparison ({args.comparison_csv})') + ax.set_xlabel('Block Index') + ax.set_ylabel('Total Latency (ms)') + title = 'Total Latency vs Block Index' + if args.average: + title += f' ({args.average}-block moving average)' + ax.set_title(title) + ax.grid(True, alpha=0.3) + ax.legend() + + if args.separate: + plt.tight_layout() + output_file = get_output_filename(args.output, 'line') + plt.savefig(output_file, dpi=300, bbox_inches='tight') + output_files.append(output_file) + plt.close(fig) + else: + plot_idx += 1 + + # Plot gas/s graph if selected + if 'gas' in selected_graphs: + if args.separate: + fig, ax = plt.subplots(1, 1, figsize=(12, 6)) + else: + ax = axes[plot_idx] + + # Calculate gas per second (gas/s) + # latency is in microseconds, so convert to seconds for gas/s calculation + gas1 = df1['gas_used'].values[:len(percent_diff)] + gas2 = df2['gas_used'].values[:len(percent_diff)] + + # Convert latency from microseconds to seconds + latency1_sec = df1['total_latency'].values[:len(percent_diff)] / 1_000_000.0 + 
latency2_sec = df2['total_latency'].values[:len(percent_diff)] / 1_000_000.0 + + # Calculate gas per second + gas_per_sec1 = gas1 / latency1_sec + gas_per_sec2 = gas2 / latency2_sec + + # Store original values for statistics before averaging + original_gas_per_sec1 = gas_per_sec1.copy() + original_gas_per_sec2 = gas_per_sec2.copy() + + # Apply moving average if requested + if args.average: + gas_per_sec1 = moving_average(gas_per_sec1, args.average) + gas_per_sec2 = moving_average(gas_per_sec2, args.average) + + # Calculate median gas/s for color determination (use original values) + median_gas_per_sec1 = np.median(original_gas_per_sec1) + median_gas_per_sec2 = np.median(original_gas_per_sec2) + comparison_color = 'green' if median_gas_per_sec2 > median_gas_per_sec1 else 'red' + + if 'block_number' in df1.columns and 'block_number' in df2.columns: + block_numbers = df1['block_number'].values[:len(percent_diff)] + ax.plot(block_numbers, gas_per_sec1, 'orange', alpha=0.7, label=f'Baseline ({args.baseline_csv})') + ax.plot(block_numbers, gas_per_sec2, comparison_color, alpha=0.7, label=f'Comparison ({args.comparison_csv})') + ax.set_xlabel('Block Number') + ax.set_ylabel('Gas Throughput') + title = 'Gas Throughput vs Block Number' + if args.average: + title += f' ({args.average}-block moving average)' + ax.set_title(title) + ax.grid(True, alpha=0.3) + ax.legend() + + # Format Y-axis with gas units + formatter = FuncFormatter(format_gas_units) + ax.yaxis.set_major_formatter(formatter) + else: + # If no block_number column, use index + indices = np.arange(len(percent_diff)) + ax.plot(indices, gas_per_sec1, 'orange', alpha=0.7, label=f'Baseline ({args.baseline_csv})') + ax.plot(indices, gas_per_sec2, comparison_color, alpha=0.7, label=f'Comparison ({args.comparison_csv})') + ax.set_xlabel('Block Index') + ax.set_ylabel('Gas Throughput') + title = 'Gas Throughput vs Block Index' + if args.average: + title += f' ({args.average}-block moving average)' + ax.set_title(title) + ax.grid(True, alpha=0.3) + ax.legend() + + # Format Y-axis with gas units + formatter = FuncFormatter(format_gas_units) + ax.yaxis.set_major_formatter(formatter) + + if args.separate: + plt.tight_layout() + output_file = get_output_filename(args.output, 'gas') + plt.savefig(output_file, dpi=300, bbox_inches='tight') + output_files.append(output_file) + plt.close(fig) + else: + plot_idx += 1 + + # Save combined figure if not using separate files + if not args.separate: + plt.tight_layout() + plt.savefig(args.output, dpi=300, bbox_inches='tight') + output_files.append(args.output) + + # Create graph type description for output message + graph_types = [] + if 'histogram' in selected_graphs: + graph_types.append('histogram') + if 'line' in selected_graphs: + graph_types.append('latency graph') + if 'gas' in selected_graphs: + graph_types.append('gas/s graph') + graph_desc = ' and '.join(graph_types) + + # Print output file(s) information + if args.separate: + print(f"Saved {len(output_files)} separate files:") + for output_file in output_files: + print(f" - {output_file}") else: - # If no block_number column, use index - indices = np.arange(len(percent_diff)) - ax2.plot(indices, latency1[:len(percent_diff)], 'b-', alpha=0.7, label=f'Baseline ({args.baseline_csv})') - ax2.plot(indices, latency2[:len(percent_diff)], 'r-', alpha=0.7, label=f'Comparison ({args.comparison_csv})') - ax2.set_xlabel('Block Index') - ax2.set_ylabel('Total Latency (ms)') - ax2.set_title('Total Latency vs Block Index') - ax2.grid(True, alpha=0.3) - 
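Note: `format_gas_units` is likewise defined above this hunk. The gas/s series it labels is just `gas_used` over latency in seconds; a hypothetical sketch of both, assuming a formatter that steps through metric prefixes:

```python
import numpy as np

def format_gas_units(value, _pos):
    # FuncFormatter callback: (tick value, tick position) -> axis label.
    for unit in ('gas/s', 'Kgas/s', 'Mgas/s'):
        if abs(value) < 1000.0:
            return f'{value:.1f} {unit}'
        value /= 1000.0
    return f'{value:.1f} Ggas/s'

gas_used = np.array([15_000_000, 30_000_000])
latency_us = np.array([120_000.0, 200_000.0])
gas_per_sec = gas_used / (latency_us / 1_000_000.0)  # µs -> s, then gas/s
print([format_gas_units(v, None) for v in gas_per_sec])
# ['125.0 Mgas/s', '150.0 Mgas/s']
```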
ax2.legend() - - plt.tight_layout() - plt.savefig(args.output, dpi=300, bbox_inches='tight') - print(f"Histogram and latency graph saved to {args.output}") + print(f"{graph_desc.capitalize()} saved to {args.output}") + # Always print statistics print(f"\nStatistics:") print(f"Mean percent difference: {mean_diff:.2f}%") print(f"Median percent difference: {median_diff:.2f}%") @@ -125,6 +370,15 @@ def main(): print(f"Min: {percent_diff.min():.2f}%") print(f"Max: {percent_diff.max():.2f}%") print(f"Total blocks analyzed: {len(percent_diff)}") + + # Print gas/s statistics if gas data is available + if 'gas' in selected_graphs: + # Use original values for statistics (not averaged) + print(f"\nGas/s Statistics:") + print(f"Baseline median gas/s: {median_gas_per_sec1:,.0f}") + print(f"Comparison median gas/s: {median_gas_per_sec2:,.0f}") + gas_diff_percent = ((median_gas_per_sec2 - median_gas_per_sec1) / median_gas_per_sec1) * 100 + print(f"Gas/s percent change: {gas_diff_percent:+.2f}%") if __name__ == '__main__': main() diff --git a/bin/reth-bench/src/bench/context.rs b/bin/reth-bench/src/bench/context.rs index e5b1b363449..c4006dc8155 100644 --- a/bin/reth-bench/src/bench/context.rs +++ b/bin/reth-bench/src/bench/context.rs @@ -3,6 +3,7 @@ use crate::{authenticated_transport::AuthenticatedTransportConnect, bench_mode::BenchMode}; use alloy_eips::BlockNumberOrTag; +use alloy_primitives::address; use alloy_provider::{network::AnyNetwork, Provider, RootProvider}; use alloy_rpc_client::ClientBuilder; use alloy_rpc_types_engine::JwtSecret; @@ -25,6 +26,8 @@ pub(crate) struct BenchContext { pub(crate) benchmark_mode: BenchMode, /// The next block to fetch. pub(crate) next_block: u64, + /// Whether the chain is an OP rollup. + pub(crate) is_optimism: bool, } impl BenchContext { @@ -33,17 +36,28 @@ impl BenchContext { pub(crate) async fn new(bench_args: &BenchmarkArgs, rpc_url: String) -> eyre::Result { info!("Running benchmark using data from RPC URL: {}", rpc_url); - // Ensure that output directory is a directory + // Ensure that output directory exists and is a directory if let Some(output) = &bench_args.output { if output.is_file() { return Err(eyre::eyre!("Output path must be a directory")); } + // Create the directory if it doesn't exist + if !output.exists() { + std::fs::create_dir_all(output)?; + info!("Created output directory: {:?}", output); + } } // set up alloy client for blocks let client = ClientBuilder::default().http(rpc_url.parse()?); let block_provider = RootProvider::::new(client); + // Check if this is an OP chain by checking code at a predeploy address. + let is_optimism = !block_provider + .get_code_at(address!("0x420000000000000000000000000000000000000F")) + .await? + .is_empty(); + // If neither `--from` nor `--to` are provided, we will run the benchmark continuously, // starting at the latest block. 
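Aside on the `is_optimism` probe above: it keys off a fixed OP-stack predeploy, treating the chain as an OP rollup whenever the account at `0x4200…000F` has contract code. The same check expressed over raw JSON-RPC (`requests` is used purely for illustration; the bench goes through alloy's provider):

```python
import requests

OP_PREDEPLOY = "0x420000000000000000000000000000000000000F"

def is_optimism(rpc_url: str) -> bool:
    """True if the OP predeploy account has contract code on this chain."""
    code = requests.post(rpc_url, json={
        "jsonrpc": "2.0",
        "id": 1,
        "method": "eth_getCode",
        "params": [OP_PREDEPLOY, "latest"],
    }, timeout=10).json()["result"]
    return code not in (None, "", "0x")
```

On an L1 such as mainnet the account is empty, so the probe returns false and the bench sends the plain Ethereum payload types.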
let mut benchmark_mode = BenchMode::new(bench_args.from, bench_args.to)?; @@ -52,7 +66,7 @@ impl BenchContext { let auth_jwt = bench_args .auth_jwtsecret .clone() - .ok_or_else(|| eyre::eyre!("--jwtsecret must be provided for authenticated RPC"))?; + .ok_or_else(|| eyre::eyre!("--jwt-secret must be provided for authenticated RPC"))?; // fetch jwt from file // @@ -94,6 +108,6 @@ impl BenchContext { }; let next_block = first_block.header.number + 1; - Ok(Self { auth_provider, block_provider, benchmark_mode, next_block }) + Ok(Self { auth_provider, block_provider, benchmark_mode, next_block, is_optimism }) } } diff --git a/bin/reth-bench/src/bench/mod.rs b/bin/reth-bench/src/bench/mod.rs index afc76b3b6ac..da3ccb1a8bb 100644 --- a/bin/reth-bench/src/bench/mod.rs +++ b/bin/reth-bench/src/bench/mod.rs @@ -38,7 +38,7 @@ pub enum Subcommands { /// /// One powerful use case is pairing this command with the `cast block` command, for example: /// - /// `cast block latest--full --json | reth-bench send-payload --rpc-url localhost:5000 + /// `cast block latest --full --json | reth-bench send-payload --rpc-url localhost:5000 /// --jwt-secret $(cat ~/.local/share/reth/mainnet/jwt.hex)` SendPayload(send_payload::Command), } diff --git a/bin/reth-bench/src/bench/new_payload_fcu.rs b/bin/reth-bench/src/bench/new_payload_fcu.rs index 76166197a73..ac0ab66a864 100644 --- a/bin/reth-bench/src/bench/new_payload_fcu.rs +++ b/bin/reth-bench/src/bench/new_payload_fcu.rs @@ -9,10 +9,10 @@ use crate::{ GAS_OUTPUT_SUFFIX, }, }, - valid_payload::{call_forkchoice_updated, call_new_payload}, + valid_payload::{block_to_new_payload, call_forkchoice_updated, call_new_payload}, }; use alloy_provider::Provider; -use alloy_rpc_types_engine::{ExecutionPayload, ForkchoiceState}; +use alloy_rpc_types_engine::ForkchoiceState; use clap::Parser; use csv::Writer; use humantime::parse_duration; @@ -39,32 +39,23 @@ pub struct Command { impl Command { /// Execute `benchmark new-payload-fcu` command pub async fn execute(self, _ctx: CliContext) -> eyre::Result<()> { - let BenchContext { benchmark_mode, block_provider, auth_provider, mut next_block } = - BenchContext::new(&self.benchmark, self.rpc_url).await?; + let BenchContext { + benchmark_mode, + block_provider, + auth_provider, + mut next_block, + is_optimism, + } = BenchContext::new(&self.benchmark, self.rpc_url).await?; let (sender, mut receiver) = tokio::sync::mpsc::channel(1000); tokio::task::spawn(async move { while benchmark_mode.contains(next_block) { let block_res = block_provider.get_block_by_number(next_block.into()).full().await; let block = block_res.unwrap().unwrap(); + let header = block.header.clone(); - let block = block - .into_inner() - .map_header(|header| header.map(|h| h.into_header_with_defaults())) - .try_map_transactions(|tx| { - // try to convert unknowns into op type so that we can also support optimism - tx.try_into_either::() - }) - .unwrap() - .into_consensus(); - - let blob_versioned_hashes = - block.body.blob_versioned_hashes_iter().copied().collect::>(); - - // Convert to execution payload - let (payload, sidecar) = ExecutionPayload::from_block_slow(&block); - let header = block.header; - let head_block_hash = payload.block_hash(); + let (version, params) = block_to_new_payload(block, is_optimism).unwrap(); + let head_block_hash = header.hash; let safe_block_hash = block_provider.get_block_by_number(header.number.saturating_sub(32).into()); @@ -81,9 +72,8 @@ impl Command { sender .send(( header, - blob_versioned_hashes, - payload, - sidecar, + version, 
+ params, head_block_hash, safe_block_hash, finalized_block_hash, @@ -98,7 +88,7 @@ impl Command { let total_benchmark_duration = Instant::now(); let mut total_wait_time = Duration::ZERO; - while let Some((header, versioned_hashes, payload, sidecar, head, safe, finalized)) = { + while let Some((header, version, params, head, safe, finalized)) = { let wait_start = Instant::now(); let result = receiver.recv().await; total_wait_time += wait_start.elapsed(); @@ -118,19 +108,11 @@ impl Command { }; let start = Instant::now(); - let message_version = call_new_payload( - &auth_provider, - payload, - sidecar, - header.parent_beacon_block_root, - versioned_hashes, - ) - .await?; + call_new_payload(&auth_provider, version, params).await?; let new_payload_result = NewPayloadResult { gas_used, latency: start.elapsed() }; - call_forkchoice_updated(&auth_provider, message_version, forkchoice_state, None) - .await?; + call_forkchoice_updated(&auth_provider, version, forkchoice_state, None).await?; // calculate the total duration and the fcu latency, record let total_latency = start.elapsed(); diff --git a/bin/reth-bench/src/bench/new_payload_only.rs b/bin/reth-bench/src/bench/new_payload_only.rs index 099ef8112e1..8dda7df4ecd 100644 --- a/bin/reth-bench/src/bench/new_payload_only.rs +++ b/bin/reth-bench/src/bench/new_payload_only.rs @@ -8,10 +8,9 @@ use crate::{ NEW_PAYLOAD_OUTPUT_SUFFIX, }, }, - valid_payload::call_new_payload, + valid_payload::{block_to_new_payload, call_new_payload}, }; use alloy_provider::Provider; -use alloy_rpc_types_engine::ExecutionPayload; use clap::Parser; use csv::Writer; use reth_cli_runner::CliContext; @@ -33,29 +32,25 @@ pub struct Command { impl Command { /// Execute `benchmark new-payload-only` command pub async fn execute(self, _ctx: CliContext) -> eyre::Result<()> { - let BenchContext { benchmark_mode, block_provider, auth_provider, mut next_block } = - BenchContext::new(&self.benchmark, self.rpc_url).await?; + let BenchContext { + benchmark_mode, + block_provider, + auth_provider, + mut next_block, + is_optimism, + } = BenchContext::new(&self.benchmark, self.rpc_url).await?; let (sender, mut receiver) = tokio::sync::mpsc::channel(1000); tokio::task::spawn(async move { while benchmark_mode.contains(next_block) { let block_res = block_provider.get_block_by_number(next_block.into()).full().await; let block = block_res.unwrap().unwrap(); - let block = block - .into_inner() - .map_header(|header| header.map(|h| h.into_header_with_defaults())) - .try_map_transactions(|tx| { - tx.try_into_either::() - }) - .unwrap() - .into_consensus(); - - let blob_versioned_hashes = - block.body.blob_versioned_hashes_iter().copied().collect::>(); - let (payload, sidecar) = ExecutionPayload::from_block_slow(&block); + let header = block.header.clone(); + + let (version, params) = block_to_new_payload(block, is_optimism).unwrap(); next_block += 1; - sender.send((block.header, blob_versioned_hashes, payload, sidecar)).await.unwrap(); + sender.send((header, version, params)).await.unwrap(); } }); @@ -64,7 +59,7 @@ impl Command { let total_benchmark_duration = Instant::now(); let mut total_wait_time = Duration::ZERO; - while let Some((header, versioned_hashes, payload, sidecar)) = { + while let Some((header, version, params)) = { let wait_start = Instant::now(); let result = receiver.recv().await; total_wait_time += wait_start.elapsed(); @@ -73,7 +68,7 @@ impl Command { // just put gas used here let gas_used = header.gas_used; - let block_number = payload.block_number(); + let block_number = 
header.number; debug!( target: "reth-bench", @@ -82,14 +77,7 @@ impl Command { ); let start = Instant::now(); - call_new_payload( - &auth_provider, - payload, - sidecar, - header.parent_beacon_block_root, - versioned_hashes, - ) - .await?; + call_new_payload(&auth_provider, version, params).await?; let new_payload_result = NewPayloadResult { gas_used, latency: start.elapsed() }; info!(%new_payload_result); diff --git a/bin/reth-bench/src/valid_payload.rs b/bin/reth-bench/src/valid_payload.rs index e2f83a0ec25..d253506b22b 100644 --- a/bin/reth-bench/src/valid_payload.rs +++ b/bin/reth-bench/src/valid_payload.rs @@ -2,53 +2,20 @@ //! response. This is useful for benchmarking, as it allows us to wait for a payload to be valid //! before sending additional calls. -use alloy_eips::eip7685::RequestsOrHash; -use alloy_primitives::B256; -use alloy_provider::{ext::EngineApi, Network, Provider}; +use alloy_eips::eip7685::Requests; +use alloy_provider::{ext::EngineApi, network::AnyRpcBlock, Network, Provider}; use alloy_rpc_types_engine::{ - ExecutionPayload, ExecutionPayloadInputV2, ExecutionPayloadSidecar, ExecutionPayloadV1, - ExecutionPayloadV3, ForkchoiceState, ForkchoiceUpdated, PayloadAttributes, PayloadStatus, + ExecutionPayload, ExecutionPayloadInputV2, ForkchoiceState, ForkchoiceUpdated, + PayloadAttributes, PayloadStatus, }; use alloy_transport::TransportResult; +use op_alloy_rpc_types_engine::OpExecutionPayloadV4; use reth_node_api::EngineApiMessageVersion; use tracing::error; /// An extension trait for providers that implement the engine API, to wait for a VALID response. #[async_trait::async_trait] pub trait EngineApiValidWaitExt: Send + Sync { - /// Calls `engine_newPayloadV1` with the given [`ExecutionPayloadV1`], and waits until the - /// response is VALID. - async fn new_payload_v1_wait( - &self, - payload: ExecutionPayloadV1, - ) -> TransportResult; - - /// Calls `engine_newPayloadV2` with the given [`ExecutionPayloadInputV2`], and waits until the - /// response is VALID. - async fn new_payload_v2_wait( - &self, - payload: ExecutionPayloadInputV2, - ) -> TransportResult; - - /// Calls `engine_newPayloadV3` with the given [`ExecutionPayloadV3`], parent beacon block root, - /// and versioned hashes, and waits until the response is VALID. - async fn new_payload_v3_wait( - &self, - payload: ExecutionPayloadV3, - versioned_hashes: Vec, - parent_beacon_block_root: B256, - ) -> TransportResult; - - /// Calls `engine_newPayloadV4` with the given [`ExecutionPayloadV3`], parent beacon block root, - /// versioned hashes, and requests hash, and waits until the response is VALID. - async fn new_payload_v4_wait( - &self, - payload: ExecutionPayloadV3, - versioned_hashes: Vec, - parent_beacon_block_root: B256, - requests_hash: B256, - ) -> TransportResult; - /// Calls `engine_forkChoiceUpdatedV1` with the given [`ForkchoiceState`] and optional /// [`PayloadAttributes`], and waits until the response is VALID. 
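For reference, the params that the `fork_choice_updated_v*_wait` helpers below ultimately put on the wire are the three-pointer forkchoice state plus optional payload attributes. A sketch of the JSON shape (field names per the engine API spec; the 32-block safe offset comes from the producer task shown earlier, and the bench sends no attributes):

```python
# engine_forkchoiceUpdatedV{1,2,3} params: [state, attributes-or-null].
forkchoice_state = {
    "headBlockHash": "0x" + "11" * 32,       # block just sent via newPayload
    "safeBlockHash": "0x" + "22" * 32,       # trails the head (32 blocks here)
    "finalizedBlockHash": "0x" + "33" * 32,  # trails further still
}
params = [forkchoice_state, None]  # no payload attributes in the bench
```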
async fn fork_choice_updated_v1_wait( @@ -80,122 +47,6 @@ where N: Network, P: Provider + EngineApi, { - async fn new_payload_v1_wait( - &self, - payload: ExecutionPayloadV1, - ) -> TransportResult { - let mut status = self.new_payload_v1(payload.clone()).await?; - while !status.is_valid() { - if status.is_invalid() { - error!(?status, ?payload, "Invalid newPayloadV1",); - panic!("Invalid newPayloadV1: {status:?}"); - } - status = self.new_payload_v1(payload.clone()).await?; - } - Ok(status) - } - - async fn new_payload_v2_wait( - &self, - payload: ExecutionPayloadInputV2, - ) -> TransportResult { - let mut status = self.new_payload_v2(payload.clone()).await?; - while !status.is_valid() { - if status.is_invalid() { - error!(?status, ?payload, "Invalid newPayloadV2",); - panic!("Invalid newPayloadV2: {status:?}"); - } - status = self.new_payload_v2(payload.clone()).await?; - } - Ok(status) - } - - async fn new_payload_v3_wait( - &self, - payload: ExecutionPayloadV3, - versioned_hashes: Vec, - parent_beacon_block_root: B256, - ) -> TransportResult { - let mut status = self - .new_payload_v3(payload.clone(), versioned_hashes.clone(), parent_beacon_block_root) - .await?; - while !status.is_valid() { - if status.is_invalid() { - error!( - ?status, - ?payload, - ?versioned_hashes, - ?parent_beacon_block_root, - "Invalid newPayloadV3", - ); - panic!("Invalid newPayloadV3: {status:?}"); - } - if status.is_syncing() { - return Err(alloy_json_rpc::RpcError::UnsupportedFeature( - "invalid range: no canonical state found for parent of requested block", - )) - } - status = self - .new_payload_v3(payload.clone(), versioned_hashes.clone(), parent_beacon_block_root) - .await?; - } - Ok(status) - } - - async fn new_payload_v4_wait( - &self, - payload: ExecutionPayloadV3, - versioned_hashes: Vec, - parent_beacon_block_root: B256, - requests_hash: B256, - ) -> TransportResult { - // We cannot use `self.new_payload_v4` because it does not support sending - // `RequestsOrHash::Hash` - - let mut status: PayloadStatus = self - .client() - .request( - "engine_newPayloadV4", - ( - payload.clone(), - versioned_hashes.clone(), - parent_beacon_block_root, - RequestsOrHash::Hash(requests_hash), - ), - ) - .await?; - while !status.is_valid() { - if status.is_invalid() { - error!( - ?status, - ?payload, - ?versioned_hashes, - ?parent_beacon_block_root, - "Invalid newPayloadV4", - ); - panic!("Invalid newPayloadV4: {status:?}"); - } - if status.is_syncing() { - return Err(alloy_json_rpc::RpcError::UnsupportedFeature( - "invalid range: no canonical state found for parent of requested block", - )) - } - status = self - .client() - .request( - "engine_newPayloadV4", - ( - payload.clone(), - versioned_hashes.clone(), - parent_beacon_block_root, - RequestsOrHash::Hash(requests_hash), - ), - ) - .await?; - } - Ok(status) - } - async fn fork_choice_updated_v1_wait( &self, fork_choice_state: ForkchoiceState, @@ -282,39 +133,60 @@ where } } -/// Calls the correct `engine_newPayload` method depending on the given [`ExecutionPayload`] and its -/// versioned variant. Returns the [`EngineApiMessageVersion`] depending on the payload's version. -/// -/// # Panics -/// If the given payload is a V3 payload, but a parent beacon block root is provided as `None`. 
-pub(crate) async fn call_new_payload>( - provider: P, - payload: ExecutionPayload, - sidecar: ExecutionPayloadSidecar, - parent_beacon_block_root: Option, - versioned_hashes: Vec, -) -> TransportResult { - match payload { +pub(crate) fn block_to_new_payload( + block: AnyRpcBlock, + is_optimism: bool, +) -> eyre::Result<(EngineApiMessageVersion, serde_json::Value)> { + let block = block + .into_inner() + .map_header(|header| header.map(|h| h.into_header_with_defaults())) + .try_map_transactions(|tx| { + // try to convert unknowns into op type so that we can also support optimism + tx.try_into_either::() + })? + .into_consensus(); + + // Convert to execution payload + let (payload, sidecar) = ExecutionPayload::from_block_slow(&block); + + let (version, params) = match payload { ExecutionPayload::V3(payload) => { - // We expect the caller to provide `parent_beacon_block_root` for V3 payloads. - let parent_beacon_block_root = parent_beacon_block_root - .expect("parent_beacon_block_root is required for V3 payloads and higher"); + let cancun = sidecar.cancun().unwrap(); - if let Some(requests_hash) = sidecar.requests_hash() { - provider - .new_payload_v4_wait( - payload, - versioned_hashes, - parent_beacon_block_root, - requests_hash, + if let Some(prague) = sidecar.prague() { + if is_optimism { + ( + EngineApiMessageVersion::V4, + serde_json::to_value(( + OpExecutionPayloadV4 { + payload_inner: payload, + withdrawals_root: block.withdrawals_root.unwrap(), + }, + cancun.versioned_hashes.clone(), + cancun.parent_beacon_block_root, + Requests::default(), + ))?, + ) + } else { + ( + EngineApiMessageVersion::V4, + serde_json::to_value(( + payload, + cancun.versioned_hashes.clone(), + cancun.parent_beacon_block_root, + prague.requests.requests_hash(), + ))?, ) - .await?; - Ok(EngineApiMessageVersion::V4) + } } else { - provider - .new_payload_v3_wait(payload, versioned_hashes, parent_beacon_block_root) - .await?; - Ok(EngineApiMessageVersion::V3) + ( + EngineApiMessageVersion::V3, + serde_json::to_value(( + payload, + cancun.versioned_hashes.clone(), + cancun.parent_beacon_block_root, + ))?, + ) } } ExecutionPayload::V2(payload) => { @@ -323,16 +195,43 @@ pub(crate) async fn call_new_payload>( withdrawals: Some(payload.withdrawals), }; - provider.new_payload_v2_wait(input).await?; - - Ok(EngineApiMessageVersion::V2) + (EngineApiMessageVersion::V2, serde_json::to_value((input,))?) } ExecutionPayload::V1(payload) => { - provider.new_payload_v1_wait(payload).await?; + (EngineApiMessageVersion::V1, serde_json::to_value((payload,))?) + } + }; - Ok(EngineApiMessageVersion::V1) + Ok((version, params)) +} + +/// Calls the correct `engine_newPayload` method depending on the given [`ExecutionPayload`] and its +/// versioned variant. Returns the [`EngineApiMessageVersion`] depending on the payload's version. +/// +/// # Panics +/// If the given payload is a V3 payload, but a parent beacon block root is provided as `None`. 
+pub(crate) async fn call_new_payload>( + provider: P, + version: EngineApiMessageVersion, + params: serde_json::Value, +) -> TransportResult<()> { + let method = version.method_name(); + + let mut status: PayloadStatus = provider.client().request(method, ¶ms).await?; + + while !status.is_valid() { + if status.is_invalid() { + error!(?status, ?params, "Invalid {method}",); + panic!("Invalid {method}: {status:?}"); + } + if status.is_syncing() { + return Err(alloy_json_rpc::RpcError::UnsupportedFeature( + "invalid range: no canonical state found for parent of requested block", + )) } + status = provider.client().request(method, ¶ms).await?; } + Ok(()) } /// Calls the correct `engine_forkchoiceUpdated` method depending on the given diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index fb940250033..a590f25810b 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -64,7 +64,6 @@ eyre.workspace = true [dev-dependencies] backon.workspace = true -similar-asserts.workspace = true tempfile.workspace = true [features] @@ -76,6 +75,7 @@ asm-keccak = [ "reth-node-core/asm-keccak", "reth-primitives/asm-keccak", "reth-ethereum-cli/asm-keccak", + "reth-node-ethereum/asm-keccak", ] jemalloc = [ diff --git a/crates/alloy-provider/README.md b/crates/alloy-provider/README.md deleted file mode 100644 index 37a75f1b328..00000000000 --- a/crates/alloy-provider/README.md +++ /dev/null @@ -1,60 +0,0 @@ -# Alloy Provider for Reth - -This crate provides an implementation of reth's `StateProviderFactory` and related traits that fetches state data via RPC instead of from a local database. - -Originally created by [cakevm](https://github.com/cakevm/alloy-reth-provider). - -## Features - -- Implements `StateProviderFactory` for remote RPC state access -- Supports Ethereum networks -- Useful for testing without requiring a full database -- Can be used with reth ExEx (Execution Extensions) for testing - -## Usage - -```rust -use alloy_provider::ProviderBuilder; -use reth_alloy_provider::AlloyRethProvider; -use reth_ethereum_node::EthereumNode; - -// Initialize provider -let provider = ProviderBuilder::new() - .builtin("https://eth.merkle.io") - .await - .unwrap(); - -// Create database provider with NodeTypes -let db_provider = AlloyRethProvider::new(provider, EthereumNode); - -// Get state at specific block -let state = db_provider.state_by_block_id(BlockId::number(16148323)).unwrap(); -``` - -## Configuration - -The provider can be configured with custom settings: - -```rust -use reth_alloy_provider::{AlloyRethProvider, AlloyRethProviderConfig}; -use reth_ethereum_node::EthereumNode; - -let config = AlloyRethProviderConfig { - compute_state_root: true, // Enable state root computation -}; - -let db_provider = AlloyRethProvider::new_with_config(provider, EthereumNode, config); -``` - -## Technical Details - -The provider uses `alloy_network::AnyNetwork` for network operations, providing compatibility with various Ethereum-based networks while maintaining the expected block structure with headers. - -## License - -Licensed under either of: - -- Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) -- MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) - -at your option. 
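The collapsed `call_new_payload` above replaces the four per-version wait methods with one loop keyed on `EngineApiMessageVersion::method_name()`. A hedged sketch of the equivalent JSON-RPC behavior (again using `requests` only for illustration, not the crate's transport):

```python
import requests

def call_new_payload(url, headers, method, params):
    """Re-send engine_newPayloadV1..V4 until the node answers VALID."""
    while True:
        status = requests.post(url, headers=headers, json={
            "jsonrpc": "2.0", "id": 1, "method": method, "params": params,
        }, timeout=30).json()["result"]["status"]
        if status == "VALID":
            return
        if status == "INVALID":
            raise RuntimeError(f"invalid {method}: node rejected the payload")
        if status == "SYNCING":
            # No canonical state for the parent; retrying will not help.
            raise RuntimeError("no canonical state found for parent of requested block")
        # Any other status (e.g. ACCEPTED): send the same params again.
```

Because the params are serialized once up front, the version-specific shape (V3's versioned hashes and parent beacon root, V4's extra requests field) is decided in `block_to_new_payload` and the send loop stays version-agnostic.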
\ No newline at end of file diff --git a/crates/chain-state/Cargo.toml b/crates/chain-state/Cargo.toml index 39a26f49378..be3b5a981d1 100644 --- a/crates/chain-state/Cargo.toml +++ b/crates/chain-state/Cargo.toml @@ -53,7 +53,6 @@ reth-primitives-traits = { workspace = true, features = ["test-utils"] } reth-testing-utils.workspace = true alloy-signer.workspace = true alloy-signer-local.workspace = true -alloy-consensus.workspace = true rand.workspace = true [features] diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 20f2a2a4c21..22fae8951d3 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -159,7 +159,7 @@ impl CanonicalInMemoryStateInner { } type PendingBlockAndReceipts = - (SealedBlock<::Block>, Vec>); + (RecoveredBlock<::Block>, Vec>); /// This type is responsible for providing the blocks, receipts, and state for /// all canonical blocks not on disk yet and keeps track of the block range that @@ -480,7 +480,7 @@ impl CanonicalInMemoryState { pub fn pending_block_and_receipts(&self) -> Option> { self.pending_state().map(|block_state| { ( - block_state.block_ref().recovered_block().sealed_block().clone(), + block_state.block_ref().recovered_block().clone(), block_state.executed_block_receipts(), ) }) @@ -1347,7 +1347,7 @@ mod tests { // Check the pending block and receipts assert_eq!( state.pending_block_and_receipts().unwrap(), - (block2.recovered_block().sealed_block().clone(), vec![]) + (block2.recovered_block().clone(), vec![]) ); } diff --git a/crates/chainspec/Cargo.toml b/crates/chainspec/Cargo.toml index 6d09d71c634..4d3c23117b3 100644 --- a/crates/chainspec/Cargo.toml +++ b/crates/chainspec/Cargo.toml @@ -35,7 +35,6 @@ derive_more.workspace = true alloy-trie = { workspace = true, features = ["arbitrary"] } alloy-eips = { workspace = true, features = ["arbitrary"] } alloy-rlp = { workspace = true, features = ["arrayvec"] } -alloy-genesis.workspace = true [features] default = ["std"] diff --git a/crates/chainspec/src/api.rs b/crates/chainspec/src/api.rs index afbc5c827c1..e51c2c9f07f 100644 --- a/crates/chainspec/src/api.rs +++ b/crates/chainspec/src/api.rs @@ -1,13 +1,14 @@ use crate::{ChainSpec, DepositContract}; use alloc::{boxed::Box, vec::Vec}; use alloy_chains::Chain; -use alloy_consensus::{BlockHeader, Header}; +use alloy_consensus::Header; use alloy_eips::{calc_next_block_base_fee, eip1559::BaseFeeParams, eip7840::BlobParams}; use alloy_genesis::Genesis; use alloy_primitives::{B256, U256}; use core::fmt::{Debug, Display}; use reth_ethereum_forks::EthereumHardforks; use reth_network_peers::NodeRecord; +use reth_primitives_traits::{AlloyBlockHeader, BlockHeader}; /// Trait representing type configuring a chain spec. 
#[auto_impl::auto_impl(&, Arc)] diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index 7accf96fa3b..7b1faccfc4d 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -1052,9 +1052,9 @@ mod tests { "Expected fork ID {expected_id:?}, computed fork ID {computed_id:?} for hardfork {hardfork}" ); if matches!(hardfork, EthereumHardfork::Shanghai) { - if let Some(shangai_id) = spec.shanghai_fork_id() { + if let Some(shanghai_id) = spec.shanghai_fork_id() { assert_eq!( - expected_id, &shangai_id, + expected_id, &shanghai_id, "Expected fork ID {expected_id:?}, computed fork ID {computed_id:?} for Shanghai hardfork" ); } else { diff --git a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml index b8e4d397697..06ceb9423c1 100644 --- a/crates/cli/commands/Cargo.toml +++ b/crates/cli/commands/Cargo.toml @@ -24,6 +24,7 @@ reth-db-common.workspace = true reth-downloaders.workspace = true reth-ecies.workspace = true reth-eth-wire.workspace = true +reth-era.workspace = true reth-era-downloader.workspace = true reth-era-utils.workspace = true reth-etl.workspace = true @@ -43,6 +44,7 @@ reth-ethereum-primitives = { workspace = true, optional = true } reth-provider.workspace = true reth-prune.workspace = true reth-prune-types = { workspace = true, optional = true } +reth-revm.workspace = true reth-stages.workspace = true reth-stages-types = { workspace = true, optional = true } reth-static-file-types = { workspace = true, features = ["clap"] } diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index be3bcec5a17..3249fc98113 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -232,7 +232,7 @@ where } /// Helper trait aggregating components required for the CLI. -pub trait CliNodeComponents { +pub trait CliNodeComponents: Send + Sync + 'static { /// Evm to use. type Evm: ConfigureEvm + 'static; /// Consensus implementation. @@ -260,3 +260,18 @@ where &self.1 } } + +/// Helper trait alias for an [`FnOnce`] producing [`CliNodeComponents`]. +pub trait CliComponentsBuilder: + FnOnce(Arc) -> Self::Components + Send + Sync + 'static +{ + type Components: CliNodeComponents; +} + +impl CliComponentsBuilder for F +where + F: FnOnce(Arc) -> Comp + Send + Sync + 'static, + Comp: CliNodeComponents, +{ + type Components = Comp; +} diff --git a/crates/cli/commands/src/download.rs b/crates/cli/commands/src/download.rs index 08c21d9eb83..2e33729e395 100644 --- a/crates/cli/commands/src/download.rs +++ b/crates/cli/commands/src/download.rs @@ -17,7 +17,7 @@ use tokio::task; use tracing::info; const BYTE_UNITS: [&str; 4] = ["B", "KB", "MB", "GB"]; -const MERKLE_BASE_URL: &str = "https://downloads.merkle.io"; +const MERKLE_BASE_URL: &str = "https://snapshots.merkle.io"; const EXTENSION_TAR_FILE: &str = ".tar.lz4"; #[derive(Debug, Parser)] @@ -32,7 +32,7 @@ pub struct DownloadCommand { long_help = "Specify a snapshot URL or let the command propose a default one.\n\ \n\ Available snapshot sources:\n\ - - https://downloads.merkle.io (default, mainnet archive)\n\ + - https://snapshots.merkle.io (default, mainnet archive)\n\ - https://publicnode.com/snapshots (full nodes & testnets)\n\ \n\ If no URL is provided, the latest mainnet archive snapshot\n\ diff --git a/crates/cli/commands/src/export_era.rs b/crates/cli/commands/src/export_era.rs new file mode 100644 index 00000000000..dbedf1852e5 --- /dev/null +++ b/crates/cli/commands/src/export_era.rs @@ -0,0 +1,109 @@ +//! 
Command exporting block data to convert them to ERA1 files. + +use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; +use clap::{Args, Parser}; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_cli::chainspec::ChainSpecParser; +use reth_era::execution_types::MAX_BLOCKS_PER_ERA1; +use reth_era_utils as era1; +use reth_provider::DatabaseProviderFactory; +use std::{path::PathBuf, sync::Arc}; +use tracing::info; + +// Default folder name for era1 export files +const ERA1_EXPORT_FOLDER_NAME: &str = "era1-export"; + +#[derive(Debug, Parser)] +pub struct ExportEraCommand { + #[command(flatten)] + env: EnvironmentArgs, + + #[clap(flatten)] + export: ExportArgs, +} + +#[derive(Debug, Args)] +pub struct ExportArgs { + /// Optional first block number to export from the db. + /// It is by default 0. + #[arg(long, value_name = "first-block-number", verbatim_doc_comment)] + first_block_number: Option, + /// Optional last block number to export from the db. + /// It is by default 8191. + #[arg(long, value_name = "last-block-number", verbatim_doc_comment)] + last_block_number: Option, + /// The maximum number of blocks per file, it can help you to decrease the size of the files. + /// Must be less than or equal to 8192. + #[arg(long, value_name = "max-blocks-per-file", verbatim_doc_comment)] + max_blocks_per_file: Option, + /// The directory path where to export era1 files. + /// The block data are read from the database. + #[arg(long, value_name = "EXPORT_ERA1_PATH", verbatim_doc_comment)] + path: Option, +} + +impl> ExportEraCommand { + /// Execute `export-era` command + pub async fn execute(self) -> eyre::Result<()> + where + N: CliNodeTypes, + { + let Environment { provider_factory, .. } = self.env.init::(AccessRights::RO)?; + + // Either specified path or default to `//era1-export/` + let data_dir = match &self.export.path { + Some(path) => path.clone(), + None => self + .env + .datadir + .resolve_datadir(self.env.chain.chain()) + .data_dir() + .join(ERA1_EXPORT_FOLDER_NAME), + }; + + let export_config = era1::ExportConfig { + network: self.env.chain.chain().to_string(), + first_block_number: self.export.first_block_number.unwrap_or(0), + last_block_number: self + .export + .last_block_number + .unwrap_or(MAX_BLOCKS_PER_ERA1 as u64 - 1), + max_blocks_per_file: self + .export + .max_blocks_per_file + .unwrap_or(MAX_BLOCKS_PER_ERA1 as u64), + dir: data_dir, + }; + + export_config.validate()?; + + info!( + target: "reth::cli", + "Starting ERA1 block export: blocks {}-{} to {}", + export_config.first_block_number, + export_config.last_block_number, + export_config.dir.display() + ); + + // Only read access is needed for the database provider + let provider = provider_factory.database_provider_ro()?; + + let exported_files = era1::export(&provider, &export_config)?; + + info!( + target: "reth::cli", + "Successfully exported {} ERA1 files to {}", + exported_files.len(), + export_config.dir.display() + ); + + Ok(()) + } +} + +impl ExportEraCommand { + /// Returns the underlying chain being used to run this command + pub fn chain_spec(&self) -> Option<&Arc> { + Some(&self.env.chain) + } +} diff --git a/crates/cli/commands/src/import.rs b/crates/cli/commands/src/import.rs index eef67117063..05434de4c21 100644 --- a/crates/cli/commands/src/import.rs +++ b/crates/cli/commands/src/import.rs @@ -1,36 +1,16 @@ //! Command that initializes the node by importing a chain from a file. 
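An aside on the `export-era` defaults above: ERA1 files hold at most `MAX_BLOCKS_PER_ERA1` (8192) blocks, so the default range 0..=8191 is exactly one file. The splitting itself happens inside `reth_era_utils::export`, which is not part of this diff, but the range arithmetic the config implies looks like this:

```python
MAX_BLOCKS_PER_ERA1 = 8192

def era1_chunks(first_block, last_block, max_blocks_per_file=MAX_BLOCKS_PER_ERA1):
    """Split an inclusive block range into per-file chunks."""
    assert 0 < max_blocks_per_file <= MAX_BLOCKS_PER_ERA1
    start = first_block
    while start <= last_block:
        end = min(start + max_blocks_per_file - 1, last_block)
        yield (start, end)
        start = end + 1

print(list(era1_chunks(0, 20_000, 8192)))
# [(0, 8191), (8192, 16383), (16384, 20000)]
```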
-use crate::common::{AccessRights, CliNodeComponents, CliNodeTypes, Environment, EnvironmentArgs}; -use alloy_primitives::B256; +use crate::{ + common::{AccessRights, CliNodeComponents, CliNodeTypes, Environment, EnvironmentArgs}, + import_op::{import_blocks_from_file, ImportConfig}, +}; use clap::Parser; -use futures::{Stream, StreamExt}; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; -use reth_config::Config; -use reth_consensus::{ConsensusError, FullConsensus}; -use reth_db_api::{tables, transaction::DbTx}; -use reth_downloaders::{ - bodies::bodies::BodiesDownloaderBuilder, - file_client::{ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE}, - headers::reverse_headers::ReverseHeadersDownloaderBuilder, -}; -use reth_evm::ConfigureEvm; -use reth_network_p2p::{ - bodies::downloader::BodyDownloader, - headers::downloader::{HeaderDownloader, SyncTarget}, -}; -use reth_node_api::BlockTy; use reth_node_core::version::SHORT_VERSION; -use reth_node_events::node::NodeEvent; -use reth_provider::{ - providers::ProviderNodeTypes, BlockNumReader, ChainSpecProvider, HeaderProvider, ProviderError, - ProviderFactory, StageCheckpointReader, -}; -use reth_prune::PruneModes; -use reth_stages::{prelude::*, Pipeline, StageId, StageSet}; -use reth_static_file::StaticFileProducer; use std::{path::PathBuf, sync::Arc}; -use tokio::sync::watch; -use tracing::{debug, error, info}; +use tracing::info; + +pub use crate::import_op::build_import_pipeline_impl as build_import_pipeline; /// Syncs RLP encoded blocks from a file. #[derive(Debug, Parser)] @@ -66,101 +46,29 @@ impl> ImportComm { info!(target: "reth::cli", "reth {} starting", SHORT_VERSION); - if self.no_state { - info!(target: "reth::cli", "Disabled stages requiring state"); - } - - debug!(target: "reth::cli", - chunk_byte_len=self.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE), - "Chunking chain import" - ); - let Environment { provider_factory, config, .. } = self.env.init::(AccessRights::RW)?; let components = components(provider_factory.chain_spec()); - let executor = components.evm_config().clone(); - let consensus = Arc::new(components.consensus().clone()); - info!(target: "reth::cli", "Consensus engine initialized"); - - // open file - let mut reader = ChunkedFileReader::new(&self.path, self.chunk_len).await?; - - let mut total_decoded_blocks = 0; - let mut total_decoded_txns = 0; - - let mut sealed_header = provider_factory - .sealed_header(provider_factory.last_block_number()?)? - .expect("should have genesis"); - - while let Some(file_client) = - reader.next_chunk::>(consensus.clone(), Some(sealed_header)).await? 
- { - // create a new FileClient from chunk read from file - info!(target: "reth::cli", - "Importing chain file chunk" - ); - - let tip = file_client.tip().ok_or(eyre::eyre!("file client has no tip"))?; - info!(target: "reth::cli", "Chain file chunk read"); - - total_decoded_blocks += file_client.headers_len(); - total_decoded_txns += file_client.total_transactions(); - - let (mut pipeline, events) = build_import_pipeline( - &config, - provider_factory.clone(), - &consensus, - Arc::new(file_client), - StaticFileProducer::new(provider_factory.clone(), PruneModes::default()), - self.no_state, - executor.clone(), - )?; - - // override the tip - pipeline.set_tip(tip); - debug!(target: "reth::cli", ?tip, "Tip manually set"); - - let provider = provider_factory.provider()?; - let latest_block_number = - provider.get_stage_checkpoint(StageId::Finish)?.map(|ch| ch.block_number); - tokio::spawn(reth_node_events::node::handle_events(None, latest_block_number, events)); + let import_config = ImportConfig { no_state: self.no_state, chunk_len: self.chunk_len }; - // Run pipeline - info!(target: "reth::cli", "Starting sync pipeline"); - tokio::select! { - res = pipeline.run() => res?, - _ = tokio::signal::ctrl_c() => {}, - } - - sealed_header = provider_factory - .sealed_header(provider_factory.last_block_number()?)? - .expect("should have genesis"); - } - - let provider = provider_factory.provider()?; + let executor = components.evm_config().clone(); + let consensus = Arc::new(components.consensus().clone()); - let total_imported_blocks = provider.tx_ref().entries::()?; - let total_imported_txns = provider.tx_ref().entries::()?; + let result = import_blocks_from_file( + &self.path, + import_config, + provider_factory, + &config, + executor, + consensus, + ) + .await?; - if total_decoded_blocks != total_imported_blocks || - total_decoded_txns != total_imported_txns - { - error!(target: "reth::cli", - total_decoded_blocks, - total_imported_blocks, - total_decoded_txns, - total_imported_txns, - "Chain was partially imported" - ); + if !result.is_complete() { + return Err(eyre::eyre!("Chain was partially imported")); } - info!(target: "reth::cli", - total_imported_blocks, - total_imported_txns, - "Chain file imported" - ); - Ok(()) } } @@ -172,82 +80,6 @@ impl ImportCommand { } } -/// Builds import pipeline. -/// -/// If configured to execute, all stages will run. Otherwise, only stages that don't require state -/// will run. -pub fn build_import_pipeline( - config: &Config, - provider_factory: ProviderFactory, - consensus: &Arc, - file_client: Arc>>, - static_file_producer: StaticFileProducer>, - disable_exec: bool, - evm_config: E, -) -> eyre::Result<(Pipeline, impl Stream>)> -where - N: ProviderNodeTypes, - C: FullConsensus + 'static, - E: ConfigureEvm + 'static, -{ - if !file_client.has_canonical_blocks() { - eyre::bail!("unable to import non canonical blocks"); - } - - // Retrieve latest header found in the database. - let last_block_number = provider_factory.last_block_number()?; - let local_head = provider_factory - .sealed_header(last_block_number)? - .ok_or_else(|| ProviderError::HeaderNotFound(last_block_number.into()))?; - - let mut header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers) - .build(file_client.clone(), consensus.clone()) - .into_task(); - // TODO: The pipeline should correctly configure the downloader on its own. - // Find the possibility to remove unnecessary pre-configuration. 
- header_downloader.update_local_head(local_head); - header_downloader.update_sync_target(SyncTarget::Tip(file_client.tip().unwrap())); - - let mut body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies) - .build(file_client.clone(), consensus.clone(), provider_factory.clone()) - .into_task(); - // TODO: The pipeline should correctly configure the downloader on its own. - // Find the possibility to remove unnecessary pre-configuration. - body_downloader - .set_download_range(file_client.min_block().unwrap()..=file_client.max_block().unwrap()) - .expect("failed to set download range"); - - let (tip_tx, tip_rx) = watch::channel(B256::ZERO); - - let max_block = file_client.max_block().unwrap_or(0); - - let pipeline = Pipeline::builder() - .with_tip_sender(tip_tx) - // we want to sync all blocks the file client provides or 0 if empty - .with_max_block(max_block) - .with_fail_on_unwind(true) - .add_stages( - DefaultStages::new( - provider_factory.clone(), - tip_rx, - consensus.clone(), - header_downloader, - body_downloader, - evm_config, - config.stages.clone(), - PruneModes::default(), - None, - ) - .builder() - .disable_all_if(&StageId::STATE_REQUIRED, || disable_exec), - ) - .build(provider_factory, static_file_producer); - - let events = pipeline.events().map(Into::into); - - Ok((pipeline, events)) -} - #[cfg(test)] mod tests { use super::*; diff --git a/crates/cli/commands/src/import_op.rs b/crates/cli/commands/src/import_op.rs new file mode 100644 index 00000000000..c3adec10200 --- /dev/null +++ b/crates/cli/commands/src/import_op.rs @@ -0,0 +1,254 @@ +//! Core import functionality without CLI dependencies. + +use alloy_primitives::B256; +use futures::StreamExt; +use reth_config::Config; +use reth_consensus::FullConsensus; +use reth_db_api::{tables, transaction::DbTx}; +use reth_downloaders::{ + bodies::bodies::BodiesDownloaderBuilder, + file_client::{ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE}, + headers::reverse_headers::ReverseHeadersDownloaderBuilder, +}; +use reth_evm::ConfigureEvm; +use reth_network_p2p::{ + bodies::downloader::BodyDownloader, + headers::downloader::{HeaderDownloader, SyncTarget}, +}; +use reth_node_api::BlockTy; +use reth_node_events::node::NodeEvent; +use reth_provider::{ + providers::ProviderNodeTypes, BlockNumReader, HeaderProvider, ProviderError, ProviderFactory, + StageCheckpointReader, +}; +use reth_prune::PruneModes; +use reth_stages::{prelude::*, Pipeline, StageId, StageSet}; +use reth_static_file::StaticFileProducer; +use std::{path::Path, sync::Arc}; +use tokio::sync::watch; +use tracing::{debug, error, info}; + +/// Configuration for importing blocks from RLP files. +#[derive(Debug, Clone, Default)] +pub struct ImportConfig { + /// Disables stages that require state. + pub no_state: bool, + /// Chunk byte length to read from file. + pub chunk_len: Option, +} + +/// Result of an import operation. +#[derive(Debug)] +pub struct ImportResult { + /// Total number of blocks decoded from the file. + pub total_decoded_blocks: usize, + /// Total number of transactions decoded from the file. + pub total_decoded_txns: usize, + /// Total number of blocks imported into the database. + pub total_imported_blocks: usize, + /// Total number of transactions imported into the database. + pub total_imported_txns: usize, +} + +impl ImportResult { + /// Returns true if all blocks and transactions were imported successfully. 
+ pub fn is_complete(&self) -> bool { + self.total_decoded_blocks == self.total_imported_blocks && + self.total_decoded_txns == self.total_imported_txns + } +} + +/// Imports blocks from an RLP-encoded file into the database. +/// +/// This function reads RLP-encoded blocks from a file in chunks and imports them +/// using the pipeline infrastructure. It's designed to be used both from the CLI +/// and from test code. +pub async fn import_blocks_from_file( + path: &Path, + import_config: ImportConfig, + provider_factory: ProviderFactory, + config: &Config, + executor: impl ConfigureEvm + 'static, + consensus: Arc< + impl FullConsensus + 'static, + >, +) -> eyre::Result +where + N: ProviderNodeTypes, +{ + if import_config.no_state { + info!(target: "reth::import", "Disabled stages requiring state"); + } + + debug!(target: "reth::import", + chunk_byte_len=import_config.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE), + "Chunking chain import" + ); + + info!(target: "reth::import", "Consensus engine initialized"); + + // open file + let mut reader = ChunkedFileReader::new(path, import_config.chunk_len).await?; + + let mut total_decoded_blocks = 0; + let mut total_decoded_txns = 0; + + let mut sealed_header = provider_factory + .sealed_header(provider_factory.last_block_number()?)? + .expect("should have genesis"); + + while let Some(file_client) = + reader.next_chunk::>(consensus.clone(), Some(sealed_header)).await? + { + // create a new FileClient from chunk read from file + info!(target: "reth::import", + "Importing chain file chunk" + ); + + let tip = file_client.tip().ok_or(eyre::eyre!("file client has no tip"))?; + info!(target: "reth::import", "Chain file chunk read"); + + total_decoded_blocks += file_client.headers_len(); + total_decoded_txns += file_client.total_transactions(); + + let (mut pipeline, events) = build_import_pipeline_impl( + config, + provider_factory.clone(), + &consensus, + Arc::new(file_client), + StaticFileProducer::new(provider_factory.clone(), PruneModes::default()), + import_config.no_state, + executor.clone(), + )?; + + // override the tip + pipeline.set_tip(tip); + debug!(target: "reth::import", ?tip, "Tip manually set"); + + let provider = provider_factory.provider()?; + + let latest_block_number = + provider.get_stage_checkpoint(StageId::Finish)?.map(|ch| ch.block_number); + tokio::spawn(reth_node_events::node::handle_events(None, latest_block_number, events)); + + // Run pipeline + info!(target: "reth::import", "Starting sync pipeline"); + tokio::select! { + res = pipeline.run() => res?, + _ = tokio::signal::ctrl_c() => { + info!(target: "reth::import", "Import interrupted by user"); + break; + }, + } + + sealed_header = provider_factory + .sealed_header(provider_factory.last_block_number()?)? + .expect("should have genesis"); + } + + let provider = provider_factory.provider()?; + + let total_imported_blocks = provider.tx_ref().entries::()?; + let total_imported_txns = provider.tx_ref().entries::()?; + + let result = ImportResult { + total_decoded_blocks, + total_decoded_txns, + total_imported_blocks, + total_imported_txns, + }; + + if !result.is_complete() { + error!(target: "reth::import", + total_decoded_blocks, + total_imported_blocks, + total_decoded_txns, + total_imported_txns, + "Chain was partially imported" + ); + } else { + info!(target: "reth::import", + total_imported_blocks, + total_imported_txns, + "Chain file imported" + ); + } + + Ok(result) +} + +/// Builds import pipeline. +/// +/// If configured to execute, all stages will run. 
Otherwise, only stages that don't require state +/// will run. +pub fn build_import_pipeline_impl( + config: &Config, + provider_factory: ProviderFactory, + consensus: &Arc, + file_client: Arc>>, + static_file_producer: StaticFileProducer>, + disable_exec: bool, + evm_config: E, +) -> eyre::Result<(Pipeline, impl futures::Stream>)> +where + N: ProviderNodeTypes, + C: FullConsensus + 'static, + E: ConfigureEvm + 'static, +{ + if !file_client.has_canonical_blocks() { + eyre::bail!("unable to import non canonical blocks"); + } + + // Retrieve latest header found in the database. + let last_block_number = provider_factory.last_block_number()?; + let local_head = provider_factory + .sealed_header(last_block_number)? + .ok_or_else(|| ProviderError::HeaderNotFound(last_block_number.into()))?; + + let mut header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers) + .build(file_client.clone(), consensus.clone()) + .into_task(); + // TODO: The pipeline should correctly configure the downloader on its own. + // Find the possibility to remove unnecessary pre-configuration. + header_downloader.update_local_head(local_head); + header_downloader.update_sync_target(SyncTarget::Tip(file_client.tip().unwrap())); + + let mut body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies) + .build(file_client.clone(), consensus.clone(), provider_factory.clone()) + .into_task(); + // TODO: The pipeline should correctly configure the downloader on its own. + // Find the possibility to remove unnecessary pre-configuration. + body_downloader + .set_download_range(file_client.min_block().unwrap()..=file_client.max_block().unwrap()) + .expect("failed to set download range"); + + let (tip_tx, tip_rx) = watch::channel(B256::ZERO); + + let max_block = file_client.max_block().unwrap_or(0); + + let pipeline = Pipeline::builder() + .with_tip_sender(tip_tx) + // we want to sync all blocks the file client provides or 0 if empty + .with_max_block(max_block) + .with_fail_on_unwind(true) + .add_stages( + DefaultStages::new( + provider_factory.clone(), + tip_rx, + consensus.clone(), + header_downloader, + body_downloader, + evm_config, + config.stages.clone(), + PruneModes::default(), + None, + ) + .builder() + .disable_all_if(&StageId::STATE_REQUIRED, || disable_exec), + ) + .build(provider_factory, static_file_producer); + + let events = pipeline.events().map(Into::into); + + Ok((pipeline, events)) +} diff --git a/crates/cli/commands/src/init_state/mod.rs b/crates/cli/commands/src/init_state/mod.rs index 76e7791e1d4..7a80997b976 100644 --- a/crates/cli/commands/src/init_state/mod.rs +++ b/crates/cli/commands/src/init_state/mod.rs @@ -1,14 +1,14 @@ //! Command that initializes the node from a genesis file. 
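Back in `import_op.rs`: the refactor's main contract is `ImportResult`, whose completeness check the import command now turns into a hard error instead of a log line. Mirrored here as a sketch:

```python
from dataclasses import dataclass

@dataclass
class ImportResult:
    total_decoded_blocks: int
    total_decoded_txns: int
    total_imported_blocks: int
    total_imported_txns: int

    def is_complete(self) -> bool:
        # Import succeeded only if everything decoded made it into the DB.
        return (self.total_decoded_blocks == self.total_imported_blocks and
                self.total_decoded_txns == self.total_imported_txns)

assert ImportResult(100, 4_500, 100, 4_500).is_complete()
assert not ImportResult(100, 4_500, 99, 4_450).is_complete()
```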
use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; -use alloy_consensus::Header; +use alloy_consensus::{BlockHeader as AlloyBlockHeader, Header}; use alloy_primitives::{B256, U256}; use clap::Parser; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_db_common::init::init_from_state_dump; use reth_node_api::NodePrimitives; -use reth_primitives_traits::SealedHeader; +use reth_primitives_traits::{BlockHeader, SealedHeader}; use reth_provider::{ BlockNumReader, DatabaseProviderFactory, StaticFileProviderFactory, StaticFileWriter, }; @@ -72,7 +72,7 @@ impl> InitStateC where N: CliNodeTypes< ChainSpec = C::ChainSpec, - Primitives: NodePrimitives, + Primitives: NodePrimitives>, >, { info!(target: "reth::cli", "Reth init-state starting"); @@ -85,7 +85,9 @@ impl> InitStateC if self.without_evm { // ensure header, total difficulty and header hash are provided let header = self.header.ok_or_else(|| eyre::eyre!("Header file must be provided"))?; - let header = without_evm::read_header_from_file(header)?; + let header = without_evm::read_header_from_file::< + ::BlockHeader, + >(header)?; let header_hash = self.header_hash.ok_or_else(|| eyre::eyre!("Header hash must be provided"))?; @@ -103,7 +105,10 @@ impl> InitStateC &provider_rw, SealedHeader::new(header, header_hash), total_difficulty, - |number| Header { number, ..Default::default() }, + |number| { + let header = Header { number, ..Default::default() }; + <::BlockHeader>::from(header) + }, )?; // SAFETY: it's safe to commit static files, since in the event of a crash, they @@ -112,7 +117,7 @@ impl> InitStateC // Necessary to commit, so the header is accessible to provider_rw and // init_state_dump static_file_provider.commit()?; - } else if last_block_number > 0 && last_block_number < header.number { + } else if last_block_number > 0 && last_block_number < header.number() { return Err(eyre::eyre!( "Data directory should be empty when calling init-state with --without-evm-history." )); diff --git a/crates/cli/commands/src/init_state/without_evm.rs b/crates/cli/commands/src/init_state/without_evm.rs index c839aaf268e..3a85b175eb4 100644 --- a/crates/cli/commands/src/init_state/without_evm.rs +++ b/crates/cli/commands/src/init_state/without_evm.rs @@ -1,4 +1,4 @@ -use alloy_consensus::{BlockHeader, Header}; +use alloy_consensus::BlockHeader; use alloy_primitives::{BlockNumber, B256, U256}; use alloy_rlp::Decodable; use reth_codecs::Compact; @@ -12,14 +12,16 @@ use reth_stages::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; use std::{fs::File, io::Read, path::PathBuf}; use tracing::info; - /// Reads the header RLP from a file and returns the Header. 
-pub(crate) fn read_header_from_file(path: PathBuf) -> Result { +pub(crate) fn read_header_from_file(path: PathBuf) -> Result +where + H: Decodable, +{ let mut file = File::open(path)?; let mut buf = Vec::new(); file.read_to_end(&mut buf)?; - let header = Header::decode(&mut &buf[..])?; + let header = H::decode(&mut &buf[..])?; Ok(header) } diff --git a/crates/cli/commands/src/launcher.rs b/crates/cli/commands/src/launcher.rs index e5e35f97aac..86cc8d33dc3 100644 --- a/crates/cli/commands/src/launcher.rs +++ b/crates/cli/commands/src/launcher.rs @@ -2,7 +2,7 @@ use futures::Future; use reth_cli::chainspec::ChainSpecParser; use reth_db::DatabaseEnv; use reth_node_builder::{NodeBuilder, WithLaunchContext}; -use std::{fmt, marker::PhantomData, sync::Arc}; +use std::{fmt, sync::Arc}; /// A trait for launching a reth node with custom configuration strategies. /// @@ -40,14 +40,12 @@ where /// This struct adapts existing closures to work with the new [`Launcher`] trait, /// maintaining backward compatibility with current node implementations while /// enabling the transition to the more flexible trait-based approach. -pub struct FnLauncher { +pub struct FnLauncher { /// The function to execute when launching the node func: F, - /// Phantom data to track the future type - _result: PhantomData, } -impl FnLauncher { +impl FnLauncher { /// Creates a new function launcher adapter. /// /// Type parameters `C` and `Ext` help the compiler infer correct types @@ -59,18 +57,23 @@ impl FnLauncher { pub fn new(func: F) -> Self where C: ChainSpecParser, - F: FnOnce(WithLaunchContext, C::ChainSpec>>, Ext) -> Fut, + F: AsyncFnOnce( + WithLaunchContext, C::ChainSpec>>, + Ext, + ) -> eyre::Result<()>, { - Self { func, _result: PhantomData } + Self { func } } } -impl Launcher for FnLauncher +impl Launcher for FnLauncher where C: ChainSpecParser, Ext: clap::Args + fmt::Debug, - F: FnOnce(WithLaunchContext, C::ChainSpec>>, Ext) -> Fut, - Fut: Future>, + F: AsyncFnOnce( + WithLaunchContext, C::ChainSpec>>, + Ext, + ) -> eyre::Result<()>, { fn entrypoint( self, diff --git a/crates/cli/commands/src/lib.rs b/crates/cli/commands/src/lib.rs index 778f284028a..bf4504074a5 100644 --- a/crates/cli/commands/src/lib.rs +++ b/crates/cli/commands/src/lib.rs @@ -13,14 +13,17 @@ pub mod config_cmd; pub mod db; pub mod download; pub mod dump_genesis; +pub mod export_era; pub mod import; pub mod import_era; +pub mod import_op; pub mod init_cmd; pub mod init_state; pub mod launcher; pub mod node; pub mod p2p; pub mod prune; +pub mod re_execute; pub mod recover; pub mod stage; #[cfg(feature = "arbitrary")] diff --git a/crates/cli/commands/src/p2p/mod.rs b/crates/cli/commands/src/p2p/mod.rs index ab07a553c19..c3a20231638 100644 --- a/crates/cli/commands/src/p2p/mod.rs +++ b/crates/cli/commands/src/p2p/mod.rs @@ -13,71 +13,159 @@ use reth_config::Config; use reth_network::{BlockDownloaderProvider, NetworkConfigBuilder}; use reth_network_p2p::bodies::client::BodiesClient; use reth_node_core::{ - args::{DatabaseArgs, DatadirArgs, NetworkArgs}, + args::{DatadirArgs, NetworkArgs}, utils::get_single_header, }; pub mod bootnode; -mod rlpx; +pub mod rlpx; /// `reth p2p` command #[derive(Debug, Parser)] pub struct Command { - /// The path to the configuration file to use. - #[arg(long, value_name = "FILE", verbatim_doc_comment)] - config: Option, + #[command(subcommand)] + command: Subcommands, +} - /// The chain this node is running. - /// - /// Possible values are either a built-in chain or the path to a chain specification file. 
- #[arg( - long, - value_name = "CHAIN_OR_PATH", - long_help = C::help_message(), - default_value = C::SUPPORTED_CHAINS[0], - value_parser = C::parser() - )] - chain: Arc, +impl> Command { + /// Execute `p2p` command + pub async fn execute>(self) -> eyre::Result<()> { + match self.command { + Subcommands::Header { args, id } => { + let handle = args.launch_network::().await?; + let fetch_client = handle.fetch_client().await?; + let backoff = args.backoff(); - /// The number of retries per request - #[arg(long, default_value = "5")] - retries: usize, + let header = (move || get_single_header(fetch_client.clone(), id)) + .retry(backoff) + .notify(|err, _| println!("Error requesting header: {err}. Retrying...")) + .await?; + println!("Successfully downloaded header: {header:?}"); + } - #[command(flatten)] - network: NetworkArgs, + Subcommands::Body { args, id } => { + let handle = args.launch_network::().await?; + let fetch_client = handle.fetch_client().await?; + let backoff = args.backoff(); - #[command(flatten)] - datadir: DatadirArgs, + let hash = match id { + BlockHashOrNumber::Hash(hash) => hash, + BlockHashOrNumber::Number(number) => { + println!("Block number provided. Downloading header first..."); + let client = fetch_client.clone(); + let header = (move || { + get_single_header(client.clone(), BlockHashOrNumber::Number(number)) + }) + .retry(backoff) + .notify(|err, _| println!("Error requesting header: {err}. Retrying...")) + .await?; + header.hash() + } + }; + let (_, result) = (move || { + let client = fetch_client.clone(); + client.get_block_bodies(vec![hash]) + }) + .retry(backoff) + .notify(|err, _| println!("Error requesting block: {err}. Retrying...")) + .await? + .split(); + if result.len() != 1 { + eyre::bail!( + "Invalid number of headers received. Expected: 1. Received: {}", + result.len() + ) + } + let body = result.into_iter().next().unwrap(); + println!("Successfully downloaded body: {body:?}") + } + Subcommands::Rlpx(command) => { + command.execute().await?; + } + Subcommands::Bootnode(command) => { + command.execute().await?; + } + } - #[command(flatten)] - db: DatabaseArgs, + Ok(()) + } +} - #[command(subcommand)] - command: Subcommands, +impl Command { + /// Returns the underlying chain being used to run this command + pub fn chain_spec(&self) -> Option<&Arc> { + match &self.command { + Subcommands::Header { args, .. } => Some(&args.chain), + Subcommands::Body { args, .. } => Some(&args.chain), + Subcommands::Rlpx(_) => None, + Subcommands::Bootnode(_) => None, + } + } } /// `reth p2p` subcommands #[derive(Subcommand, Debug)] -pub enum Subcommands { +pub enum Subcommands { /// Download block header Header { + #[command(flatten)] + args: DownloadArgs, /// The header number or hash #[arg(value_parser = hash_or_num_value_parser)] id: BlockHashOrNumber, }, /// Download block body Body { + #[command(flatten)] + args: DownloadArgs, /// The block number or hash #[arg(value_parser = hash_or_num_value_parser)] id: BlockHashOrNumber, }, // RLPx utilities Rlpx(rlpx::Command), + /// Bootnode command + Bootnode(bootnode::Command), } -impl> Command { - /// Execute `p2p` command - pub async fn execute>(self) -> eyre::Result<()> { +#[derive(Debug, Clone, Parser)] +pub struct DownloadArgs { + /// The number of retries per request + #[arg(long, default_value = "5")] + retries: usize, + + #[command(flatten)] + network: NetworkArgs, + + #[command(flatten)] + datadir: DatadirArgs, + + /// The path to the configuration file to use. 
+ #[arg(long, value_name = "FILE", verbatim_doc_comment)] + config: Option, + + /// The chain this node is running. + /// + /// Possible values are either a built-in chain or the path to a chain specification file. + #[arg( + long, + value_name = "CHAIN_OR_PATH", + long_help = C::help_message(), + default_value = C::SUPPORTED_CHAINS[0], + value_parser = C::parser() + )] + chain: Arc, +} + +impl DownloadArgs { + /// Creates and spawns the network and returns the handle. + pub async fn launch_network( + &self, + ) -> eyre::Result> + where + C::ChainSpec: EthChainSpec + Hardforks + EthereumHardforks + Send + Sync + 'static, + N: CliNodeTypes, + { let data_dir = self.datadir.clone().resolve_datadir(self.chain.chain()); let config_path = self.config.clone().unwrap_or_else(|| data_dir.config()); @@ -104,73 +192,38 @@ impl let net = NetworkConfigBuilder::::new(p2p_secret_key) .peer_config(config.peers_config_with_basic_nodes_from_file(None)) .external_ip_resolver(self.network.nat) - .disable_discv4_discovery_if(self.chain.chain().is_optimism()) .boot_nodes(boot_nodes.clone()) .apply(|builder| { self.network.discovery.apply_to_builder(builder, rlpx_socket, boot_nodes) }) - .build_with_noop_provider(self.chain) + .build_with_noop_provider(self.chain.clone()) .manager() .await?; - let network = net.handle().clone(); + let handle = net.handle().clone(); tokio::task::spawn(net); - let fetch_client = network.fetch_client().await?; - let retries = self.retries.max(1); - let backoff = ConstantBuilder::default().with_max_times(retries); - - match self.command { - Subcommands::Header { id } => { - let header = (move || get_single_header(fetch_client.clone(), id)) - .retry(backoff) - .notify(|err, _| println!("Error requesting header: {err}. Retrying...")) - .await?; - println!("Successfully downloaded header: {header:?}"); - } - Subcommands::Body { id } => { - let hash = match id { - BlockHashOrNumber::Hash(hash) => hash, - BlockHashOrNumber::Number(number) => { - println!("Block number provided. Downloading header first..."); - let client = fetch_client.clone(); - let header = (move || { - get_single_header(client.clone(), BlockHashOrNumber::Number(number)) - }) - .retry(backoff) - .notify(|err, _| println!("Error requesting header: {err}. Retrying...")) - .await?; - header.hash() - } - }; - let (_, result) = (move || { - let client = fetch_client.clone(); - client.get_block_bodies(vec![hash]) - }) - .retry(backoff) - .notify(|err, _| println!("Error requesting block: {err}. Retrying...")) - .await? - .split(); - if result.len() != 1 { - eyre::bail!( - "Invalid number of headers received. Expected: 1. 
Received: {}", - result.len() - ) - } - let body = result.into_iter().next().unwrap(); - println!("Successfully downloaded body: {body:?}") - } - Subcommands::Rlpx(command) => { - command.execute().await?; - } - } + Ok(handle) + } - Ok(()) + pub fn backoff(&self) -> ConstantBuilder { + ConstantBuilder::default().with_max_times(self.retries.max(1)) } } -impl Command { - /// Returns the underlying chain being used to run this command - pub fn chain_spec(&self) -> Option<&Arc> { - Some(&self.chain) +#[cfg(test)] +mod tests { + use super::*; + use reth_ethereum_cli::chainspec::EthereumChainSpecParser; + + #[test] + fn parse_header_cmd() { + let _args: Command = + Command::parse_from(["reth", "header", "--chain", "mainnet", "1000"]); + } + + #[test] + fn parse_body_cmd() { + let _args: Command = + Command::parse_from(["reth", "body", "--chain", "mainnet", "1000"]); } } diff --git a/crates/cli/commands/src/re_execute.rs b/crates/cli/commands/src/re_execute.rs new file mode 100644 index 00000000000..a555297488e --- /dev/null +++ b/crates/cli/commands/src/re_execute.rs @@ -0,0 +1,222 @@ +//! Re-execute blocks from database in parallel. + +use crate::common::{ + AccessRights, CliComponentsBuilder, CliNodeComponents, CliNodeTypes, Environment, + EnvironmentArgs, +}; +use alloy_consensus::{BlockHeader, TxReceipt}; +use clap::Parser; +use eyre::WrapErr; +use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks}; +use reth_cli::chainspec::ChainSpecParser; +use reth_consensus::FullConsensus; +use reth_evm::{execute::Executor, ConfigureEvm}; +use reth_primitives_traits::{format_gas_throughput, BlockBody, GotExpected, SignedTransaction}; +use reth_provider::{ + BlockNumReader, BlockReader, ChainSpecProvider, DatabaseProviderFactory, ReceiptProvider, + StaticFileProviderFactory, TransactionVariant, +}; +use reth_revm::database::StateProviderDatabase; +use reth_stages::stages::calculate_gas_used_from_headers; +use std::{ + sync::Arc, + time::{Duration, Instant}, +}; +use tokio::{sync::mpsc, task::JoinSet}; +use tracing::*; + +/// `reth re-execute` command +/// +/// Re-execute blocks in parallel to verify historical sync correctness. +#[derive(Debug, Parser)] +pub struct Command { + #[command(flatten)] + env: EnvironmentArgs, + + /// The height to start at. + #[arg(long, default_value = "1")] + from: u64, + + /// The height to end at. Defaults to the latest block. + #[arg(long)] + to: Option, + + /// Number of tasks to run in parallel + #[arg(long, default_value = "10")] + num_tasks: u64, +} + +impl Command { + /// Returns the underlying chain being used to run this command + pub fn chain_spec(&self) -> Option<&Arc> { + Some(&self.env.chain) + } +} + +impl> Command { + /// Execute `re-execute` command + pub async fn execute(self, components: impl CliComponentsBuilder) -> eyre::Result<()> + where + N: CliNodeTypes, + { + let Environment { provider_factory, .. 
} = self.env.init::(AccessRights::RO)?; + + let provider = provider_factory.database_provider_ro()?; + let components = components(provider_factory.chain_spec()); + + let min_block = self.from; + let max_block = self.to.unwrap_or(provider.best_block_number()?); + + let total_blocks = max_block - min_block; + let total_gas = calculate_gas_used_from_headers( + &provider_factory.static_file_provider(), + min_block..=max_block, + )?; + let blocks_per_task = total_blocks / self.num_tasks; + + let db_at = { + let provider_factory = provider_factory.clone(); + move |block_number: u64| { + StateProviderDatabase( + provider_factory.history_by_block_number(block_number).unwrap(), + ) + } + }; + + let (stats_tx, mut stats_rx) = mpsc::unbounded_channel(); + + let mut tasks = JoinSet::new(); + for i in 0..self.num_tasks { + let start_block = min_block + i * blocks_per_task; + let end_block = + if i == self.num_tasks - 1 { max_block } else { start_block + blocks_per_task }; + + // Spawn thread executing blocks + let provider_factory = provider_factory.clone(); + let evm_config = components.evm_config().clone(); + let consensus = components.consensus().clone(); + let db_at = db_at.clone(); + let stats_tx = stats_tx.clone(); + tasks.spawn_blocking(move || { + let mut executor = evm_config.batch_executor(db_at(start_block - 1)); + for block in start_block..end_block { + let block = provider_factory + .recovered_block(block.into(), TransactionVariant::NoHash)? + .unwrap(); + let result = executor.execute_one(&block)?; + + if let Err(err) = consensus + .validate_block_post_execution(&block, &result) + .wrap_err_with(|| format!("Failed to validate block {}", block.number())) + { + let correct_receipts = + provider_factory.receipts_by_block(block.number().into())?.unwrap(); + + for (i, (receipt, correct_receipt)) in + result.receipts.iter().zip(correct_receipts.iter()).enumerate() + { + if receipt != correct_receipt { + let tx_hash = block.body().transactions()[i].tx_hash(); + error!( + ?receipt, + ?correct_receipt, + index = i, + ?tx_hash, + "Invalid receipt" + ); + let expected_gas_used = correct_receipt.cumulative_gas_used() - + if i == 0 { + 0 + } else { + correct_receipts[i - 1].cumulative_gas_used() + }; + let got_gas_used = receipt.cumulative_gas_used() - + if i == 0 { + 0 + } else { + result.receipts[i - 1].cumulative_gas_used() + }; + if got_gas_used != expected_gas_used { + let mismatch = GotExpected { + expected: expected_gas_used, + got: got_gas_used, + }; + + error!(number=?block.number(), ?mismatch, "Gas usage mismatch"); + return Err(err); + } + } else { + continue; + } + } + + return Err(err); + } + let _ = stats_tx.send(block.gas_used()); + + // Reset DB once in a while to avoid OOM + if executor.size_hint() > 1_000_000 { + executor = evm_config.batch_executor(db_at(block.number())); + } + } + + eyre::Ok(()) + }); + } + + let instant = Instant::now(); + let mut total_executed_blocks = 0; + let mut total_executed_gas = 0; + + let mut last_logged_gas = 0; + let mut last_logged_blocks = 0; + let mut last_logged_time = Instant::now(); + + let mut interval = tokio::time::interval(Duration::from_secs(10)); + + loop { + tokio::select! 
{ + Some(gas_used) = stats_rx.recv() => { + total_executed_blocks += 1; + total_executed_gas += gas_used; + } + result = tasks.join_next() => { + if let Some(result) = result { + if matches!(result, Err(_) | Ok(Err(_))) { + error!(?result); + return Err(eyre::eyre!("Re-execution failed: {result:?}")); + } + } else { + break; + } + } + _ = interval.tick() => { + let blocks_executed = total_executed_blocks - last_logged_blocks; + let gas_executed = total_executed_gas - last_logged_gas; + + if blocks_executed > 0 { + let progress = 100.0 * total_executed_gas as f64 / total_gas as f64; + info!( + throughput=?format_gas_throughput(gas_executed, last_logged_time.elapsed()), + progress=format!("{progress:.2}%"), + "Executed {blocks_executed} blocks" + ); + } + + last_logged_blocks = total_executed_blocks; + last_logged_gas = total_executed_gas; + last_logged_time = Instant::now(); + } + } + } + + info!( + start_block = min_block, + end_block = max_block, + throughput=?format_gas_throughput(total_executed_gas, instant.elapsed()), + "Re-executed successfully" + ); + + Ok(()) + } +} diff --git a/crates/consensus/common/Cargo.toml b/crates/consensus/common/Cargo.toml index 96dea6c232f..901e8697cd5 100644 --- a/crates/consensus/common/Cargo.toml +++ b/crates/consensus/common/Cargo.toml @@ -23,7 +23,6 @@ alloy-eips.workspace = true [dev-dependencies] alloy-primitives = { workspace = true, features = ["rand"] } reth-ethereum-primitives.workspace = true -alloy-consensus.workspace = true rand.workspace = true [features] diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index a682bc2f910..72389acdce8 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -132,11 +132,35 @@ where /// - Compares the ommer hash in the block header to the block body /// - Compares the transactions root in the block header to the block body /// - Pre-execution transaction validation -/// - (Optionally) Compares the receipts root in the block header to the block body pub fn validate_block_pre_execution( block: &SealedBlock, chain_spec: &ChainSpec, ) -> Result<(), ConsensusError> +where + B: Block, + ChainSpec: EthereumHardforks, +{ + post_merge_hardfork_fields(block, chain_spec)?; + + // Check transaction root + if let Err(error) = block.ensure_transaction_root_valid() { + return Err(ConsensusError::BodyTransactionRootDiff(error.into())) + } + + Ok(()) +} + +/// Validates the ommers hash and other fork-specific fields. +/// +/// These fork-specific validations are: +/// * EIP-4895 withdrawals validation, if shanghai is active based on the given chainspec. See more +/// information about the specific checks in [`validate_shanghai_withdrawals`]. +/// * EIP-4844 blob gas validation, if cancun is active based on the given chainspec. See more +/// information about the specific checks in [`validate_cancun_gas`]. 
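+///
+/// A minimal call sketch (hedged: `block` and `chain_spec` are assumed to be a
+/// `SealedBlock` and a fork-aware chain spec already in scope; not part of the
+/// original diff):
+///
+/// ```ignore
+/// // Runs only the fork-gated checks; the transaction-root comparison stays in
+/// // `validate_block_pre_execution`.
+/// post_merge_hardfork_fields(&block, &chain_spec)?;
+/// ```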
+pub fn post_merge_hardfork_fields( + block: &SealedBlock, + chain_spec: &ChainSpec, +) -> Result<(), ConsensusError> where B: Block, ChainSpec: EthereumHardforks, @@ -153,11 +177,6 @@ where )) } - // Check transaction root - if let Err(error) = block.ensure_transaction_root_valid() { - return Err(ConsensusError::BodyTransactionRootDiff(error.into())) - } - // EIP-4895: Beacon chain push withdrawals as operations if chain_spec.is_shanghai_active_at_timestamp(block.timestamp()) { validate_shanghai_withdrawals(block)?; diff --git a/crates/consensus/debug-client/Cargo.toml b/crates/consensus/debug-client/Cargo.toml index 784c52c3b53..5ff3735c33c 100644 --- a/crates/consensus/debug-client/Cargo.toml +++ b/crates/consensus/debug-client/Cargo.toml @@ -28,8 +28,9 @@ auto_impl.workspace = true derive_more.workspace = true futures.workspace = true eyre.workspace = true -reqwest = { workspace = true, features = ["rustls-tls", "json"] } +reqwest = { workspace = true, features = ["rustls-tls"] } serde = { workspace = true, features = ["derive"] } tokio = { workspace = true, features = ["time"] } +serde_json.workspace = true ringbuffer.workspace = true diff --git a/crates/consensus/debug-client/src/providers/etherscan.rs b/crates/consensus/debug-client/src/providers/etherscan.rs index c52ee609d20..ea21d95e73d 100644 --- a/crates/consensus/debug-client/src/providers/etherscan.rs +++ b/crates/consensus/debug-client/src/providers/etherscan.rs @@ -3,7 +3,7 @@ use alloy_consensus::BlockHeader; use alloy_eips::BlockNumberOrTag; use alloy_json_rpc::{Response, ResponsePayload}; use reqwest::Client; -use reth_tracing::tracing::warn; +use reth_tracing::tracing::{debug, warn}; use serde::{de::DeserializeOwned, Serialize}; use std::{sync::Arc, time::Duration}; use tokio::{sync::mpsc, time::interval}; @@ -14,6 +14,7 @@ pub struct EtherscanBlockProvider { http_client: Client, base_url: String, api_key: String, + chain_id: u64, interval: Duration, #[debug(skip)] convert: Arc PrimitiveBlock + Send + Sync>, @@ -27,12 +28,14 @@ where pub fn new( base_url: String, api_key: String, + chain_id: u64, convert: impl Fn(RpcBlock) -> PrimitiveBlock + Send + Sync + 'static, ) -> Self { Self { http_client: Client::new(), base_url, api_key, + chain_id, interval: Duration::from_secs(3), convert: Arc::new(convert), } @@ -56,20 +59,26 @@ where tag => tag.to_string(), }; - let resp: Response = self - .http_client - .get(&self.base_url) - .query(&[ - ("module", "proxy"), - ("action", "eth_getBlockByNumber"), - ("tag", &tag), - ("boolean", "true"), - ("apikey", &self.api_key), - ]) - .send() - .await? 
- .json() - .await?; + let mut req = self.http_client.get(&self.base_url).query(&[ + ("module", "proxy"), + ("action", "eth_getBlockByNumber"), + ("tag", &tag), + ("boolean", "true"), + ("apikey", &self.api_key), + ]); + + if !self.base_url.contains("chainid=") { + // only append chainid if not part of the base url already + req = req.query(&[("chainid", &self.chain_id.to_string())]); + } + + let resp = req.send().await?.text().await?; + + debug!(target: "etherscan", %resp, "fetched block from etherscan"); + + let resp: Response = serde_json::from_str(&resp).inspect_err(|err| { + warn!(target: "etherscan", "Failed to parse block response from etherscan: {}", err); + })?; let payload = resp.payload; match payload { diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index 1ff7dcb0885..ca10c80e578 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -34,8 +34,19 @@ reth-engine-local.workspace = true reth-tasks.workspace = true reth-node-ethereum.workspace = true reth-ethereum-primitives.workspace = true +reth-cli-commands.workspace = true +reth-config.workspace = true +reth-consensus.workspace = true +reth-evm.workspace = true +reth-static-file.workspace = true +reth-ethereum-consensus.workspace = true +reth-primitives.workspace = true +reth-prune-types.workspace = true +reth-db-common.workspace = true +reth-primitives-traits.workspace = true revm.workspace = true +tempfile.workspace = true # rpc jsonrpsee.workspace = true @@ -44,17 +55,24 @@ url.workspace = true # ethereum alloy-primitives.workspace = true alloy-eips.workspace = true - -futures-util.workspace = true -eyre.workspace = true -tokio.workspace = true -tokio-stream.workspace = true -serde_json.workspace = true +alloy-rlp.workspace = true alloy-signer.workspace = true alloy-signer-local = { workspace = true, features = ["mnemonic"] } alloy-rpc-types-eth.workspace = true alloy-rpc-types-engine.workspace = true alloy-network.workspace = true alloy-consensus = { workspace = true, features = ["kzg"] } +alloy-provider = { workspace = true, features = ["reqwest"] } +alloy-genesis.workspace = true + +futures-util.workspace = true +eyre.workspace = true +tokio.workspace = true +tokio-stream.workspace = true +serde_json.workspace = true tracing.workspace = true derive_more.workspace = true + +[[test]] +name = "e2e_testsuite" +path = "tests/e2e-testsuite/main.rs" diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 2953e752009..2fd5631dfb2 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -34,12 +34,18 @@ pub mod wallet; /// Helper for payload operations mod payload; +/// Helper for setting up nodes with pre-imported chain data +pub mod setup_import; + /// Helper for network operations mod network; /// Helper for rpc operations mod rpc; +/// Utilities for creating and writing RLP test data +pub mod test_rlp_utils; + /// Creates the initial setup with `num_nodes` started and interconnected. 
pub async fn setup( num_nodes: usize, diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index 0261978a669..6572223f225 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -30,7 +30,7 @@ use std::pin::Pin; use tokio_stream::StreamExt; use url::Url; -/// An helper struct to handle node actions +/// A helper struct to handle node actions #[expect(missing_debug_implementations)] pub struct NodeTestContext where diff --git a/crates/e2e-test-utils/src/setup_import.rs b/crates/e2e-test-utils/src/setup_import.rs new file mode 100644 index 00000000000..cde8136ff83 --- /dev/null +++ b/crates/e2e-test-utils/src/setup_import.rs @@ -0,0 +1,579 @@ +//! Setup utilities for importing RLP chain data before starting nodes. + +use crate::{node::NodeTestContext, NodeHelperType, Wallet}; +use reth_chainspec::ChainSpec; +use reth_cli_commands::import_op::{import_blocks_from_file, ImportConfig}; +use reth_config::Config; +use reth_db::DatabaseEnv; +use reth_node_api::{NodeTypesWithDBAdapter, TreeConfig}; +use reth_node_builder::{EngineNodeLauncher, Node, NodeBuilder, NodeConfig, NodeHandle}; +use reth_node_core::args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}; +use reth_node_ethereum::EthereumNode; +use reth_provider::{ + providers::BlockchainProvider, DatabaseProviderFactory, ProviderFactory, StageCheckpointReader, + StaticFileProviderFactory, +}; +use reth_rpc_server_types::RpcModuleSelection; +use reth_stages_types::StageId; +use reth_tasks::TaskManager; +use std::{path::Path, sync::Arc}; +use tempfile::TempDir; +use tracing::{debug, info, span, Level}; + +/// Setup result containing nodes and temporary directories that must be kept alive +pub struct ChainImportResult { + /// The nodes that were created + pub nodes: Vec>, + /// The task manager + pub task_manager: TaskManager, + /// The wallet for testing + pub wallet: Wallet, + /// Temporary directories that must be kept alive for the duration of the test + pub _temp_dirs: Vec, +} + +impl std::fmt::Debug for ChainImportResult { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("ChainImportResult") + .field("nodes", &self.nodes.len()) + .field("wallet", &self.wallet) + .field("temp_dirs", &self._temp_dirs.len()) + .finish() + } +} + +/// Creates a test setup with Ethereum nodes that have pre-imported chain data from RLP files. +/// +/// This function: +/// 1. Creates a temporary datadir for each node +/// 2. Imports the specified RLP chain data into the datadir +/// 3. Starts the nodes with the pre-populated database +/// 4. Returns the running nodes ready for testing +/// +/// Note: This function is currently specific to `EthereumNode` because the import process +/// uses Ethereum-specific consensus and block format. It can be made generic in the future +/// by abstracting the import process. +/// It uses `NoopConsensus` during import to bypass validation checks like gas limit constraints, +/// which allows importing test chains that may not strictly conform to mainnet consensus rules. The +/// nodes themselves still run with proper consensus when started. 
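+///
+/// A minimal call sketch, mirroring the integration test in this module (hedged:
+/// `chain_spec` and `rlp_path` are assumed to be prepared by the caller, e.g. via
+/// `generate_test_blocks` and `write_blocks_to_rlp`):
+///
+/// ```ignore
+/// let result = setup_engine_with_chain_import(
+///     1,                          // number of nodes
+///     chain_spec,                 // Arc<ChainSpec> matching the RLP chain's genesis
+///     false,                      // is_dev
+///     TreeConfig::default(),
+///     &rlp_path,
+///     |_| EthPayloadBuilderAttributes::default(),
+/// )
+/// .await?;
+/// // Keep `result` alive: dropping it drops the temp datadirs and shuts the nodes down.
+/// ```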
+pub async fn setup_engine_with_chain_import( + num_nodes: usize, + chain_spec: Arc, + is_dev: bool, + tree_config: TreeConfig, + rlp_path: &Path, + attributes_generator: impl Fn(u64) -> reth_payload_builder::EthPayloadBuilderAttributes + + Send + + Sync + + Copy + + 'static, +) -> eyre::Result { + let tasks = TaskManager::current(); + let exec = tasks.executor(); + + let network_config = NetworkArgs { + discovery: DiscoveryArgs { disable_discovery: true, ..DiscoveryArgs::default() }, + ..NetworkArgs::default() + }; + + // Create nodes with imported data + let mut nodes: Vec> = Vec::with_capacity(num_nodes); + let mut temp_dirs = Vec::with_capacity(num_nodes); // Keep temp dirs alive + + for idx in 0..num_nodes { + // Create a temporary datadir for this node + let temp_dir = TempDir::new()?; + let datadir = temp_dir.path().to_path_buf(); + + let mut node_config = NodeConfig::new(chain_spec.clone()) + .with_network(network_config.clone()) + .with_unused_ports() + .with_rpc( + RpcServerArgs::default() + .with_unused_ports() + .with_http() + .with_http_api(RpcModuleSelection::All), + ) + .set_dev(is_dev); + + // Set the datadir + node_config.datadir.datadir = + reth_node_core::dirs::MaybePlatformPath::from(datadir.clone()); + debug!(target: "e2e::import", "Node {idx} datadir: {datadir:?}"); + + let span = span!(Level::INFO, "node", idx); + let _enter = span.enter(); + + // First, import the chain data into this datadir + info!(target: "test", "Importing chain data from {:?} for node {} into {:?}", rlp_path, idx, datadir); + + // Create database path and static files path + let db_path = datadir.join("db"); + let static_files_path = datadir.join("static_files"); + + // Initialize the database using init_db (same as CLI import command) + // Use the same database arguments as the node will use + let db_args = reth_node_core::args::DatabaseArgs::default().database_args(); + let db_env = reth_db::init_db(&db_path, db_args)?; + let db = Arc::new(db_env); + + // Create a provider factory with the initialized database (use regular DB, not + // TempDatabase) We need to specify the node types properly for the adapter + let provider_factory = ProviderFactory::< + NodeTypesWithDBAdapter>, + >::new( + db.clone(), + chain_spec.clone(), + reth_provider::providers::StaticFileProvider::read_write(static_files_path.clone())?, + ); + + // Initialize genesis if needed + reth_db_common::init::init_genesis(&provider_factory)?; + + // Import the chain data + // Use no_state to skip state validation for test chains + let import_config = ImportConfig::default(); + let config = Config::default(); + + // Create EVM and consensus for Ethereum + let evm_config = reth_node_ethereum::EthEvmConfig::new(chain_spec.clone()); + // Use NoopConsensus to skip gas limit validation for test imports + let consensus = reth_consensus::noop::NoopConsensus::arc(); + + let result = import_blocks_from_file( + rlp_path, + import_config, + provider_factory.clone(), + &config, + evm_config, + consensus, + ) + .await?; + + info!( + target: "test", + "Imported {} blocks and {} transactions for node {}", + result.total_imported_blocks, + result.total_imported_txns, + idx + ); + + debug!(target: "e2e::import", + "Import result for node {}: decoded {} blocks, imported {} blocks, complete: {}", + idx, + result.total_decoded_blocks, + result.total_imported_blocks, + result.is_complete() + ); + + // The import counts genesis block in total_imported_blocks, so we expect + // total_imported_blocks to be total_decoded_blocks + 1 + let expected_imported 
= result.total_decoded_blocks + 1; // +1 for genesis + if result.total_imported_blocks != expected_imported { + debug!(target: "e2e::import", + "Import block count mismatch: expected {} (decoded {} + genesis), got {}", + expected_imported, result.total_decoded_blocks, result.total_imported_blocks + ); + return Err(eyre::eyre!("Chain import block count mismatch for node {}", idx)); + } + + if result.total_decoded_txns != result.total_imported_txns { + debug!(target: "e2e::import", + "Import transaction count mismatch: decoded {} != imported {}", + result.total_decoded_txns, result.total_imported_txns + ); + return Err(eyre::eyre!("Chain import transaction count mismatch for node {}", idx)); + } + + // Verify the database was properly initialized by checking stage checkpoints + { + let provider = provider_factory.database_provider_ro()?; + let headers_checkpoint = provider.get_stage_checkpoint(StageId::Headers)?; + if headers_checkpoint.is_none() { + return Err(eyre::eyre!("Headers stage checkpoint is missing after import!")); + } + debug!(target: "e2e::import", "Headers stage checkpoint after import: {headers_checkpoint:?}"); + drop(provider); + } + + // IMPORTANT: We need to properly flush and close the static files provider + // The static files provider may have open file handles that need to be closed + // before we can reopen the database in the node launcher + { + let static_file_provider = provider_factory.static_file_provider(); + // This will ensure all static file writers are properly closed + drop(static_file_provider); + } + + // Close all database handles to release locks before launching the node + drop(provider_factory); + drop(db); + + // Give the OS a moment to release file locks + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + + // Now launch the node with the pre-populated datadir + debug!(target: "e2e::import", "Launching node with datadir: {:?}", datadir); + + // Use the testing_node_with_datadir method which properly handles opening existing + // databases + let node = EthereumNode::default(); + + let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) + .testing_node_with_datadir(exec.clone(), datadir.clone()) + .with_types_and_provider::>() + .with_components(node.components_builder()) + .with_add_ons(node.add_ons()) + .launch_with_fn(|builder| { + let launcher = EngineNodeLauncher::new( + builder.task_executor().clone(), + builder.config().datadir(), + tree_config.clone(), + ); + builder.launch_with(launcher) + }) + .await?; + + let node_ctx = NodeTestContext::new(node, attributes_generator).await?; + + nodes.push(node_ctx); + temp_dirs.push(temp_dir); // Keep temp dir alive + } + + Ok(ChainImportResult { + nodes, + task_manager: tasks, + wallet: crate::Wallet::default().with_chain_id(chain_spec.chain.id()), + _temp_dirs: temp_dirs, + }) +} + +/// Helper to load forkchoice state from a JSON file +pub fn load_forkchoice_state(path: &Path) -> eyre::Result { + let json_str = std::fs::read_to_string(path)?; + let fcu_data: serde_json::Value = serde_json::from_str(&json_str)?; + + // The headfcu.json file contains a JSON-RPC request with the forkchoice state in params[0] + let state = &fcu_data["params"][0]; + Ok(alloy_rpc_types_engine::ForkchoiceState { + head_block_hash: state["headBlockHash"] + .as_str() + .ok_or_else(|| eyre::eyre!("missing headBlockHash"))? + .parse()?, + safe_block_hash: state["safeBlockHash"] + .as_str() + .ok_or_else(|| eyre::eyre!("missing safeBlockHash"))? 
+ .parse()?, + finalized_block_hash: state["finalizedBlockHash"] + .as_str() + .ok_or_else(|| eyre::eyre!("missing finalizedBlockHash"))? + .parse()?, + }) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_rlp_utils::{create_fcu_json, generate_test_blocks, write_blocks_to_rlp}; + use reth_chainspec::{ChainSpecBuilder, MAINNET}; + use reth_db::mdbx::DatabaseArguments; + use reth_payload_builder::EthPayloadBuilderAttributes; + use reth_primitives::SealedBlock; + use reth_provider::{ + test_utils::MockNodeTypesWithDB, BlockHashReader, BlockNumReader, BlockReaderIdExt, + }; + use std::path::PathBuf; + + #[tokio::test] + async fn test_stage_checkpoints_persistence() { + // This test specifically verifies that stage checkpoints are persisted correctly + // when reopening the database + reth_tracing::init_test_tracing(); + + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis( + serde_json::from_str(include_str!("testsuite/assets/genesis.json")).unwrap(), + ) + .london_activated() + .shanghai_activated() + .build(), + ); + + // Generate test blocks + let test_blocks = generate_test_blocks(&chain_spec, 5); + + // Create temporary files for RLP data + let temp_dir = tempfile::tempdir().expect("Failed to create temp dir"); + let rlp_path = temp_dir.path().join("test_chain.rlp"); + write_blocks_to_rlp(&test_blocks, &rlp_path).expect("Failed to write RLP data"); + + // Create a persistent datadir that won't be deleted + let datadir = temp_dir.path().join("datadir"); + std::fs::create_dir_all(&datadir).unwrap(); + let db_path = datadir.join("db"); + let static_files_path = datadir.join("static_files"); + + // Import the chain + { + let db_env = reth_db::init_db(&db_path, DatabaseArguments::default()).unwrap(); + let db = Arc::new(db_env); + + let provider_factory: ProviderFactory< + NodeTypesWithDBAdapter>, + > = ProviderFactory::new( + db.clone(), + chain_spec.clone(), + reth_provider::providers::StaticFileProvider::read_write(static_files_path.clone()) + .unwrap(), + ); + + // Initialize genesis + reth_db_common::init::init_genesis(&provider_factory).unwrap(); + + // Import the chain data + let import_config = ImportConfig::default(); + let config = Config::default(); + let evm_config = reth_node_ethereum::EthEvmConfig::new(chain_spec.clone()); + // Use NoopConsensus to skip gas limit validation for test imports + let consensus = reth_consensus::noop::NoopConsensus::arc(); + + let result = import_blocks_from_file( + &rlp_path, + import_config, + provider_factory.clone(), + &config, + evm_config, + consensus, + ) + .await + .unwrap(); + + assert_eq!(result.total_decoded_blocks, 5); + assert_eq!(result.total_imported_blocks, 6); // +1 for genesis + + // Verify stage checkpoints exist + let provider = provider_factory.database_provider_ro().unwrap(); + let headers_checkpoint = provider.get_stage_checkpoint(StageId::Headers).unwrap(); + assert!(headers_checkpoint.is_some(), "Headers checkpoint should exist after import"); + assert_eq!( + headers_checkpoint.unwrap().block_number, + 5, + "Headers checkpoint should be at block 5" + ); + drop(provider); + + // Properly close static files to release all file handles + let static_file_provider = provider_factory.static_file_provider(); + drop(static_file_provider); + + drop(provider_factory); + drop(db); + } + + // Give the OS a moment to release file locks + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + + // Now reopen the database and verify checkpoints are still there + { + let 
db_env = reth_db::init_db(&db_path, DatabaseArguments::default()).unwrap(); + let db = Arc::new(db_env); + + let provider_factory: ProviderFactory< + NodeTypesWithDBAdapter>, + > = ProviderFactory::new( + db, + chain_spec.clone(), + reth_provider::providers::StaticFileProvider::read_only(static_files_path, false) + .unwrap(), + ); + + let provider = provider_factory.database_provider_ro().unwrap(); + + // Check that stage checkpoints are still present + let headers_checkpoint = provider.get_stage_checkpoint(StageId::Headers).unwrap(); + assert!( + headers_checkpoint.is_some(), + "Headers checkpoint should still exist after reopening database" + ); + assert_eq!( + headers_checkpoint.unwrap().block_number, + 5, + "Headers checkpoint should still be at block 5" + ); + + // Verify we can read blocks + let block_5_hash = provider.block_hash(5).unwrap(); + assert!(block_5_hash.is_some(), "Block 5 should exist in database"); + assert_eq!(block_5_hash.unwrap(), test_blocks[4].hash(), "Block 5 hash should match"); + + // Check all stage checkpoints + debug!(target: "e2e::import", "All stage checkpoints after reopening:"); + for stage in StageId::ALL { + let checkpoint = provider.get_stage_checkpoint(stage).unwrap(); + debug!(target: "e2e::import", " Stage {stage:?}: {checkpoint:?}"); + } + } + } + + /// Helper to create test chain spec + fn create_test_chain_spec() -> Arc { + Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis( + serde_json::from_str(include_str!("testsuite/assets/genesis.json")).unwrap(), + ) + .london_activated() + .shanghai_activated() + .build(), + ) + } + + /// Helper to setup test blocks and write to RLP + async fn setup_test_blocks_and_rlp( + chain_spec: &ChainSpec, + block_count: u64, + temp_dir: &Path, + ) -> (Vec, PathBuf) { + let test_blocks = generate_test_blocks(chain_spec, block_count); + assert_eq!( + test_blocks.len(), + block_count as usize, + "Should have generated expected blocks" + ); + + let rlp_path = temp_dir.join("test_chain.rlp"); + write_blocks_to_rlp(&test_blocks, &rlp_path).expect("Failed to write RLP data"); + + let rlp_size = std::fs::metadata(&rlp_path).expect("RLP file should exist").len(); + debug!(target: "e2e::import", "Wrote RLP file with size: {rlp_size} bytes"); + + (test_blocks, rlp_path) + } + + #[tokio::test] + async fn test_import_blocks_only() { + // Tests just the block import functionality without full node setup + reth_tracing::init_test_tracing(); + + let chain_spec = create_test_chain_spec(); + let temp_dir = tempfile::tempdir().expect("Failed to create temp dir"); + let (test_blocks, rlp_path) = + setup_test_blocks_and_rlp(&chain_spec, 10, temp_dir.path()).await; + + // Create a test database + let datadir = temp_dir.path().join("datadir"); + std::fs::create_dir_all(&datadir).unwrap(); + let db_path = datadir.join("db"); + let db_env = reth_db::init_db(&db_path, DatabaseArguments::default()).unwrap(); + let db = Arc::new(reth_db::test_utils::TempDatabase::new(db_env, db_path)); + + // Create static files path + let static_files_path = datadir.join("static_files"); + + // Create a provider factory + let provider_factory: ProviderFactory = ProviderFactory::new( + db.clone(), + chain_spec.clone(), + reth_provider::providers::StaticFileProvider::read_write(static_files_path).unwrap(), + ); + + // Initialize genesis + reth_db_common::init::init_genesis(&provider_factory).unwrap(); + + // Import the chain data + let import_config = ImportConfig::default(); + let config = Config::default(); + let evm_config = 
reth_node_ethereum::EthEvmConfig::new(chain_spec.clone()); + // Use NoopConsensus to skip gas limit validation for test imports + let consensus = reth_consensus::noop::NoopConsensus::arc(); + + let result = import_blocks_from_file( + &rlp_path, + import_config, + provider_factory.clone(), + &config, + evm_config, + consensus, + ) + .await + .unwrap(); + + debug!(target: "e2e::import", + "Import result: decoded {} blocks, imported {} blocks", + result.total_decoded_blocks, result.total_imported_blocks + ); + + // Verify the import was successful + assert_eq!(result.total_decoded_blocks, 10); + assert_eq!(result.total_imported_blocks, 11); // +1 for genesis + assert_eq!(result.total_decoded_txns, 0); + assert_eq!(result.total_imported_txns, 0); + + // Verify we can read the imported blocks + let provider = provider_factory.database_provider_ro().unwrap(); + let latest_block = provider.last_block_number().unwrap(); + assert_eq!(latest_block, 10, "Should have imported up to block 10"); + + let block_10_hash = provider.block_hash(10).unwrap().expect("Block 10 should exist"); + assert_eq!(block_10_hash, test_blocks[9].hash(), "Block 10 hash should match"); + } + + #[tokio::test] + async fn test_import_with_node_integration() { + // Tests the full integration with node setup, forkchoice updates, and syncing + reth_tracing::init_test_tracing(); + + let chain_spec = create_test_chain_spec(); + let temp_dir = tempfile::tempdir().expect("Failed to create temp dir"); + let (test_blocks, rlp_path) = + setup_test_blocks_and_rlp(&chain_spec, 10, temp_dir.path()).await; + + // Create FCU data for the tip + let tip = test_blocks.last().expect("Should have generated blocks"); + let fcu_path = temp_dir.path().join("test_fcu.json"); + std::fs::write(&fcu_path, create_fcu_json(tip).to_string()) + .expect("Failed to write FCU data"); + + // Setup nodes with imported chain + let result = setup_engine_with_chain_import( + 1, + chain_spec, + false, + TreeConfig::default(), + &rlp_path, + |_| EthPayloadBuilderAttributes::default(), + ) + .await + .expect("Failed to setup nodes with chain import"); + + // Load and apply forkchoice state + let fcu_state = load_forkchoice_state(&fcu_path).expect("Failed to load forkchoice state"); + + let node = &result.nodes[0]; + + // Send forkchoice update to make the imported chain canonical + node.update_forkchoice(fcu_state.finalized_block_hash, fcu_state.head_block_hash) + .await + .expect("Failed to update forkchoice"); + + // Wait for the node to sync to the head + node.sync_to(fcu_state.head_block_hash).await.expect("Failed to sync to head"); + + // Verify the chain tip + let latest = node + .inner + .provider + .sealed_header_by_id(alloy_eips::BlockId::latest()) + .expect("Failed to get latest header") + .expect("No latest header found"); + + assert_eq!( + latest.hash(), + fcu_state.head_block_hash, + "Chain tip does not match expected head" + ); + } +} diff --git a/crates/e2e-test-utils/src/test_rlp_utils.rs b/crates/e2e-test-utils/src/test_rlp_utils.rs new file mode 100644 index 00000000000..b33b598fd0b --- /dev/null +++ b/crates/e2e-test-utils/src/test_rlp_utils.rs @@ -0,0 +1,185 @@ +//! 
Utilities for creating and writing RLP test data + +use alloy_consensus::{constants::EMPTY_WITHDRAWALS, BlockHeader, Header}; +use alloy_eips::eip4895::Withdrawals; +use alloy_primitives::{Address, B256, B64, U256}; +use alloy_rlp::Encodable; +use reth_chainspec::{ChainSpec, EthereumHardforks}; +use reth_ethereum_primitives::{Block, BlockBody}; +use reth_primitives::SealedBlock; +use reth_primitives_traits::Block as BlockTrait; +use std::{io::Write, path::Path}; +use tracing::debug; + +/// Generate test blocks for a given chain spec +pub fn generate_test_blocks(chain_spec: &ChainSpec, count: u64) -> Vec { + let mut blocks: Vec = Vec::new(); + let genesis_header = chain_spec.sealed_genesis_header(); + let mut parent_hash = genesis_header.hash(); + let mut parent_number = genesis_header.number(); + let mut parent_base_fee = genesis_header.base_fee_per_gas; + let mut parent_gas_limit = genesis_header.gas_limit; + + debug!(target: "e2e::import", + "Genesis header base fee: {:?}, gas limit: {}, state root: {:?}", + parent_base_fee, + parent_gas_limit, + genesis_header.state_root() + ); + + for i in 1..=count { + // Create a simple header + let mut header = Header { + parent_hash, + number: parent_number + 1, + gas_limit: parent_gas_limit, // Use parent's gas limit + gas_used: 0, // Empty blocks use no gas + timestamp: genesis_header.timestamp() + i * 12, // 12 second blocks + beneficiary: Address::ZERO, + receipts_root: alloy_consensus::constants::EMPTY_RECEIPTS, + logs_bloom: Default::default(), + difficulty: U256::from(1), // Will be overridden for post-merge + // Use the same state root as parent for now (empty state changes) + state_root: if i == 1 { + genesis_header.state_root() + } else { + blocks.last().unwrap().state_root + }, + transactions_root: alloy_consensus::constants::EMPTY_TRANSACTIONS, + ommers_hash: alloy_consensus::constants::EMPTY_OMMER_ROOT_HASH, + mix_hash: B256::ZERO, + nonce: B64::from(0u64), + extra_data: Default::default(), + base_fee_per_gas: None, + withdrawals_root: None, + blob_gas_used: None, + excess_blob_gas: None, + parent_beacon_block_root: None, + requests_hash: None, + }; + + // Set required fields based on chain spec + if chain_spec.is_london_active_at_block(header.number) { + // Calculate base fee based on parent block + if let Some(parent_fee) = parent_base_fee { + // For the first block, we need to use the exact expected base fee + // The consensus rules expect it to be calculated from the genesis + let (parent_gas_used, parent_gas_limit) = if i == 1 { + // Genesis block parameters + (genesis_header.gas_used, genesis_header.gas_limit) + } else { + let last_block = blocks.last().unwrap(); + (last_block.gas_used, last_block.gas_limit) + }; + header.base_fee_per_gas = Some(alloy_eips::calc_next_block_base_fee( + parent_gas_used, + parent_gas_limit, + parent_fee, + chain_spec.base_fee_params_at_timestamp(header.timestamp), + )); + debug!(target: "e2e::import", "Block {} calculated base fee: {:?} (parent gas used: {}, parent gas limit: {}, parent base fee: {})", + i, header.base_fee_per_gas, parent_gas_used, parent_gas_limit, parent_fee); + parent_base_fee = header.base_fee_per_gas; + } + } + + // For post-merge blocks + if chain_spec.is_paris_active_at_block(header.number) { + header.difficulty = U256::ZERO; + header.nonce = B64::ZERO; + } + + // For post-shanghai blocks + if chain_spec.is_shanghai_active_at_timestamp(header.timestamp) { + header.withdrawals_root = Some(EMPTY_WITHDRAWALS); + } + + // For post-cancun blocks + if 
chain_spec.is_cancun_active_at_timestamp(header.timestamp) { + header.blob_gas_used = Some(0); + header.excess_blob_gas = Some(0); + header.parent_beacon_block_root = Some(B256::ZERO); + } + + // Create an empty block body + let body = BlockBody { + transactions: vec![], + ommers: vec![], + withdrawals: header.withdrawals_root.is_some().then(Withdrawals::default), + }; + + // Create the block + let block = Block { header: header.clone(), body: body.clone() }; + let sealed_block = BlockTrait::seal_slow(block); + + debug!(target: "e2e::import", + "Generated block {} with hash {:?}", + sealed_block.number(), + sealed_block.hash() + ); + debug!(target: "e2e::import", + " Body has {} transactions, {} ommers, withdrawals: {}", + body.transactions.len(), + body.ommers.len(), + body.withdrawals.is_some() + ); + + // Update parent for next iteration + parent_hash = sealed_block.hash(); + parent_number = sealed_block.number(); + parent_gas_limit = sealed_block.gas_limit; + if header.base_fee_per_gas.is_some() { + parent_base_fee = header.base_fee_per_gas; + } + + blocks.push(sealed_block); + } + + blocks +} + +/// Write blocks to RLP file +pub fn write_blocks_to_rlp(blocks: &[SealedBlock], path: &Path) -> std::io::Result<()> { + let mut file = std::fs::File::create(path)?; + let mut total_bytes = 0; + + for (i, block) in blocks.iter().enumerate() { + // Convert SealedBlock to Block before encoding + let block_for_encoding = block.clone().unseal(); + + let mut buf = Vec::new(); + block_for_encoding.encode(&mut buf); + debug!(target: "e2e::import", + "Block {} has {} transactions, encoded to {} bytes", + i, + block.body().transactions.len(), + buf.len() + ); + + // Debug: check what's in the encoded data + debug!(target: "e2e::import", "Block {} encoded to {} bytes", i, buf.len()); + if buf.len() < 20 { + debug!(target: "e2e::import", " Raw bytes: {:?}", &buf); + } else { + debug!(target: "e2e::import", " First 20 bytes: {:?}", &buf[..20]); + } + + total_bytes += buf.len(); + file.write_all(&buf)?; + } + + file.flush()?; + debug!(target: "e2e::import", "Total RLP bytes written: {total_bytes}"); + Ok(()) +} + +/// Create FCU JSON for the tip of the chain +pub fn create_fcu_json(tip: &SealedBlock) -> serde_json::Value { + serde_json::json!({ + "params": [{ + "headBlockHash": format!("0x{:x}", tip.hash()), + "safeBlockHash": format!("0x{:x}", tip.hash()), + "finalizedBlockHash": format!("0x{:x}", tip.hash()), + }] + }) +} diff --git a/crates/e2e-test-utils/src/testsuite/README.md b/crates/e2e-test-utils/src/testsuite/README.md new file mode 100644 index 00000000000..1d91367fef0 --- /dev/null +++ b/crates/e2e-test-utils/src/testsuite/README.md @@ -0,0 +1,106 @@ +# E2E Test Suite Framework + +This directory contains the framework for writing end-to-end (e2e) tests in Reth. The framework provides utilities for setting up test environments, performing actions, and verifying blockchain behavior. + +## Test Organization + +E2E tests using this framework follow a consistent structure across the codebase: + +### Directory Structure +Each crate that requires e2e tests should organize them as follows: +``` +/ +├── src/ +│ └── ... 
(implementation code) +├── tests/ +│ └── e2e-testsuite/ +│ └── main.rs (or other test files) +└── Cargo.toml +``` + +### Cargo.toml Configuration +In your crate's `Cargo.toml`, define the e2e test binary: +```toml +[[test]] +name = "e2e_testsuite" +path = "tests/e2e-testsuite/main.rs" +harness = true +``` + +**Important**: The test binary MUST be named `e2e_testsuite` to be properly recognized by the nextest filter and CI workflows. + +## Running E2E Tests + +### Run all e2e tests across the workspace +```bash +cargo nextest run --workspace \ + --exclude 'example-*' \ + --exclude 'exex-subscription' \ + --exclude 'reth-bench' \ + --exclude 'ef-tests' \ + --exclude 'op-reth' \ + --exclude 'reth' \ + -E 'binary(e2e_testsuite)' +``` + +Note: The `--exclude` flags prevent compilation of crates that don't contain e2e tests (examples, benchmarks, binaries, and EF tests), significantly reducing build time. + +### Run e2e tests for a specific crate +```bash +cargo nextest run -p -E 'binary(e2e_testsuite)' +``` + +### Run with additional features +```bash +cargo nextest run --locked --features "asm-keccak" --workspace -E 'binary(e2e_testsuite)' +``` + +### Run a specific test +```bash +cargo nextest run --workspace -E 'binary(e2e_testsuite) and test(test_name)' +``` + +## Writing E2E Tests + +Tests use the framework components from this directory: + +```rust +use reth_e2e_test_utils::{setup_import, Environment, TestBuilder}; + +#[tokio::test] +async fn test_example() -> eyre::Result<()> { + // Create test environment + let (mut env, mut handle) = TestBuilder::new() + .build() + .await?; + + // Perform test actions... + + Ok(()) +} +``` + +## Framework Components + +- **Environment**: Core test environment managing nodes and network state +- **TestBuilder**: Builder pattern for configuring test environments +- **Actions** (`actions/`): Pre-built test actions like block production, reorgs, etc. +- **Setup utilities**: Helper functions for common test scenarios + +## CI Integration + +E2E tests run in a dedicated GitHub Actions workflow (`.github/workflows/e2e.yml`) with: +- Extended timeouts (2 minutes per test, with 3 retries) +- Isolation from unit and integration tests +- Parallel execution support + +## Nextest Configuration + +The framework uses custom nextest settings (`.config/nextest.toml`): +```toml +[[profile.default.overrides]] +filter = "binary(e2e_testsuite)" +slow-timeout = { period = "2m", terminate-after = 3 } +``` + +This ensures all e2e tests get appropriate timeouts for complex blockchain operations. 
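+
+## Starting From Pre-Imported Chain Data
+
+Tests can also boot nodes from chain data imported from an RLP file instead of a
+fresh genesis. A sketch (hedged: it assumes a prepared `Setup` value and an RLP
+file at the given path, and that `with_setup_and_import` is exposed on the
+builder; see `setup_import.rs` for the actual import flow):
+
+```rust
+// Attach an RLP file to the setup; during setup the framework imports the
+// chain into each node's datadir before the node is launched.
+let test = TestBuilder::new()
+    .with_setup_and_import(setup, "tests/assets/chain.rlp");
+```
+
+The import itself runs with `NoopConsensus`, so test chains that do not satisfy
+mainnet consensus rules can still be loaded; the launched nodes then run with
+their regular consensus.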
\ No newline at end of file diff --git a/crates/e2e-test-utils/src/testsuite/actions/mod.rs b/crates/e2e-test-utils/src/testsuite/actions/mod.rs index 205eb9ac48e..58472618001 100644 --- a/crates/e2e-test-utils/src/testsuite/actions/mod.rs +++ b/crates/e2e-test-utils/src/testsuite/actions/mod.rs @@ -18,7 +18,8 @@ pub mod reorg; pub use engine_api::{ExpectedPayloadStatus, SendNewPayload, SendNewPayloads}; pub use fork::{CreateFork, ForkBase, SetForkBase, SetForkBaseFromBlockInfo, ValidateFork}; pub use node_ops::{ - CaptureBlockOnNode, CompareNodeChainTips, SelectActiveNode, ValidateBlockTag, WaitForSync, + AssertChainTip, CaptureBlockOnNode, CompareNodeChainTips, SelectActiveNode, ValidateBlockTag, + WaitForSync, }; pub use produce_blocks::{ AssertMineBlock, BroadcastLatestForkchoice, BroadcastNextNewPayload, CheckPayloadAccepted, diff --git a/crates/e2e-test-utils/src/testsuite/actions/node_ops.rs b/crates/e2e-test-utils/src/testsuite/actions/node_ops.rs index 2b3914339f8..f42951fc57b 100644 --- a/crates/e2e-test-utils/src/testsuite/actions/node_ops.rs +++ b/crates/e2e-test-utils/src/testsuite/actions/node_ops.rs @@ -338,3 +338,45 @@ where }) } } + +/// Action to assert the current chain tip is at a specific block number. +#[derive(Debug)] +pub struct AssertChainTip { + /// Expected block number + pub expected_block_number: u64, +} + +impl AssertChainTip { + /// Create a new `AssertChainTip` action + pub const fn new(expected_block_number: u64) -> Self { + Self { expected_block_number } + } +} + +impl Action for AssertChainTip +where + Engine: EngineTypes, +{ + fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { + Box::pin(async move { + let current_block = env + .current_block_info() + .ok_or_else(|| eyre::eyre!("No current block information available"))?; + + if current_block.number != self.expected_block_number { + return Err(eyre::eyre!( + "Expected chain tip to be at block {}, but found block {}", + self.expected_block_number, + current_block.number + )); + } + + debug!( + "Chain tip verified at block {} (hash: {})", + current_block.number, current_block.hash + ); + + Ok(()) + }) + } +} diff --git a/crates/e2e-test-utils/src/testsuite/mod.rs b/crates/e2e-test-utils/src/testsuite/mod.rs index 811d76a68db..580dc220665 100644 --- a/crates/e2e-test-utils/src/testsuite/mod.rs +++ b/crates/e2e-test-utils/src/testsuite/mod.rs @@ -15,25 +15,55 @@ use std::{collections::HashMap, marker::PhantomData}; pub mod actions; pub mod setup; use crate::testsuite::setup::Setup; +use alloy_provider::{Provider, ProviderBuilder}; use alloy_rpc_types_engine::{ForkchoiceState, PayloadAttributes}; use reth_rpc_builder::auth::AuthServerHandle; - -#[cfg(test)] -mod examples; +use std::sync::Arc; +use url::Url; /// Client handles for both regular RPC and Engine API endpoints -#[derive(Debug, Clone)] +#[derive(Clone)] pub struct NodeClient { /// Regular JSON-RPC client pub rpc: HttpClient, /// Engine API client pub engine: AuthServerHandle, + /// Alloy provider for interacting with the node + provider: Arc, } impl NodeClient { - /// Instantiates a new [`NodeClient`] with the given handles - pub const fn new(rpc: HttpClient, engine: AuthServerHandle) -> Self { - Self { rpc, engine } + /// Instantiates a new [`NodeClient`] with the given handles and RPC URL + pub fn new(rpc: HttpClient, engine: AuthServerHandle, url: Url) -> Self { + let provider = + Arc::new(ProviderBuilder::new().connect_http(url)) as Arc; + Self { rpc, engine, provider } + } + + /// Get a block by number using 
the alloy provider + pub async fn get_block_by_number( + &self, + number: alloy_eips::BlockNumberOrTag, + ) -> Result> { + self.provider + .get_block_by_number(number) + .await + .map_err(|e| eyre::eyre!("Failed to get block by number: {}", e)) + } + + /// Check if the node is ready by attempting to get the latest block + pub async fn is_ready(&self) -> bool { + self.get_block_by_number(alloy_eips::BlockNumberOrTag::Latest).await.is_ok() + } +} + +impl std::fmt::Debug for NodeClient { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("NodeClient") + .field("rpc", &self.rpc) + .field("engine", &self.engine) + .field("provider", &"") + .finish() } } @@ -261,6 +291,17 @@ where self } + /// Set the test setup with chain import from RLP file + pub fn with_setup_and_import( + mut self, + mut setup: Setup, + rlp_path: impl Into, + ) -> Self { + setup.import_rlp_path = Some(rlp_path.into()); + self.setup = Some(setup); + self + } + /// Add an action to the test pub fn with_action(mut self, action: A) -> Self where diff --git a/crates/e2e-test-utils/src/testsuite/setup.rs b/crates/e2e-test-utils/src/testsuite/setup.rs index 0970451526b..c91a50d3436 100644 --- a/crates/e2e-test-utils/src/testsuite/setup.rs +++ b/crates/e2e-test-utils/src/testsuite/setup.rs @@ -7,7 +7,6 @@ use crate::{ use alloy_eips::BlockNumberOrTag; use alloy_primitives::B256; use alloy_rpc_types_engine::{ForkchoiceState, PayloadAttributes}; -use alloy_rpc_types_eth::{Block as RpcBlock, Header, Receipt, Transaction, TransactionRequest}; use eyre::{eyre, Result}; use reth_chainspec::ChainSpec; use reth_engine_local::LocalPayloadAttributesBuilder; @@ -15,14 +14,13 @@ use reth_ethereum_primitives::Block; use reth_node_api::{EngineTypes, NodeTypes, PayloadTypes, TreeConfig}; use reth_node_core::primitives::RecoveredBlock; use reth_payload_builder::EthPayloadBuilderAttributes; -use reth_rpc_api::clients::EthApiClient; use revm::state::EvmState; -use std::{marker::PhantomData, sync::Arc}; +use std::{marker::PhantomData, path::Path, sync::Arc}; use tokio::{ sync::mpsc, time::{sleep, Duration}, }; -use tracing::{debug, error}; +use tracing::debug; /// Configuration for setting up test environment #[derive(Debug)] @@ -45,6 +43,11 @@ pub struct Setup { pub is_dev: bool, /// Tracks instance generic. 
_phantom: PhantomData, + /// Holds the import result to keep nodes alive when using imported chain + /// This is stored as an option to avoid lifetime issues with `tokio::spawn` + import_result_holder: Option, + /// Path to RLP file to import during setup + pub import_rlp_path: Option, } impl Default for Setup { @@ -59,6 +62,8 @@ impl Default for Setup { shutdown_tx: None, is_dev: true, _phantom: Default::default(), + import_result_holder: None, + import_rlp_path: None, } } } @@ -129,6 +134,42 @@ where self } + /// Apply setup using pre-imported chain data from RLP file + pub async fn apply_with_import( + &mut self, + env: &mut Environment, + rlp_path: &Path, + ) -> Result<()> + where + N: NodeBuilderHelper, + LocalPayloadAttributesBuilder: PayloadAttributesBuilder< + <::Payload as PayloadTypes>::PayloadAttributes, + >, + TmpNodeAddOnsHandle: RpcHandleProvider, TmpNodeEthApi>, + { + // Create nodes with imported chain data + let import_result = self.create_nodes_with_import::(rlp_path).await?; + + // Extract node clients + let mut node_clients = Vec::new(); + let nodes = &import_result.nodes; + for node in nodes { + let rpc = node + .rpc_client() + .ok_or_else(|| eyre!("Failed to create HTTP RPC client for node"))?; + let auth = node.auth_server_handle(); + let url = node.rpc_url(); + node_clients.push(crate::testsuite::NodeClient::new(rpc, auth, url)); + } + + // Store the import result to keep nodes alive + // They will be dropped when the Setup is dropped + self.import_result_holder = Some(import_result); + + // Finalize setup - this will wait for nodes and initialize states + self.finalize_setup(env, node_clients, true).await + } + /// Apply the setup to the environment pub async fn apply(&mut self, env: &mut Environment) -> Result<()> where @@ -138,28 +179,21 @@ where >, TmpNodeAddOnsHandle: RpcHandleProvider, TmpNodeEthApi>, { + // If import_rlp_path is set, use apply_with_import instead + if let Some(rlp_path) = self.import_rlp_path.take() { + // Note: this future is quite large so we box it + return Box::pin(self.apply_with_import::(env, &rlp_path)).await; + } let chain_spec = self.chain_spec.clone().ok_or_else(|| eyre!("Chain specification is required"))?; let (shutdown_tx, mut shutdown_rx) = mpsc::channel(1); - self.shutdown_tx = Some(shutdown_tx); let is_dev = self.is_dev; let node_count = self.network.node_count; - let attributes_generator = move |timestamp| { - let attributes = PayloadAttributes { - timestamp, - prev_randao: B256::ZERO, - suggested_fee_recipient: alloy_primitives::Address::ZERO, - withdrawals: Some(vec![]), - parent_beacon_block_root: Some(B256::ZERO), - }; - <::Payload as PayloadTypes>::PayloadBuilderAttributes::from( - EthPayloadBuilderAttributes::new(B256::ZERO, attributes), - ) - }; + let attributes_generator = self.create_attributes_generator::(); let result = setup_engine_with_connection::( node_count, @@ -180,8 +214,9 @@ where .rpc_client() .ok_or_else(|| eyre!("Failed to create HTTP RPC client for node"))?; let auth = node.auth_server_handle(); + let url = node.rpc_url(); - node_clients.push(crate::testsuite::NodeClient::new(rpc, auth)); + node_clients.push(crate::testsuite::NodeClient::new(rpc, auth, url)); } // spawn a separate task just to handle the shutdown @@ -195,100 +230,182 @@ where }); } Err(e) => { - error!("Failed to setup nodes: {}", e); return Err(eyre!("Failed to setup nodes: {}", e)); } } - if node_clients.is_empty() { - return Err(eyre!("No nodes were created")); - } + // Finalize setup + self.finalize_setup(env, node_clients, 
false).await + } - - // wait for all nodes to be ready to accept RPC requests before proceeding - for (idx, client) in node_clients.iter().enumerate() { - let mut retry_count = 0; - const MAX_RETRIES: usize = 5; - let mut last_error = None; + /// Create nodes with imported chain data + /// + /// Note: Currently this only supports `EthereumNode` due to the import process + /// being Ethereum-specific. The generic parameter N is kept for consistency + /// with other methods but is not used. + async fn create_nodes_with_import<N>( + &self, + rlp_path: &Path, + ) -> Result<crate::setup_import::ChainImportResult> + where + N: NodeBuilderHelper, + LocalPayloadAttributesBuilder<N::ChainSpec>: PayloadAttributesBuilder< + <<N as NodeTypes>::Payload as PayloadTypes>::PayloadAttributes, + >, + TmpNodeAddOnsHandle<N>: RpcHandleProvider<TmpNodeAdapter<N>, TmpNodeEthApi<N>>, + { + let chain_spec = + self.chain_spec.clone().ok_or_else(|| eyre!("Chain specification is required"))?; - while retry_count < MAX_RETRIES { - match EthApiClient::<TransactionRequest, Transaction, RpcBlock, Receipt, Header>::block_by_number( - &client.rpc, - BlockNumberOrTag::Latest, - false, - ) - .await - { - Ok(_) => { - debug!("Node {idx} RPC endpoint is ready"); - break; - } - Err(e) => { - last_error = Some(e); - retry_count += 1; - debug!( - "Node {idx} RPC endpoint not ready, retry {retry_count}/{MAX_RETRIES}" - ); - sleep(Duration::from_millis(500)).await; - } - } - } - if retry_count == MAX_RETRIES { - return Err(eyre!("Failed to connect to node {idx} RPC endpoint after {MAX_RETRIES} retries: {:?}", last_error)); - } - } + let attributes_generator = move |timestamp| { + let attributes = PayloadAttributes { + timestamp, + prev_randao: B256::ZERO, + suggested_fee_recipient: alloy_primitives::Address::ZERO, + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + }; + EthPayloadBuilderAttributes::new(B256::ZERO, attributes) + }; - env.node_clients = node_clients; + crate::setup_import::setup_engine_with_chain_import( + self.network.node_count, + chain_spec, + self.is_dev, + self.tree_config.clone(), + rlp_path, + attributes_generator, + ) + .await + } - // Initialize per-node states for all nodes - env.initialize_node_states(node_count); - - // Initialize each node's state with genesis block information - let genesis_block_info = { - let first_client = &env.node_clients[0]; - let genesis_block = EthApiClient::< - TransactionRequest, - Transaction, - RpcBlock, - Receipt, - Header, - >::block_by_number( - &first_client.rpc, BlockNumberOrTag::Number(0), false + /// Create the attributes generator function + fn create_attributes_generator<N>( + &self, + ) -> impl Fn(u64) -> <<N as NodeTypes>::Payload as PayloadTypes>::PayloadBuilderAttributes + Copy + where + N: NodeBuilderHelper, + LocalPayloadAttributesBuilder<N::ChainSpec>: PayloadAttributesBuilder< + <<N as NodeTypes>::Payload as PayloadTypes>::PayloadAttributes, + >, + TmpNodeAddOnsHandle<N>: RpcHandleProvider<TmpNodeAdapter<N>, TmpNodeEthApi<N>>, + { + move |timestamp| { + let attributes = PayloadAttributes { + timestamp, + prev_randao: B256::ZERO, + suggested_fee_recipient: alloy_primitives::Address::ZERO, + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + }; + <<N as NodeTypes>::Payload as PayloadTypes>::PayloadBuilderAttributes::from( + EthPayloadBuilderAttributes::new(B256::ZERO, attributes), ) - .await?
- .ok_or_else(|| eyre!("Genesis block not found"))?; + } + } - crate::testsuite::BlockInfo { - hash: genesis_block.header.hash, - number: genesis_block.header.number, - timestamp: genesis_block.header.timestamp, - } + /// Common finalization logic for both apply methods + async fn finalize_setup( + &self, + env: &mut Environment<I>, + node_clients: Vec<crate::testsuite::NodeClient>, + use_latest_block: bool, + ) -> Result<()> { + if node_clients.is_empty() { + return Err(eyre!("No nodes were created")); + } + + // Wait for all nodes to be ready + self.wait_for_nodes_ready(&node_clients).await?; + + env.node_clients = node_clients; + env.initialize_node_states(self.network.node_count); + + // Get initial block info (genesis or latest depending on use_latest_block) + let (initial_block_info, genesis_block_info) = if use_latest_block { + // For imported chain, get both latest and genesis + let latest = + self.get_block_info(&env.node_clients[0], BlockNumberOrTag::Latest).await?; + let genesis = + self.get_block_info(&env.node_clients[0], BlockNumberOrTag::Number(0)).await?; + (latest, genesis) + } else { + // For fresh chain, both are genesis + let genesis = + self.get_block_info(&env.node_clients[0], BlockNumberOrTag::Number(0)).await?; + (genesis, genesis) }; - // Initialize all node states with the same genesis block + // Initialize all node states for (node_idx, node_state) in env.node_states.iter_mut().enumerate() { - node_state.current_block_info = Some(genesis_block_info); - node_state.latest_header_time = genesis_block_info.timestamp; + node_state.current_block_info = Some(initial_block_info); + node_state.latest_header_time = initial_block_info.timestamp; node_state.latest_fork_choice_state = ForkchoiceState { - head_block_hash: genesis_block_info.hash, - safe_block_hash: genesis_block_info.hash, + head_block_hash: initial_block_info.hash, + safe_block_hash: initial_block_info.hash, finalized_block_hash: genesis_block_info.hash, }; debug!( - "Node {} initialized with genesis block {} (hash: {})", - node_idx, genesis_block_info.number, genesis_block_info.hash + "Node {} initialized with block {} (hash: {})", + node_idx, initial_block_info.number, initial_block_info.hash ); } debug!( - "Environment initialized with {} nodes, all starting from genesis block {} (hash: {})", - node_count, genesis_block_info.number, genesis_block_info.hash + "Environment initialized with {} nodes, starting from block {} (hash: {})", + self.network.node_count, initial_block_info.number, initial_block_info.hash ); - // TODO: For each block in self.blocks, replay it on the node + Ok(()) + } + + /// Wait for all nodes to be ready to accept RPC requests + async fn wait_for_nodes_ready( + &self, + node_clients: &[crate::testsuite::NodeClient], + ) -> Result<()> { + for (idx, client) in node_clients.iter().enumerate() { + let mut retry_count = 0; + const MAX_RETRIES: usize = 10; + + while retry_count < MAX_RETRIES { + if client.is_ready().await { + debug!("Node {idx} RPC endpoint is ready"); + break; + } + retry_count += 1; + debug!("Node {idx} RPC endpoint not ready, retry {retry_count}/{MAX_RETRIES}"); + sleep(Duration::from_millis(500)).await; + } + + if retry_count == MAX_RETRIES { + return Err(eyre!( + "Failed to connect to node {idx} RPC endpoint after {MAX_RETRIES} retries" + )); + } + } Ok(()) } + + /// Get block info for a given block number or tag + async fn get_block_info( + &self, + client: &crate::testsuite::NodeClient, + block: BlockNumberOrTag, + ) -> Result<crate::testsuite::BlockInfo> { + let block = client + .get_block_by_number(block) + .await?
+ .ok_or_else(|| eyre!("Block {:?} not found", block))?; + + Ok(crate::testsuite::BlockInfo { + hash: block.header.hash, + number: block.header.number, + timestamp: block.header.timestamp, + }) + } } /// Genesis block configuration diff --git a/crates/e2e-test-utils/src/testsuite/examples.rs b/crates/e2e-test-utils/tests/e2e-testsuite/main.rs similarity index 55% rename from crates/e2e-test-utils/src/testsuite/examples.rs rename to crates/e2e-test-utils/tests/e2e-testsuite/main.rs index fc7afd04359..96c976a44ca 100644 --- a/crates/e2e-test-utils/src/testsuite/examples.rs +++ b/crates/e2e-test-utils/tests/e2e-testsuite/main.rs @@ -1,20 +1,128 @@ //! Example tests using the test suite framework. -use crate::testsuite::{ - actions::{ - AssertMineBlock, CaptureBlock, CaptureBlockOnNode, CompareNodeChainTips, CreateFork, - MakeCanonical, ProduceBlocks, ReorgTo, SelectActiveNode, - }, - setup::{NetworkSetup, Setup}, - TestBuilder, -}; use alloy_primitives::{Address, B256}; use alloy_rpc_types_engine::PayloadAttributes; use eyre::Result; use reth_chainspec::{ChainSpecBuilder, MAINNET}; +use reth_e2e_test_utils::{ + test_rlp_utils::{generate_test_blocks, write_blocks_to_rlp}, + testsuite::{ + actions::{ + Action, AssertChainTip, AssertMineBlock, CaptureBlock, CaptureBlockOnNode, + CompareNodeChainTips, CreateFork, MakeCanonical, ProduceBlocks, ReorgTo, + SelectActiveNode, UpdateBlockInfo, + }, + setup::{NetworkSetup, Setup}, + Environment, TestBuilder, + }, +}; use reth_node_api::TreeConfig; use reth_node_ethereum::{EthEngineTypes, EthereumNode}; use std::sync::Arc; +use tempfile::TempDir; +use tracing::debug; + +#[tokio::test] +async fn test_apply_with_import() -> Result<()> { + reth_tracing::init_test_tracing(); + + // Create test chain spec + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis( + serde_json::from_str(include_str!( + "../../../../crates/e2e-test-utils/src/testsuite/assets/genesis.json" + )) + .unwrap(), + ) + .london_activated() + .shanghai_activated() + .cancun_activated() + .build(), + ); + + // Generate test blocks + let test_blocks = generate_test_blocks(&chain_spec, 10); + + // Write blocks to RLP file + let temp_dir = TempDir::new()?; + let rlp_path = temp_dir.path().join("test_chain.rlp"); + write_blocks_to_rlp(&test_blocks, &rlp_path)?; + + // Create setup with imported chain + let mut setup = + Setup::default().with_chain_spec(chain_spec).with_network(NetworkSetup::single_node()); + + // Create environment and apply setup with import + let mut env = Environment::<EthEngineTypes>::default(); + setup.apply_with_import::<EthereumNode>(&mut env, &rlp_path).await?; + + // Now run test actions on the environment with imported chain + // First check what block we're at after import + debug!("Current block info after import: {:?}", env.current_block_info()); + + // Update block info to sync environment state with the node + let mut update_block_info = UpdateBlockInfo::default(); + update_block_info.execute(&mut env).await?; + + // Make the imported chain canonical first + let mut make_canonical = MakeCanonical::new(); + make_canonical.execute(&mut env).await?; + + // Wait for the pipeline to finish processing all stages + debug!("Waiting for pipeline to finish processing imported blocks..."); + let start = std::time::Instant::now(); + loop { + // Check if we can get the block from RPC (indicates pipeline finished) + let client = &env.node_clients[0]; + let block_result = reth_rpc_api::clients::EthApiClient::< + alloy_rpc_types_eth::TransactionRequest, +
alloy_rpc_types_eth::Transaction, + alloy_rpc_types_eth::Block, + alloy_rpc_types_eth::Receipt, + alloy_rpc_types_eth::Header, + >::block_by_number( + &client.rpc, + alloy_eips::BlockNumberOrTag::Number(10), + true, // Include full transaction details + ) + .await; + + if let Ok(Some(block)) = block_result { + if block.header.number == 10 { + debug!("Pipeline finished, block 10 is fully available"); + break; + } + } + + if start.elapsed() > std::time::Duration::from_secs(10) { + return Err(eyre::eyre!("Timeout waiting for pipeline to finish")); + } + + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + } + + // Update block info again after making canonical + let mut update_block_info_2 = UpdateBlockInfo::default(); + update_block_info_2.execute(&mut env).await?; + + // Assert we're at block 10 after import + let mut assert_tip = AssertChainTip::new(10); + assert_tip.execute(&mut env).await?; + + debug!("Successfully imported chain to block 10"); + + // Produce 5 more blocks + let mut produce_blocks = ProduceBlocks::<EthEngineTypes>::new(5); + produce_blocks.execute(&mut env).await?; + + // Assert we're now at block 15 + let mut assert_new_tip = AssertChainTip::new(15); + assert_new_tip.execute(&mut env).await?; + + Ok(()) +} #[tokio::test] async fn test_testsuite_assert_mine_block() -> Result<()> { @@ -24,7 +132,12 @@ async fn test_testsuite_assert_mine_block() -> Result<()> { .with_chain_spec(Arc::new( ChainSpecBuilder::default() .chain(MAINNET.chain) - .genesis(serde_json::from_str(include_str!("assets/genesis.json")).unwrap()) + .genesis( + serde_json::from_str(include_str!( + "../../../../crates/e2e-test-utils/src/testsuite/assets/genesis.json" + )) + .unwrap(), + ) .paris_activated() .build(), )) @@ -61,7 +174,12 @@ async fn test_testsuite_produce_blocks() -> Result<()> { .with_chain_spec(Arc::new( ChainSpecBuilder::default() .chain(MAINNET.chain) - .genesis(serde_json::from_str(include_str!("assets/genesis.json")).unwrap()) + .genesis( + serde_json::from_str(include_str!( + "../../../../crates/e2e-test-utils/src/testsuite/assets/genesis.json" + )) + .unwrap(), + ) .cancun_activated() .build(), )) @@ -85,7 +203,12 @@ async fn test_testsuite_create_fork() -> Result<()> { .with_chain_spec(Arc::new( ChainSpecBuilder::default() .chain(MAINNET.chain) - .genesis(serde_json::from_str(include_str!("assets/genesis.json")).unwrap()) + .genesis( + serde_json::from_str(include_str!( + "../../../../crates/e2e-test-utils/src/testsuite/assets/genesis.json" + )) + .unwrap(), + ) .cancun_activated() .build(), )) @@ -110,7 +233,12 @@ async fn test_testsuite_reorg_with_tagging() -> Result<()> { .with_chain_spec(Arc::new( ChainSpecBuilder::default() .chain(MAINNET.chain) - .genesis(serde_json::from_str(include_str!("assets/genesis.json")).unwrap()) + .genesis( + serde_json::from_str(include_str!( + "../../../../crates/e2e-test-utils/src/testsuite/assets/genesis.json" + )) + .unwrap(), + ) .cancun_activated() .build(), )) @@ -137,7 +265,12 @@ async fn test_testsuite_deep_reorg() -> Result<()> { .with_chain_spec(Arc::new( ChainSpecBuilder::default() .chain(MAINNET.chain) - .genesis(serde_json::from_str(include_str!("assets/genesis.json")).unwrap()) + .genesis( + serde_json::from_str(include_str!( + "../../../../crates/e2e-test-utils/src/testsuite/assets/genesis.json" + )) + .unwrap(), + ) .cancun_activated() .build(), )) @@ -182,7 +315,12 @@ async fn test_testsuite_multinode_block_production() -> Result<()> { .with_chain_spec(Arc::new( ChainSpecBuilder::default() .chain(MAINNET.chain) - 
.genesis(serde_json::from_str(include_str!("assets/genesis.json")).unwrap()) + .genesis( + serde_json::from_str(include_str!( + "../../../../crates/e2e-test-utils/src/testsuite/assets/genesis.json" + )) + .unwrap(), + ) .cancun_activated() .build(), )) diff --git a/crates/engine/invalid-block-hooks/src/witness.rs index 54e18c07a70..b78cf462f52 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -230,8 +230,11 @@ where if let Some(healthy_node_client) = &self.healthy_node_client { // Compare the witness against the healthy node. let healthy_node_witness = futures::executor::block_on(async move { - DebugApiClient::debug_execution_witness(healthy_node_client, block.number().into()) - .await + DebugApiClient::<()>::debug_execution_witness( + healthy_node_client, + block.number().into(), + ) + .await })?; let healthy_path = self.save_file( diff --git a/crates/engine/local/src/miner.rs index a3318f1f5c2..290790d61f5 100644 --- a/crates/engine/local/src/miner.rs +++ b/crates/engine/local/src/miner.rs @@ -69,7 +69,7 @@ impl Future for MiningMode { } } -/// Local miner advancing the chain/ +/// Local miner advancing the chain #[derive(Debug)] pub struct LocalMiner { /// The payload attribute builder for the engine diff --git a/crates/engine/local/src/payload.rs index 070936381c9..eb9a3370aeb 100644 --- a/crates/engine/local/src/payload.rs +++ b/crates/engine/local/src/payload.rs @@ -11,7 +11,8 @@ use std::sync::Arc; #[derive(Debug)] #[non_exhaustive] pub struct LocalPayloadAttributesBuilder<ChainSpec> { - chain_spec: Arc<ChainSpec>, + /// The chainspec + pub chain_spec: Arc<ChainSpec>, } impl<ChainSpec> LocalPayloadAttributesBuilder<ChainSpec> { @@ -80,19 +81,3 @@ where } } } - -/// A temporary workaround to support local payload engine launcher for arbitrary payload -/// attributes. -// TODO(mattsse): This should be reworked so that LocalPayloadAttributesBuilder can be implemented -// for any -pub trait UnsupportedLocalAttributes: Send + Sync + 'static {} - -impl<T, ChainSpec> PayloadAttributesBuilder<T> for LocalPayloadAttributesBuilder<ChainSpec> -where - ChainSpec: Send + Sync + 'static, - T: UnsupportedLocalAttributes, -{ - fn build(&self, _: u64) -> T { - panic!("Unsupported payload attributes") - } -} diff --git a/crates/engine/primitives/src/config.rs index 9794caf4473..ccff97bc064 100644 --- a/crates/engine/primitives/src/config.rs +++ b/crates/engine/primitives/src/config.rs @@ -65,6 +65,8 @@ pub struct TreeConfig { always_compare_trie_updates: bool, /// Whether to disable cross-block caching and parallel prewarming. disable_caching_and_prewarming: bool, + /// Whether to enable the parallel sparse trie state root algorithm. + enable_parallel_sparse_trie: bool, /// Whether to enable state provider metrics. state_provider_metrics: bool, /// Cross-block cache size in bytes.
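The hunks around this point thread the new `enable_parallel_sparse_trie` flag through the `TreeConfig` struct, its `Default` impl, the long constructor, a getter, and a builder-style setter. A minimal sketch of how a caller could opt in, assuming only the `TreeConfig` API shown in this diff and the `reth_engine_primitives` re-export used elsewhere in the patch:

```rust
use reth_engine_primitives::TreeConfig;

fn main() {
    // The Default impl below keeps the parallel sparse trie disabled;
    // the new builder-style setter flips it on.
    let config = TreeConfig::default().with_enable_parallel_sparse_trie(true);
    assert!(config.enable_parallel_sparse_trie());
}
```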
@@ -106,6 +108,7 @@ impl Default for TreeConfig { legacy_state_root: false, always_compare_trie_updates: false, disable_caching_and_prewarming: false, + enable_parallel_sparse_trie: false, state_provider_metrics: false, cross_block_cache_size: DEFAULT_CROSS_BLOCK_CACHE_SIZE, has_enough_parallelism: has_enough_parallelism(), @@ -130,6 +133,7 @@ impl TreeConfig { legacy_state_root: bool, always_compare_trie_updates: bool, disable_caching_and_prewarming: bool, + enable_parallel_sparse_trie: bool, state_provider_metrics: bool, cross_block_cache_size: u64, has_enough_parallelism: bool, @@ -148,6 +152,7 @@ impl TreeConfig { legacy_state_root, always_compare_trie_updates, disable_caching_and_prewarming, + enable_parallel_sparse_trie, state_provider_metrics, cross_block_cache_size, has_enough_parallelism, @@ -205,6 +210,11 @@ impl TreeConfig { self.state_provider_metrics } + /// Returns whether or not the parallel sparse trie is enabled. + pub const fn enable_parallel_sparse_trie(&self) -> bool { + self.enable_parallel_sparse_trie + } + /// Returns whether or not cross-block caching and parallel prewarming should be used. pub const fn disable_caching_and_prewarming(&self) -> bool { self.disable_caching_and_prewarming @@ -329,6 +339,15 @@ impl TreeConfig { self } + /// Setter for enabling the parallel sparse trie + pub const fn with_enable_parallel_sparse_trie( + mut self, + enable_parallel_sparse_trie: bool, + ) -> Self { + self.enable_parallel_sparse_trie = enable_parallel_sparse_trie; + self + } + /// Setter for maximum number of concurrent proof tasks. pub const fn with_max_proof_task_concurrency( mut self, diff --git a/crates/engine/primitives/src/forkchoice.rs index 2fe47d807c5..69cb5990711 100644 --- a/crates/engine/primitives/src/forkchoice.rs +++ b/crates/engine/primitives/src/forkchoice.rs @@ -56,7 +56,7 @@ impl ForkchoiceStateTracker { self.latest_status().is_some_and(|s| s.is_syncing()) } - /// Returns whether the latest received FCU is syncing: [`ForkchoiceStatus::Invalid`] + /// Returns whether the latest received FCU is invalid: [`ForkchoiceStatus::Invalid`] pub fn is_latest_invalid(&self) -> bool { self.latest_status().is_some_and(|s| s.is_invalid()) } diff --git a/crates/engine/primitives/src/lib.rs index b9ac213e5d9..75e3bd81ca7 100644 --- a/crates/engine/primitives/src/lib.rs +++ b/crates/engine/primitives/src/lib.rs @@ -104,15 +104,30 @@ pub trait EngineTypes: + 'static; } +/// Type that validates the payloads processed by the engine. +pub trait EngineValidator<Types: EngineTypes>: PayloadValidator<Types> { + /// Validates the presence or exclusion of fork-specific fields based on the payload attributes + /// and the message version. + fn validate_version_specific_fields( + &self, + version: EngineApiMessageVersion, + payload_or_attrs: PayloadOrAttributes<'_, Types::ExecutionData, Types::PayloadAttributes>, + ) -> Result<(), EngineObjectValidationError>; + + /// Ensures that the payload attributes are valid for the given [`EngineApiMessageVersion`]. + fn ensure_well_formed_attributes( + &self, + version: EngineApiMessageVersion, + attributes: &Types::PayloadAttributes, + ) -> Result<(), EngineObjectValidationError>; +} + /// Type that validates an [`ExecutionPayload`]. #[auto_impl::auto_impl(&, Arc)] -pub trait PayloadValidator: Send + Sync + Unpin + 'static { +pub trait PayloadValidator<Types: PayloadTypes>: Send + Sync + Unpin + 'static { /// The block type used by the engine.
type Block: Block; - /// The execution payload type used by the engine. - type ExecutionData; - /// Ensures that the given payload does not violate any consensus rules that concern the block's /// layout. /// @@ -123,7 +138,7 @@ pub trait PayloadValidator: Send + Sync + Unpin + 'static { /// engine-API specification. fn ensure_well_formed_payload( &self, - payload: Self::ExecutionData, + payload: Types::ExecutionData, ) -> Result<RecoveredBlock<Self::Block>, NewPayloadError>; /// Verifies payload post-execution w.r.t. hashed state updates. @@ -135,30 +150,6 @@ pub trait PayloadValidator: Send + Sync + Unpin + 'static { // method not used by l1 Ok(()) } -} - -/// Type that validates the payloads processed by the engine. -pub trait EngineValidator<Types: EngineTypes>: - PayloadValidator<ExecutionData = Types::ExecutionData> -{ - /// Validates the presence or exclusion of fork-specific fields based on the payload attributes - /// and the message version. - fn validate_version_specific_fields( - &self, - version: EngineApiMessageVersion, - payload_or_attrs: PayloadOrAttributes< - '_, - Types::ExecutionData, - <Types as PayloadTypes>::PayloadAttributes, - >, - ) -> Result<(), EngineObjectValidationError>; - - /// Ensures that the payload attributes are valid for the given [`EngineApiMessageVersion`]. - fn ensure_well_formed_attributes( - &self, - version: EngineApiMessageVersion, - attributes: &<Types as PayloadTypes>::PayloadAttributes, - ) -> Result<(), EngineObjectValidationError>; /// Validates the payload attributes with respect to the header. /// @@ -168,10 +159,10 @@ /// > timestamp /// > of a block referenced by forkchoiceState.headBlockHash. /// - /// See also [engine api spec](https://github.com/ethereum/execution-apis/tree/fe8e13c288c592ec154ce25c534e26cb7ce0530d/src/engine) + /// See also: <https://github.com/ethereum/execution-apis/tree/fe8e13c288c592ec154ce25c534e26cb7ce0530d/src/engine> fn validate_payload_attributes_against_header( &self, - attr: &<Types as PayloadTypes>::PayloadAttributes, + attr: &Types::PayloadAttributes, header: &<Self::Block as Block>::Header, ) -> Result<(), InvalidPayloadAttributesError> { if attr.timestamp() <= header.timestamp() { diff --git a/crates/engine/primitives/src/message.rs index 283f6a4135b..6f67d59d8f0 100644 --- a/crates/engine/primitives/src/message.rs +++ b/crates/engine/primitives/src/message.rs @@ -1,6 +1,5 @@ use crate::{ - error::BeaconForkChoiceUpdateError, BeaconOnNewPayloadError, EngineApiMessageVersion, - ExecutionPayload, ForkchoiceStatus, + error::BeaconForkChoiceUpdateError, BeaconOnNewPayloadError, ExecutionPayload, ForkchoiceStatus, }; use alloy_rpc_types_engine::{ ForkChoiceUpdateResult, ForkchoiceState, ForkchoiceUpdateError, ForkchoiceUpdated, PayloadId, @@ -15,7 +14,7 @@ use core::{ use futures::{future::Either, FutureExt, TryFutureExt}; use reth_errors::RethResult; use reth_payload_builder_primitives::PayloadBuilderError; -use reth_payload_primitives::PayloadTypes; +use reth_payload_primitives::{EngineApiMessageVersion, PayloadTypes}; use tokio::sync::{mpsc::UnboundedSender, oneshot}; /// Represents the outcome of forkchoice update.
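The default `validate_payload_attributes_against_header` retained above enforces the engine-API rule that payload attributes must carry a timestamp strictly greater than that of the head block they build on. A standalone restatement of just that comparison (a hypothetical helper for illustration, not part of this patch):

```rust
/// Mirrors the `attr.timestamp() <= header.timestamp()` rejection above:
/// attributes are only acceptable if they are strictly newer than the head.
fn attributes_newer_than_head(attr_timestamp: u64, head_timestamp: u64) -> bool {
    attr_timestamp > head_timestamp
}

fn main() {
    assert!(attributes_newer_than_head(1_700_000_012, 1_700_000_000));
    // An equal timestamp is rejected, matching the `<=` check in the trait.
    assert!(!attributes_newer_than_head(1_700_000_000, 1_700_000_000));
}
```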
diff --git a/crates/engine/service/Cargo.toml b/crates/engine/service/Cargo.toml index e2932ec6faa..89eb6bdda51 100644 --- a/crates/engine/service/Cargo.toml +++ b/crates/engine/service/Cargo.toml @@ -39,7 +39,6 @@ reth-ethereum-consensus.workspace = true reth-ethereum-engine-primitives.workspace = true reth-evm-ethereum.workspace = true reth-exex-types.workspace = true -reth-chainspec.workspace = true reth-primitives-traits.workspace = true reth-node-ethereum.workspace = true diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index f634d2a3264..367186995f9 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -2,13 +2,13 @@ use futures::{Stream, StreamExt}; use pin_project::pin_project; use reth_chainspec::EthChainSpec; use reth_consensus::{ConsensusError, FullConsensus}; -use reth_engine_primitives::{BeaconConsensusEngineEvent, BeaconEngineMessage, EngineValidator}; +use reth_engine_primitives::{BeaconConsensusEngineEvent, BeaconEngineMessage}; use reth_engine_tree::{ backfill::PipelineSync, download::BasicBlockDownloader, engine::{EngineApiKind, EngineApiRequest, EngineApiRequestHandler, EngineHandler}, persistence::PersistenceHandle, - tree::{EngineApiTreeHandler, InvalidBlockHook, TreeConfig}, + tree::{EngineApiTreeHandler, EngineValidator, TreeConfig}, }; pub use reth_engine_tree::{ chain::{ChainEvent, ChainOrchestrator}, @@ -82,12 +82,11 @@ where payload_builder: PayloadBuilderHandle, payload_validator: V, tree_config: TreeConfig, - invalid_block_hook: Box>, sync_metrics_tx: MetricEventsSender, evm_config: C, ) -> Self where - V: EngineValidator>, + V: EngineValidator, C: ConfigureEvm + 'static, { let engine_kind = @@ -108,7 +107,6 @@ where payload_builder, canonical_in_memory_state, tree_config, - invalid_block_hook, engine_kind, evm_config, ); @@ -150,7 +148,10 @@ mod tests { use super::*; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_engine_primitives::BeaconEngineMessage; - use reth_engine_tree::{test_utils::TestPipelineBuilder, tree::NoopInvalidBlockHook}; + use reth_engine_tree::{ + test_utils::TestPipelineBuilder, + tree::{BasicEngineValidator, NoopInvalidBlockHook}, + }; use reth_ethereum_consensus::EthBeaconConsensus; use reth_ethereum_engine_primitives::EthEngineTypes; use reth_evm_ethereum::EthEvmConfig; @@ -195,6 +196,15 @@ mod tests { let pruner = Pruner::new_with_factory(provider_factory.clone(), vec![], 0, 0, None, rx); let evm_config = EthEvmConfig::new(chain_spec.clone()); + let engine_validator = BasicEngineValidator::new( + blockchain_db.clone(), + consensus.clone(), + evm_config.clone(), + engine_payload_validator, + TreeConfig::default(), + Box::new(NoopInvalidBlockHook::default()), + ); + let (sync_metrics_tx, _sync_metrics_rx) = unbounded_channel(); let (tx, _rx) = unbounded_channel(); let _eth_service = EngineService::new( @@ -208,9 +218,8 @@ mod tests { blockchain_db, pruner, PayloadBuilderHandle::new(tx), - engine_payload_validator, + engine_validator, TreeConfig::default(), - Box::new(NoopInvalidBlockHook::default()), sync_metrics_tx, evm_config, ); diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index b5515142cad..6ed37c342c5 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -32,6 +32,7 @@ reth-tasks.workspace = true reth-trie-db.workspace = true reth-trie-parallel.workspace = true reth-trie-sparse = { workspace = true, features = ["std", "metrics"] } +reth-trie-sparse-parallel = { workspace = true, 
features = ["std"] } reth-trie.workspace = true # alloy @@ -71,7 +72,7 @@ reth-tracing = { workspace = true, optional = true } [dev-dependencies] # reth -reth-evm-ethereum.workspace = true +reth-evm-ethereum = { workspace = true, features = ["test-utils"] } reth-chain-state = { workspace = true, features = ["test-utils"] } reth-chainspec.workspace = true reth-db-common.workspace = true @@ -81,18 +82,15 @@ reth-evm = { workspace = true, features = ["test-utils"] } reth-exex-types.workspace = true reth-network-p2p = { workspace = true, features = ["test-utils"] } reth-prune-types.workspace = true -reth-prune.workspace = true reth-rpc-convert.workspace = true reth-stages = { workspace = true, features = ["test-utils"] } reth-static-file.workspace = true reth-testing-utils.workspace = true reth-tracing.workspace = true -reth-trie-db.workspace = true reth-node-ethereum.workspace = true reth-e2e-test-utils.workspace = true # alloy -alloy-rlp.workspace = true revm-state.workspace = true assert_matches.workspace = true @@ -139,3 +137,7 @@ test-utils = [ "reth-node-ethereum/test-utils", "reth-evm-ethereum/test-utils", ] + +[[test]] +name = "e2e_testsuite" +path = "tests/e2e-testsuite/main.rs" diff --git a/crates/engine/tree/benches/state_root_task.rs b/crates/engine/tree/benches/state_root_task.rs index 0d19bad0b14..f4a497a709a 100644 --- a/crates/engine/tree/benches/state_root_task.rs +++ b/crates/engine/tree/benches/state_root_task.rs @@ -9,7 +9,6 @@ use alloy_primitives::{Address, B256}; use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; use proptest::test_runner::TestRunner; use rand::Rng; -use reth_chain_state::EthPrimitives; use reth_chainspec::ChainSpec; use reth_db_common::init::init_genesis; use reth_engine_tree::tree::{ @@ -222,7 +221,7 @@ fn bench_state_root(c: &mut Criterion) { let state_updates = create_bench_state_updates(params); setup_provider(&factory, &state_updates).expect("failed to setup provider"); - let payload_processor = PayloadProcessor::::new( + let payload_processor = PayloadProcessor::new( WorkloadExecutor::default(), EthEvmConfig::new(factory.chain_spec()), &TreeConfig::default(), diff --git a/crates/engine/tree/docs/mermaid/state-root-task.mmd b/crates/engine/tree/docs/mermaid/state-root-task.mmd index 011196d9e0d..d1993035f21 100644 --- a/crates/engine/tree/docs/mermaid/state-root-task.mmd +++ b/crates/engine/tree/docs/mermaid/state-root-task.mmd @@ -4,7 +4,7 @@ flowchart TD StateRootMessage::PrefetchProofs StateRootMessage::EmptyProof StateRootMessage::ProofCalculated - StataRootMessage::FinishedStateUpdates + StateRootMessage::FinishedStateUpdates end subgraph StateRootTask[State Root Task thread] @@ -40,5 +40,5 @@ flowchart TD StateRootMessage::ProofCalculated --> NewProof NewProof ---> MultiProofCompletion ProofSequencerCondition -->|Yes, send multiproof and state update| SparseTrieUpdate - StataRootMessage::FinishedStateUpdates --> EndCondition1 + StateRootMessage::FinishedStateUpdates --> EndCondition1 EndCondition3 -->|Close SparseTrieUpdate channel| SparseTrieUpdate diff --git a/crates/engine/tree/docs/root.md b/crates/engine/tree/docs/root.md index d3c4e1e5757..a5b9bcb1d48 100644 --- a/crates/engine/tree/docs/root.md +++ b/crates/engine/tree/docs/root.md @@ -10,7 +10,7 @@ root of the new state. 4. Compares the root with the one received in the block header. 5. Considers the block valid. 
-This document describes the lifecycle of a payload with the focus on state root calculation, +This document describes the lifecycle of a payload with a focus on state root calculation, from the moment the payload is received, to the moment we have a new state root. We will look at the following components: @@ -26,7 +26,7 @@ We will look at the following components: It all starts with the `engine_newPayload` request coming from the [Consensus Client](https://ethereum.org/en/developers/docs/nodes-and-clients/#consensus-clients). We extract the block from the payload, and eventually pass it to the `EngineApiTreeHandler::insert_block_inner` -method which executes the block and calculates the state root. +method that executes the block and calculates the state root. https://github.com/paradigmxyz/reth/blob/2ba54bf1c1f38c7173838f37027315a09287c20a/crates/engine/tree/src/tree/mod.rs#L2359-L2362 Let's walk through the steps involved in the process. @@ -166,7 +166,7 @@ and send `StateRootMessage::ProofCalculated` to the [State Root Task](#state-roo ### Exhausting the pending queue -To exhaust the pending queue from the step 2 of the `spawn_or_queue` described above, +To exhaust the pending queue from step 2 of the `spawn_or_queue` described above, the [State Root Task](#state-root-task) calls into another method `on_calculation_complete` every time a proof is calculated. https://github.com/paradigmxyz/reth/blob/2ba54bf1c1f38c7173838f37027315a09287c20a/crates/engine/tree/src/tree/root.rs#L379-L387 @@ -230,11 +230,11 @@ https://github.com/paradigmxyz/reth/blob/2ba54bf1c1f38c7173838f37027315a09287c20 https://github.com/paradigmxyz/reth/blob/2ba54bf1c1f38c7173838f37027315a09287c20a/crates/engine/tree/src/tree/root.rs#L1093 3. Update accounts trie https://github.com/paradigmxyz/reth/blob/2ba54bf1c1f38c7173838f37027315a09287c20a/crates/engine/tree/src/tree/root.rs#L1133 -4. Calculate keccak hashes of the nodes below the certain level +4. Calculate keccak hashes of the nodes below a certain level https://github.com/paradigmxyz/reth/blob/2ba54bf1c1f38c7173838f37027315a09287c20a/crates/engine/tree/src/tree/root.rs#L1139 As you can see, we do not calculate the state root hash of the accounts trie -(the one that will be the result of the whole task), but instead calculate only the certain hashes. +(the one that will be the result of the whole task), but instead calculate only certain hashes. This is an optimization that comes from the fact that we will likely update the top 2-3 levels of the trie in every transaction, so doing that work every time would be wasteful. diff --git a/crates/engine/tree/src/tree/error.rs b/crates/engine/tree/src/tree/error.rs index b7932f876ed..f7b1111df06 100644 --- a/crates/engine/tree/src/tree/error.rs +++ b/crates/engine/tree/src/tree/error.rs @@ -5,6 +5,7 @@ use alloy_primitives::B256; use reth_consensus::ConsensusError; use reth_errors::{BlockExecutionError, BlockValidationError, ProviderError}; use reth_evm::execute::InternalBlockExecutionError; +use reth_payload_primitives::NewPayloadError; use reth_primitives_traits::{Block, BlockBody, SealedBlock}; use tokio::sync::oneshot::error::TryRecvError; @@ -189,3 +190,14 @@ pub enum InsertBlockValidationError { #[error(transparent)] Validation(#[from] BlockValidationError), } + +/// Errors that may occur when inserting a payload. 
+#[derive(Debug, thiserror::Error)] +pub enum InsertPayloadError { + /// Block validation error + #[error(transparent)] + Block(#[from] InsertBlockError), + /// Payload validation error + #[error(transparent)] + Payload(#[from] NewPayloadError), +} diff --git a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index f78756f72e9..96002180049 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ b/crates/engine/tree/src/tree/metrics.rs @@ -15,13 +15,13 @@ pub(crate) struct EngineApiMetrics { /// Metrics for block validation pub(crate) block_validation: BlockValidationMetrics, /// A copy of legacy blockchain tree metrics, to be replaced when we replace the old tree - pub(crate) tree: TreeMetrics, + pub tree: TreeMetrics, } /// Metrics for the entire blockchain tree #[derive(Metrics)] #[metrics(scope = "blockchain_tree")] -pub(super) struct TreeMetrics { +pub(crate) struct TreeMetrics { /// The highest block number in the canonical chain pub canonical_chain_height: Gauge, /// The number of reorgs @@ -71,6 +71,10 @@ pub(crate) struct BlockValidationMetrics { pub(crate) state_root_duration: Gauge, /// Trie input computation duration pub(crate) trie_input_duration: Histogram, + /// Payload conversion and validation latency + pub(crate) payload_validation_duration: Gauge, + /// Histogram of payload validation latency + pub(crate) payload_validation_histogram: Histogram, } impl BlockValidationMetrics { @@ -81,6 +85,13 @@ impl BlockValidationMetrics { self.state_root_duration.set(elapsed_as_secs); self.state_root_histogram.record(elapsed_as_secs); } + + /// Records a new payload validation time, updating both the histogram and the payload + /// validation gauge + pub(crate) fn record_payload_validation(&self, elapsed_as_secs: f64) { + self.payload_validation_duration.set(elapsed_as_secs); + self.payload_validation_histogram.record(elapsed_as_secs); + } } /// Metrics for the blockchain tree block buffer diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index a88c1097875..55b2bc4c21b 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -3,22 +3,16 @@ use crate::{ chain::FromOrchestrator, engine::{DownloadRequest, EngineApiEvent, EngineApiKind, EngineApiRequest, FromEngine}, persistence::PersistenceHandle, - tree::{ - cached_state::CachedStateProvider, executor::WorkloadExecutor, metrics::EngineApiMetrics, - }, + tree::{error::InsertPayloadError, metrics::EngineApiMetrics, payload_validator::TreeCtx}, }; use alloy_consensus::BlockHeader; -use alloy_eips::{merge::EPOCH_SLOTS, BlockNumHash, NumHash}; -use alloy_evm::block::BlockExecutor; -use alloy_primitives::{Address, B256}; +use alloy_eips::{eip1898::BlockWithParent, merge::EPOCH_SLOTS, BlockNumHash, NumHash}; +use alloy_primitives::B256; use alloy_rpc_types_engine::{ ForkchoiceState, PayloadStatus, PayloadStatusEnum, PayloadValidationError, }; -use error::{InsertBlockError, InsertBlockErrorKind, InsertBlockFatalError}; -use instrumented_state::InstrumentedStateProvider; -use payload_processor::sparse_trie::StateRootComputeOutcome; +use error::{InsertBlockError, InsertBlockFatalError}; use persistence_state::CurrentPersistenceAction; -use precompile_cache::{CachedPrecompile, CachedPrecompileMetrics, PrecompileCacheMap}; use reth_chain_state::{ CanonicalInMemoryState, ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates, MemoryOverlayStateProvider, NewCanonicalChain, @@ -26,31 +20,27 @@ use reth_chain_state::{ use 
reth_consensus::{Consensus, FullConsensus}; pub use reth_engine_primitives::InvalidBlockHook; use reth_engine_primitives::{ - BeaconConsensusEngineEvent, BeaconEngineMessage, BeaconOnNewPayloadError, EngineValidator, - ExecutionPayload, ForkchoiceStateTracker, OnForkChoiceUpdated, + BeaconConsensusEngineEvent, BeaconEngineMessage, BeaconOnNewPayloadError, ExecutionPayload, + ForkchoiceStateTracker, OnForkChoiceUpdated, }; use reth_errors::{ConsensusError, ProviderResult}; -use reth_evm::{ConfigureEvm, Evm, SpecFor}; +use reth_evm::ConfigureEvm; use reth_payload_builder::PayloadBuilderHandle; -use reth_payload_primitives::{EngineApiMessageVersion, PayloadBuilderAttributes, PayloadTypes}; -use reth_primitives_traits::{ - Block, GotExpected, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader, +use reth_payload_primitives::{ + BuiltPayload, EngineApiMessageVersion, NewPayloadError, PayloadBuilderAttributes, PayloadTypes, }; +use reth_primitives_traits::{Block, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader}; use reth_provider::{ providers::ConsistentDbView, BlockNumReader, BlockReader, DBProvider, DatabaseProviderFactory, - ExecutionOutcome, HashedPostStateProvider, ProviderError, StateCommitmentProvider, - StateProvider, StateProviderBox, StateProviderFactory, StateReader, StateRootProvider, - TransactionVariant, + HashedPostStateProvider, ProviderError, StateCommitmentProvider, StateProviderBox, + StateProviderFactory, StateReader, StateRootProvider, TransactionVariant, }; -use reth_revm::{database::StateProviderDatabase, State}; +use reth_revm::database::StateProviderDatabase; use reth_stages_api::ControlFlow; -use reth_trie::{updates::TrieUpdates, HashedPostState, TrieInput}; +use reth_trie::{HashedPostState, TrieInput}; use reth_trie_db::{DatabaseHashedPostState, StateCommitment}; -use reth_trie_parallel::root::{ParallelStateRoot, ParallelStateRootError}; use state::TreeState; use std::{ - borrow::Cow, - collections::HashMap, fmt::Debug, sync::{ mpsc::{Receiver, RecvError, RecvTimeoutError, Sender}, @@ -66,14 +56,13 @@ use tracing::*; mod block_buffer; mod cached_state; -#[cfg(test)] -mod e2e_tests; pub mod error; mod instrumented_state; mod invalid_block_hook; mod invalid_headers; mod metrics; mod payload_processor; +pub mod payload_validator; mod persistence_state; pub mod precompile_cache; #[cfg(test)] @@ -87,9 +76,9 @@ pub use block_buffer::BlockBuffer; pub use invalid_block_hook::{InvalidBlockHooks, NoopInvalidBlockHook}; pub use invalid_headers::InvalidHeaderCache; pub use payload_processor::*; +pub use payload_validator::{BasicEngineValidator, EngineValidator}; pub use persistence_state::PersistenceState; pub use reth_engine_primitives::TreeConfig; -use reth_evm::execute::BlockExecutionOutput; pub mod state; @@ -267,18 +256,10 @@ where config: TreeConfig, /// Metrics for the engine api. metrics: EngineApiMetrics, - /// An invalid block hook. - invalid_block_hook: Box>, /// The engine API variant of this handler engine_kind: EngineApiKind, - /// The type responsible for processing new payloads - payload_processor: PayloadProcessor, /// The EVM configuration. evm_config: C, - /// Precompile cache map. - precompile_cache_map: PrecompileCacheMap>, - /// Metrics for precompile cache, stored per address to avoid re-allocation. 
- precompile_cache_metrics: HashMap, } impl std::fmt::Debug @@ -301,9 +282,7 @@ where .field("payload_builder", &self.payload_builder) .field("config", &self.config) .field("metrics", &self.metrics) - .field("invalid_block_hook", &format!("{:p}", self.invalid_block_hook)) .field("engine_kind", &self.engine_kind) - .field("payload_processor", &self.payload_processor) .field("evm_config", &self.evm_config) .finish() } @@ -323,8 +302,8 @@ where

::Provider: BlockReader, C: ConfigureEvm + 'static, - T: PayloadTypes, - V: EngineValidator, + T: PayloadTypes>, + V: EngineValidator, { /// Creates a new [`EngineApiTreeHandler`]. #[expect(clippy::too_many_arguments)] @@ -344,15 +323,6 @@ where ) -> Self { let (incoming_tx, incoming) = std::sync::mpsc::channel(); - let precompile_cache_map = PrecompileCacheMap::default(); - - let payload_processor = PayloadProcessor::new( - WorkloadExecutor::default(), - evm_config.clone(), - &config, - precompile_cache_map.clone(), - ); - Self { provider, consensus, @@ -368,20 +338,11 @@ where config, metrics: Default::default(), incoming_tx, - invalid_block_hook: Box::new(NoopInvalidBlockHook), engine_kind, - payload_processor, evm_config, - precompile_cache_map, - precompile_cache_metrics: HashMap::new(), } } - /// Sets the invalid block hook. - fn set_invalid_block_hook(&mut self, invalid_block_hook: Box>) { - self.invalid_block_hook = invalid_block_hook; - } - /// Creates a new [`EngineApiTreeHandler`] instance and spawns it in its /// own thread. /// @@ -396,7 +357,6 @@ where payload_builder: PayloadBuilderHandle, canonical_in_memory_state: CanonicalInMemoryState, config: TreeConfig, - invalid_block_hook: Box>, kind: EngineApiKind, evm_config: C, ) -> (Sender, N::Block>>, UnboundedReceiver>) @@ -417,7 +377,7 @@ where kind, ); - let mut task = Self::new( + let task = Self::new( provider, consensus, payload_validator, @@ -431,7 +391,6 @@ where kind, evm_config, ); - task.set_invalid_block_hook(invalid_block_hook); let incoming = task.incoming_tx.clone(); std::thread::Builder::new().name("Tree Task".to_string()).spawn(|| task.run()).unwrap(); (incoming, outgoing) @@ -528,6 +487,8 @@ where trace!(target: "engine::tree", "invoked new payload"); self.metrics.engine.new_payload_messages.increment(1); + let validation_start = Instant::now(); + // Ensures that the given payload does not violate any consensus rules that concern the // block's layout, like: // - missing or invalid base fee @@ -554,48 +515,43 @@ where // // This validation **MUST** be instantly run in all cases even during active sync process. let parent_hash = payload.parent_hash(); - let block = match self.payload_validator.ensure_well_formed_payload(payload) { - Ok(block) => block, - Err(error) => { - error!(target: "engine::tree", %error, "Invalid payload"); - // we need to convert the error to a payload status (response to the CL) - - let latest_valid_hash = - if error.is_block_hash_mismatch() || error.is_invalid_versioned_hashes() { - // Engine-API rules: - // > `latestValidHash: null` if the blockHash validation has failed () - // > `latestValidHash: null` if the expected and the actual arrays don't match () - None - } else { - self.latest_valid_hash_for_invalid_payload(parent_hash)? 
- }; - let status = PayloadStatusEnum::from(error); - return Ok(TreeOutcome::new(PayloadStatus::new(status, latest_valid_hash))) - } - }; + self.metrics + .block_validation + .record_payload_validation(validation_start.elapsed().as_secs_f64()); - let num_hash = block.num_hash(); + let num_hash = payload.num_hash(); let engine_event = BeaconConsensusEngineEvent::BlockReceived(num_hash); self.emit_event(EngineApiEvent::BeaconConsensus(engine_event)); - let block_hash = block.hash(); + let block_hash = num_hash.hash; let mut lowest_buffered_ancestor = self.lowest_buffered_ancestor_or(block_hash); if lowest_buffered_ancestor == block_hash { - lowest_buffered_ancestor = block.parent_hash(); - } + lowest_buffered_ancestor = parent_hash; + } + + // now check if the block has an invalid ancestor + if let Some(invalid) = self.state.invalid_headers.get(&lowest_buffered_ancestor) { + // Here we might have 2 cases + // 1. the block is well formed and indeed links to an invalid header, meaning we should + // remember it as invalid + // 2. the block is not well formed (i.e block hash is incorrect), and we should just + // return an error and forget it + let block = match self.payload_validator.ensure_well_formed_payload(payload) { + Ok(block) => block, + Err(error) => { + let status = self.on_new_payload_error(error, parent_hash)?; + return Ok(TreeOutcome::new(status)) + } + }; - // now check the block itself - if let Some(status) = - self.check_invalid_ancestor_with_head(lowest_buffered_ancestor, &block)? - { + let status = self.on_invalid_new_payload(block.into_sealed_block(), invalid)?; return Ok(TreeOutcome::new(status)) } let status = if self.backfill_sync_state.is_idle() { let mut latest_valid_hash = None; - let num_hash = block.num_hash(); - match self.insert_block(block) { + match self.insert_payload(payload) { Ok(status) => { let status = match status { InsertPayloadOk::Inserted(BlockStatus::Valid) => { @@ -616,12 +572,25 @@ where PayloadStatus::new(status, latest_valid_hash) } - Err(error) => self.on_insert_block_error(error)?, + Err(error) => match error { + InsertPayloadError::Block(error) => self.on_insert_block_error(error)?, + InsertPayloadError::Payload(error) => { + self.on_new_payload_error(error, parent_hash)? + } + }, } - } else if let Err(error) = self.buffer_block(block) { - self.on_insert_block_error(error)? } else { - PayloadStatus::from_status(PayloadStatusEnum::Syncing) + match self.payload_validator.ensure_well_formed_payload(payload) { + // if the block is well-formed, buffer it for later + Ok(block) => { + if let Err(error) = self.buffer_block(block) { + self.on_insert_block_error(error)? + } else { + PayloadStatus::from_status(PayloadStatusEnum::Syncing) + } + } + Err(error) => self.on_new_payload_error(error, parent_hash)?, + } }; let mut outcome = TreeOutcome::new(status); @@ -743,24 +712,24 @@ where /// /// The header is required as an arg, because we might be checking that the header is a fork /// block before it's in the tree state and before it's in the database. - fn is_fork(&self, target_header: &SealedHeader) -> ProviderResult { - let target_hash = target_header.hash(); + fn is_fork(&self, target: BlockWithParent) -> ProviderResult { + let target_hash = target.block.hash; // verify that the given hash is not part of an extension of the canon chain. 
let canonical_head = self.state.tree_state.canonical_head(); let mut current_hash; - let mut current_block = Cow::Borrowed(target_header); + let mut current_block = target; loop { - if current_block.hash() == canonical_head.hash { + if current_block.block.hash == canonical_head.hash { return Ok(false) } // We already passed the canonical head - if current_block.number() <= canonical_head.number { + if current_block.block.number <= canonical_head.number { break } - current_hash = current_block.parent_hash(); + current_hash = current_block.parent; let Some(next_block) = self.sealed_header_by_hash(current_hash)? else { break }; - current_block = Cow::Owned(next_block); + current_block = next_block.block_with_parent(); } // verify that the given hash is not already part of canonical chain stored in memory @@ -776,26 +745,6 @@ where Ok(true) } - /// Check if the given block has any ancestors with missing trie updates. - fn has_ancestors_with_missing_trie_updates( - &self, - target_header: &SealedHeader, - ) -> bool { - // Walk back through the chain starting from the parent of the target block - let mut current_hash = target_header.parent_hash(); - while let Some(block) = self.state.tree_state.blocks_by_hash.get(¤t_hash) { - // Check if this block is missing trie updates - if block.trie.is_missing() { - return true; - } - - // Move to the parent block - current_hash = block.recovered_block().parent_hash(); - } - - false - } - /// Returns the persisting kind for the input block. fn persisting_kind_for(&self, block: &N::BlockHeader) -> PersistingKind { // Check that we're currently persisting. @@ -1659,14 +1608,23 @@ where // check if the check hash was previously marked as invalid let Some(header) = self.state.invalid_headers.get(&check) else { return Ok(None) }; + Ok(Some(self.on_invalid_new_payload(head.clone(), header)?)) + } + + /// Invoked when a new payload received is invalid. + fn on_invalid_new_payload( + &mut self, + head: SealedBlock, + invalid: BlockWithParent, + ) -> ProviderResult { // populate the latest valid hash field - let status = self.prepare_invalid_response(header.parent)?; + let status = self.prepare_invalid_response(invalid.parent)?; // insert the head block into the invalid header cache - self.state.invalid_headers.insert_with_invalid_ancestor(head.hash(), header); - self.emit_event(BeaconConsensusEngineEvent::InvalidBlock(Box::new(head.clone()))); + self.state.invalid_headers.insert_with_invalid_ancestor(head.hash(), invalid); + self.emit_event(BeaconConsensusEngineEvent::InvalidBlock(Box::new(head))); - Ok(Some(status)) + Ok(status) } /// Checks if the given `head` points to an invalid header, which requires a specific response @@ -1970,21 +1928,6 @@ where } } - /// Invoke the invalid block hook if this is a new invalid block. - fn on_invalid_block( - &mut self, - parent_header: &SealedHeader, - block: &RecoveredBlock, - output: &BlockExecutionOutput, - trie_updates: Option<(&TrieUpdates, B256)>, - ) { - if self.state.invalid_headers.get(&block.hash()).is_some() { - // we already marked this block as invalid - return; - } - self.invalid_block_hook.on_invalid_block(parent_header, block, output, trie_updates); - } - /// This handles downloaded blocks that are shown to be disconnected from the canonical chain. 
/// /// This mainly compares the missing parent of the downloaded block with the current canonical @@ -2088,318 +2031,108 @@ where Ok(None) } + fn insert_payload( + &mut self, + payload: T::ExecutionData, + ) -> Result> { + self.insert_block_or_payload( + payload.block_with_parent(), + payload, + |validator, payload, ctx| validator.validate_payload(payload, ctx), + |this, payload| Ok(this.payload_validator.ensure_well_formed_payload(payload)?), + ) + } + fn insert_block( &mut self, block: RecoveredBlock, ) -> Result> { - match self.insert_block_inner(block) { - Ok(result) => Ok(result), - Err((kind, block)) => Err(InsertBlockError::new(block.into_sealed_block(), kind)), - } + self.insert_block_or_payload( + block.block_with_parent(), + block, + |validator, block, ctx| validator.validate_block(block, ctx), + |_, block| Ok(block), + ) } - fn insert_block_inner( + fn insert_block_or_payload( &mut self, - block: RecoveredBlock, - ) -> Result)> { - /// A helper macro that returns the block in case there was an error - macro_rules! ensure_ok { - ($expr:expr) => { - match $expr { - Ok(val) => val, - Err(e) => return Err((e.into(), block)), - } - }; - } - - let block_num_hash = block.num_hash(); - debug!(target: "engine::tree", block=?block_num_hash, parent = ?block.parent_hash(), state_root = ?block.state_root(), "Inserting new block into tree"); - - if ensure_ok!(self.block_by_hash(block.hash())).is_some() { - return Ok(InsertPayloadOk::AlreadySeen(BlockStatus::Valid)) - } - - let start = Instant::now(); - - trace!(target: "engine::tree", block=?block_num_hash, "Validating block consensus"); - - // validate block consensus rules - ensure_ok!(self.validate_block(&block)); - - trace!(target: "engine::tree", block=?block_num_hash, parent=?block.parent_hash(), "Fetching block state provider"); - let Some(provider_builder) = ensure_ok!(self.state_provider_builder(block.parent_hash())) - else { - // we don't have the state required to execute this block, buffering it and find the - // missing parent block - let missing_ancestor = self - .state - .buffer - .lowest_ancestor(&block.parent_hash()) - .map(|block| block.parent_num_hash()) - .unwrap_or_else(|| block.parent_num_hash()); - - self.state.buffer.insert_block(block); - - return Ok(InsertPayloadOk::Inserted(BlockStatus::Disconnected { - head: self.state.tree_state.current_canonical_head, - missing_ancestor, - })) - }; - - // now validate against the parent - let Some(parent_block) = ensure_ok!(self.sealed_header_by_hash(block.parent_hash())) else { - return Err(( - InsertBlockErrorKind::Provider(ProviderError::HeaderNotFound( - block.parent_hash().into(), - )), - block, - )) - }; - - if let Err(e) = - self.consensus.validate_header_against_parent(block.sealed_header(), &parent_block) - { - warn!(target: "engine::tree", ?block, "Failed to validate header {} against parent: {e}", block.hash()); - return Err((e.into(), block)) - } - - let state_provider = ensure_ok!(provider_builder.build()); - - // We only run the parallel state root if we are not currently persisting any blocks or - // persisting blocks that are all ancestors of the one we are executing. - // - // If we're committing ancestor blocks, then: any trie updates being committed are a subset - // of the in-memory trie updates collected before fetching reverts. So any diff in - // reverts (pre vs post commit) is already covered by the in-memory trie updates we - // collect in `compute_state_root_parallel`. 
- // - // See https://github.com/paradigmxyz/reth/issues/12688 for more details - let persisting_kind = self.persisting_kind_for(block.header()); - // don't run parallel if state root fallback is set - let run_parallel_state_root = - persisting_kind.can_run_parallel_state_root() && !self.config.state_root_fallback(); - - // Use state root task only if: - // 1. No persistence is in progress - // 2. Config allows it - // 3. No ancestors with missing trie updates. If any exist, it will mean that every state - // root task proof calculation will include a lot of unrelated paths in the prefix sets. - // It's cheaper to run a parallel state root that does one walk over trie tables while - // accounting for the prefix sets. - let has_ancestors_with_missing_trie_updates = - self.has_ancestors_with_missing_trie_updates(block.sealed_header()); - let mut use_state_root_task = run_parallel_state_root && - self.config.use_state_root_task() && - !has_ancestors_with_missing_trie_updates; - - debug!( - target: "engine::tree", - block=?block_num_hash, - run_parallel_state_root, - has_ancestors_with_missing_trie_updates, - use_state_root_task, - config_allows_state_root_task=self.config.use_state_root_task(), - "Deciding which state root algorithm to run" - ); - - // use prewarming background task - let header = block.clone_sealed_header(); - let txs = block.clone_transactions_recovered().collect(); - let mut handle = if use_state_root_task { - // use background tasks for state root calc - let consistent_view = - ensure_ok!(ConsistentDbView::new_with_latest_tip(self.provider.clone())); - - // Compute trie input - let trie_input_start = Instant::now(); - let res = self.compute_trie_input( - persisting_kind, - ensure_ok!(consistent_view.provider_ro()), - block.header().parent_hash(), - ); - let trie_input = match res { - Ok(val) => val, - Err(e) => return Err((InsertBlockErrorKind::Other(Box::new(e)), block)), - }; + block_id: BlockWithParent, + input: Input, + execute: impl FnOnce( + &mut V, + Input, + TreeCtx<'_, N>, + ) -> Result, Err>, + convert_to_block: impl FnOnce(&mut Self, Input) -> Result, Err>, + ) -> Result + where + Err: From>, + { + let block_num_hash = block_id.block; + debug!(target: "engine::tree", block=?block_num_hash, parent = ?block_id.parent, "Inserting new block into tree"); - self.metrics - .block_validation - .trie_input_duration - .record(trie_input_start.elapsed().as_secs_f64()); - - // Use state root task only if prefix sets are empty, otherwise proof generation is too - // expensive because it requires walking over the paths in the prefix set in every - // proof. 
- if trie_input.prefix_sets.is_empty() { - self.payload_processor.spawn( - header, - txs, - provider_builder, - consistent_view, - trie_input, - &self.config, - ) - } else { - debug!(target: "engine::tree", block=?block_num_hash, "Disabling state root task due to non-empty prefix sets"); - use_state_root_task = false; - self.payload_processor.spawn_cache_exclusive(header, txs, provider_builder) + match self.block_by_hash(block_num_hash.hash) { + Err(err) => { + let block = convert_to_block(self, input)?; + return Err(InsertBlockError::new(block.into_sealed_block(), err.into()).into()); } - } else { - self.payload_processor.spawn_cache_exclusive(header, txs, provider_builder) - }; - - // Use cached state provider before executing, used in execution after prewarming threads - // complete - let state_provider = CachedStateProvider::new_with_caches( - state_provider, - handle.caches(), - handle.cache_metrics(), - ); - - let (output, execution_finish) = if self.config.state_provider_metrics() { - let state_provider = InstrumentedStateProvider::from_state_provider(&state_provider); - let (output, execution_finish) = - ensure_ok!(self.execute_block(&state_provider, &block, &handle)); - state_provider.record_total_latency(); - (output, execution_finish) - } else { - let (output, execution_finish) = - ensure_ok!(self.execute_block(&state_provider, &block, &handle)); - (output, execution_finish) + Ok(Some(_)) => { + // We now assume that we already have this block in the tree. However, we need to + // run the conversion to ensure that the block hash is valid. + convert_to_block(self, input)?; + return Ok(InsertPayloadOk::AlreadySeen(BlockStatus::Valid)) + } + _ => {} }; - // after executing the block we can stop executing transactions - handle.stop_prewarming_execution(); + // Ensure that the parent state is available. 
+ match self.state_provider_builder(block_id.parent) { + Err(err) => { + let block = convert_to_block(self, input)?; + return Err(InsertBlockError::new(block.into_sealed_block(), err.into()).into()); + } + Ok(None) => { + let block = convert_to_block(self, input)?; - if let Err(err) = self.consensus.validate_block_post_execution(&block, &output) { - // call post-block hook - self.on_invalid_block(&parent_block, &block, &output, None); - return Err((err.into(), block)) - } + // we don't have the state required to execute this block, so we buffer it and find the + // missing parent block + let missing_ancestor = self + .state + .buffer + .lowest_ancestor(&block.parent_hash()) + .map(|block| block.parent_num_hash()) + .unwrap_or_else(|| block.parent_num_hash()); - let hashed_state = self.provider.hashed_post_state(&output.state); + self.state.buffer.insert_block(block); - if let Err(err) = self - .payload_validator - .validate_block_post_execution_with_hashed_state(&hashed_state, &block) - { - // call post-block hook - self.on_invalid_block(&parent_block, &block, &output, None); - return Err((err.into(), block)) - } - - debug!(target: "engine::tree", block=?block_num_hash, "Calculating block state root"); - - let root_time = Instant::now(); - - let mut maybe_state_root = None; - - if run_parallel_state_root { - // if we new payload extends the current canonical change we attempt to use the - // background task or try to compute it in parallel - if use_state_root_task { - debug!(target: "engine::tree", block=?block_num_hash, "Using sparse trie state root algorithm"); - match handle.state_root() { - Ok(StateRootComputeOutcome { state_root, trie_updates, trie }) => { - let elapsed = execution_finish.elapsed(); - info!(target: "engine::tree", ?state_root, ?elapsed, "State root task finished"); - // we double check the state root here for good measure - if self.consensus.validate_state_root(block.header(), state_root).is_ok() { - maybe_state_root = Some((state_root, trie_updates, elapsed)) - } else { - warn!( - target: "engine::tree", - ?state_root, - block_state_root = ?block.header().state_root(), - "State root task returned incorrect state root" - ); - } - - // hold on to the sparse trie for the next payload - self.payload_processor.set_sparse_trie(trie); - } - Err(error) => { - debug!(target: "engine::tree", %error, "Background parallel state root computation failed"); - } - } - } else { - debug!(target: "engine::tree", block=?block_num_hash, "Using parallel state root algorithm"); - match self.compute_state_root_parallel( - persisting_kind, - block.header().parent_hash(), - &hashed_state, - ) { - Ok(result) => { - info!( - target: "engine::tree", - block = ?block_num_hash, - regular_state_root = ?result.0, - "Regular root task finished" - ); - maybe_state_root = Some((result.0, result.1, root_time.elapsed())); - } - Err(ParallelStateRootError::Provider(ProviderError::ConsistentView(error))) => { - debug!(target: "engine::tree", %error, "Parallel state root computation failed consistency check, falling back"); - } - Err(error) => return Err((InsertBlockErrorKind::Other(Box::new(error)), block)), - } + return Ok(InsertPayloadOk::Inserted(BlockStatus::Disconnected { + head: self.state.tree_state.current_canonical_head, + missing_ancestor, + })) } + Ok(Some(_)) => {} } - let (state_root, trie_output, root_elapsed) = if let Some(maybe_state_root) = - maybe_state_root - { - maybe_state_root - } else { - // fallback is to compute the state root regularly in sync - if self.config.state_root_fallback() {
- debug!(target: "engine::tree", block=?block_num_hash, "Using state root fallback for testing"); - } else { - warn!(target: "engine::tree", block=?block_num_hash, ?persisting_kind, "Failed to compute state root in parallel"); - self.metrics.block_validation.state_root_parallel_fallback_total.increment(1); + // determine whether we are on a fork chain + let is_fork = match self.is_fork(block_id) { + Err(err) => { + let block = convert_to_block(self, input)?; + return Err(InsertBlockError::new(block.into_sealed_block(), err.into()).into()); } - - let (root, updates) = - ensure_ok!(state_provider.state_root_with_updates(hashed_state.clone())); - (root, updates, root_time.elapsed()) + Ok(is_fork) => is_fork, }; - self.metrics.block_validation.record_state_root(&trie_output, root_elapsed.as_secs_f64()); - debug!(target: "engine::tree", ?root_elapsed, block=?block_num_hash, "Calculated state root"); - - // ensure state root matches - if self.consensus.validate_state_root(block.header(), state_root).is_err() { - // call post-block hook - self.on_invalid_block(&parent_block, &block, &output, Some((&trie_output, state_root))); - return Err(( - ConsensusError::BodyStateRootDiff( - GotExpected { got: state_root, expected: block.header().state_root() }.into(), - ) - .into(), - block, - )) - } - - // terminate prewarming task with good state output - handle.terminate_caching(Some(output.state.clone())); + let ctx = TreeCtx::new( + &mut self.state, + &self.persistence_state, + &self.canonical_in_memory_state, + is_fork, + ); - let is_fork = ensure_ok!(self.is_fork(block.sealed_header())); + let start = Instant::now(); - // If the block is a fork, we don't save the trie updates, because they may be incorrect. - // Instead, they will be recomputed on persistence. 
- let trie_updates = if is_fork { - ExecutedTrieUpdates::Missing - } else { - ExecutedTrieUpdates::Present(Arc::new(trie_output)) - }; - let executed: ExecutedBlockWithTrieUpdates = ExecutedBlockWithTrieUpdates { - block: ExecutedBlock { - recovered_block: Arc::new(block), - execution_output: Arc::new(ExecutionOutcome::from((output, block_num_hash.number))), - hashed_state: Arc::new(hashed_state), - }, - trie: trie_updates, - }; + let executed = execute(&mut self.payload_validator, input, ctx)?; // if the parent is the canonical head, we can insert the block as the pending block if self.state.tree_state.canonical_block_hash() == executed.recovered_block().parent_hash() @@ -2424,73 +2157,6 @@ where Ok(InsertPayloadOk::Inserted(BlockStatus::Valid)) } - /// Executes a block with the given state provider - fn execute_block( - &mut self, - state_provider: S, - block: &RecoveredBlock, - handle: &PayloadHandle, - ) -> Result<(BlockExecutionOutput, Instant), InsertBlockErrorKind> { - debug!(target: "engine::tree", block=?block.num_hash(), "Executing block"); - let mut db = State::builder() - .with_database(StateProviderDatabase::new(&state_provider)) - .with_bundle_update() - .without_state_clear() - .build(); - let mut executor = self.evm_config.executor_for_block(&mut db, block); - - if !self.config.precompile_cache_disabled() { - executor.evm_mut().precompiles_mut().map_precompiles(|address, precompile| { - let metrics = self - .precompile_cache_metrics - .entry(*address) - .or_insert_with(|| CachedPrecompileMetrics::new_with_address(*address)) - .clone(); - CachedPrecompile::wrap( - precompile, - self.precompile_cache_map.cache_for_address(*address), - *self.evm_config.evm_env(block.header()).spec_id(), - Some(metrics), - ) - }); - } - - let execution_start = Instant::now(); - let output = self.metrics.executor.execute_metered( - executor, - block, - Box::new(handle.state_hook()), - )?; - let execution_finish = Instant::now(); - let execution_time = execution_finish.duration_since(execution_start); - debug!(target: "engine::tree", elapsed = ?execution_time, number=?block.number(), "Executed block"); - Ok((output, execution_finish)) - } - - /// Compute state root for the given hashed post state in parallel. - /// - /// # Returns - /// - /// Returns `Ok(_)` if computed successfully. - /// Returns `Err(_)` if error was encountered during computation. - /// `Err(ProviderError::ConsistentView(_))` can be safely ignored and fallback computation - /// should be used instead. - fn compute_state_root_parallel( - &self, - persisting_kind: PersistingKind, - parent_hash: B256, - hashed_state: &HashedPostState, - ) -> Result<(B256, TrieUpdates), ParallelStateRootError> { - let consistent_view = ConsistentDbView::new_with_latest_tip(self.provider.clone())?; - - let mut input = - self.compute_trie_input(persisting_kind, consistent_view.provider_ro()?, parent_hash)?; - // Extend with block we are validating root for. - input.append_ref(hashed_state); - - ParallelStateRoot::new(consistent_view, input).incremental_root_with_updates() - } - /// Computes the trie input at the provided parent hash. /// /// The goal of this function is to take in-memory blocks and generate a [`TrieInput`] that @@ -2629,6 +2295,29 @@ where )) } + /// Handles a [`NewPayloadError`] by converting it to a [`PayloadStatus`]. 
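+ /// + /// Per the engine API rules, `latestValidHash` is `null` when the block hash or the + /// versioned hashes validation fails; otherwise it is resolved from the last valid + /// ancestor of the parent hash.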
+ fn on_new_payload_error( + &mut self, + error: NewPayloadError, + parent_hash: B256, + ) -> ProviderResult { + error!(target: "engine::tree", %error, "Invalid payload"); + // we need to convert the error to a payload status (response to the CL) + + let latest_valid_hash = + if error.is_block_hash_mismatch() || error.is_invalid_versioned_hashes() { + // Engine-API rules: + // > `latestValidHash: null` if the blockHash validation has failed () + // > `latestValidHash: null` if the expected and the actual arrays don't match () + None + } else { + self.latest_valid_hash_for_invalid_payload(parent_hash)? + }; + + let status = PayloadStatusEnum::from(error); + Ok(PayloadStatus::new(status, latest_valid_hash)) + } + /// Attempts to find the header for the given block hash if it is canonical. pub fn find_canonical_header( &self, diff --git a/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs new file mode 100644 index 00000000000..d59f14c796a --- /dev/null +++ b/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs @@ -0,0 +1,175 @@ +//! Configured sparse trie enum for switching between serial and parallel implementations. + +use alloy_primitives::B256; +use reth_trie::{Nibbles, TrieNode}; +use reth_trie_sparse::{ + errors::SparseTrieResult, provider::TrieNodeProvider, LeafLookup, LeafLookupError, + RevealedSparseNode, SerialSparseTrie, SparseTrieInterface, SparseTrieUpdates, TrieMasks, +}; +use reth_trie_sparse_parallel::ParallelSparseTrie; +use std::borrow::Cow; + +/// Enum for switching between serial and parallel sparse trie implementations. +/// +/// This type allows runtime selection between different sparse trie implementations, +/// providing flexibility in choosing the appropriate implementation based on workload +/// characteristics. +#[derive(Debug)] +pub(crate) enum ConfiguredSparseTrie { + /// Serial implementation of the sparse trie. + Serial(Box), + /// Parallel implementation of the sparse trie. 
+ Parallel(Box), +} + +impl From for ConfiguredSparseTrie { + fn from(trie: SerialSparseTrie) -> Self { + Self::Serial(Box::new(trie)) + } +} + +impl From for ConfiguredSparseTrie { + fn from(trie: ParallelSparseTrie) -> Self { + Self::Parallel(Box::new(trie)) + } +} + +impl Default for ConfiguredSparseTrie { + fn default() -> Self { + Self::Serial(Default::default()) + } +} + +impl SparseTrieInterface for ConfiguredSparseTrie { + fn with_root( + self, + root: TrieNode, + masks: TrieMasks, + retain_updates: bool, + ) -> SparseTrieResult { + match self { + Self::Serial(trie) => { + trie.with_root(root, masks, retain_updates).map(|t| Self::Serial(Box::new(t))) + } + Self::Parallel(trie) => { + trie.with_root(root, masks, retain_updates).map(|t| Self::Parallel(Box::new(t))) + } + } + } + + fn with_updates(self, retain_updates: bool) -> Self { + match self { + Self::Serial(trie) => Self::Serial(Box::new(trie.with_updates(retain_updates))), + Self::Parallel(trie) => Self::Parallel(Box::new(trie.with_updates(retain_updates))), + } + } + + fn reserve_nodes(&mut self, additional: usize) { + match self { + Self::Serial(trie) => trie.reserve_nodes(additional), + Self::Parallel(trie) => trie.reserve_nodes(additional), + } + } + + fn reveal_node( + &mut self, + path: Nibbles, + node: TrieNode, + masks: TrieMasks, + ) -> SparseTrieResult<()> { + match self { + Self::Serial(trie) => trie.reveal_node(path, node, masks), + Self::Parallel(trie) => trie.reveal_node(path, node, masks), + } + } + + fn reveal_nodes(&mut self, nodes: Vec) -> SparseTrieResult<()> { + match self { + Self::Serial(trie) => trie.reveal_nodes(nodes), + Self::Parallel(trie) => trie.reveal_nodes(nodes), + } + } + + fn update_leaf( + &mut self, + full_path: Nibbles, + value: Vec, + provider: P, + ) -> SparseTrieResult<()> { + match self { + Self::Serial(trie) => trie.update_leaf(full_path, value, provider), + Self::Parallel(trie) => trie.update_leaf(full_path, value, provider), + } + } + + fn remove_leaf( + &mut self, + full_path: &Nibbles, + provider: P, + ) -> SparseTrieResult<()> { + match self { + Self::Serial(trie) => trie.remove_leaf(full_path, provider), + Self::Parallel(trie) => trie.remove_leaf(full_path, provider), + } + } + + fn root(&mut self) -> B256 { + match self { + Self::Serial(trie) => trie.root(), + Self::Parallel(trie) => trie.root(), + } + } + + fn update_subtrie_hashes(&mut self) { + match self { + Self::Serial(trie) => trie.update_subtrie_hashes(), + Self::Parallel(trie) => trie.update_subtrie_hashes(), + } + } + + fn get_leaf_value(&self, full_path: &Nibbles) -> Option<&Vec> { + match self { + Self::Serial(trie) => trie.get_leaf_value(full_path), + Self::Parallel(trie) => trie.get_leaf_value(full_path), + } + } + + fn find_leaf( + &self, + full_path: &Nibbles, + expected_value: Option<&Vec>, + ) -> Result { + match self { + Self::Serial(trie) => trie.find_leaf(full_path, expected_value), + Self::Parallel(trie) => trie.find_leaf(full_path, expected_value), + } + } + + fn take_updates(&mut self) -> SparseTrieUpdates { + match self { + Self::Serial(trie) => trie.take_updates(), + Self::Parallel(trie) => trie.take_updates(), + } + } + + fn wipe(&mut self) { + match self { + Self::Serial(trie) => trie.wipe(), + Self::Parallel(trie) => trie.wipe(), + } + } + + fn clear(&mut self) { + match self { + Self::Serial(trie) => trie.clear(), + Self::Parallel(trie) => trie.clear(), + } + } + + fn updates_ref(&self) -> Cow<'_, SparseTrieUpdates> { + match self { + Self::Serial(trie) => trie.updates_ref(), + Self::Parallel(trie) 
=> trie.updates_ref(), + } + } +} diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index 055d4622d1e..a6c6969049d 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -13,7 +13,7 @@ use alloy_consensus::{transaction::Recovered, BlockHeader}; use alloy_evm::block::StateChangeSource; use alloy_primitives::B256; use executor::WorkloadExecutor; -use multiproof::*; +use multiproof::{SparseTrieUpdate, *}; use parking_lot::RwLock; use prewarm::PrewarmMetrics; use reth_evm::{ConfigureEvm, OnStateHook, SpecFor}; @@ -28,7 +28,10 @@ use reth_trie_parallel::{ proof_task::{ProofTaskCtx, ProofTaskManager}, root::ParallelStateRootError, }; -use reth_trie_sparse::SparseTrieState; +use reth_trie_sparse::{ + provider::{TrieNodeProvider, TrieNodeProviderFactory}, + ClearedSparseStateTrie, SerialSparseTrie, SparseStateTrie, SparseTrie, +}; use std::{ collections::VecDeque, sync::{ @@ -40,17 +43,19 @@ use std::{ use super::precompile_cache::PrecompileCacheMap; +mod configured_sparse_trie; pub mod executor; pub mod multiproof; pub mod prewarm; pub mod sparse_trie; +use configured_sparse_trie::ConfiguredSparseTrie; + /// Entrypoint for executing the payload. -#[derive(Debug, Clone)] -pub struct PayloadProcessor +#[derive(Debug)] +pub struct PayloadProcessor where - N: NodePrimitives, - Evm: ConfigureEvm, + Evm: ConfigureEvm, { /// The executor used by to spawn tasks. executor: WorkloadExecutor, @@ -68,13 +73,16 @@ where precompile_cache_disabled: bool, /// Precompile cache map. precompile_cache_map: PrecompileCacheMap>, - /// A sparse trie, kept around to be used for the state root computation so that allocations - /// can be minimized. - sparse_trie: Option, - _marker: std::marker::PhantomData, + /// A cleared `SparseStateTrie`, kept around to be reused for the state root computation so + /// that allocations can be minimized. + sparse_state_trie: Arc< + parking_lot::Mutex>>, + >, + /// Whether to use the parallel sparse trie. + use_parallel_sparse_trie: bool, } -impl PayloadProcessor +impl PayloadProcessor where N: NodePrimitives, Evm: ConfigureEvm, @@ -95,13 +103,13 @@ where evm_config, precompile_cache_disabled: config.precompile_cache_disabled(), precompile_cache_map, - sparse_trie: None, - _marker: Default::default(), + sparse_state_trie: Arc::default(), + use_parallel_sparse_trie: config.enable_parallel_sparse_trie(), } } } -impl PayloadProcessor +impl PayloadProcessor where N: NodePrimitives, Evm: ConfigureEvm + 'static, @@ -196,23 +204,11 @@ where multi_proof_task.run(); }); - // take the sparse trie if it was set - let sparse_trie = self.sparse_trie.take(); - - let mut sparse_trie_task = SparseTrieTask::new_with_stored_trie( - self.executor.clone(), - sparse_trie_rx, - proof_task.handle(), - self.trie_metrics.clone(), - sparse_trie, - ); - // wire the sparse trie to the state root response receiver let (state_root_tx, state_root_rx) = channel(); - self.executor.spawn_blocking(move || { - let res = sparse_trie_task.run(); - let _ = state_root_tx.send(res); - }); + + // Spawn the sparse trie task using any stored trie and parallel trie configuration. 
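+ // The task hands the trie back when it finishes so it can be cleared and stored for the + // next payload, avoiding repeated allocations (see `spawn_sparse_trie_task` below).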
+ self.spawn_sparse_trie_task(sparse_trie_rx, proof_task.handle(), state_root_tx); // spawn the proof task self.executor.spawn_blocking(move || { @@ -250,11 +246,6 @@ where PayloadHandle { to_multi_proof: None, prewarm_handle, state_root: None } } - /// Sets the sparse trie to be kept around for the state root computation. - pub(super) fn set_sparse_trie(&mut self, sparse_trie: SparseTrieState) { - self.sparse_trie = Some(sparse_trie); - } - /// Spawn prewarming optionally wired to the multiproof task for target updates. fn spawn_caching_with

( &self, @@ -317,6 +308,53 @@ where SavedCache::new(parent_hash, cache, CachedStateMetrics::zeroed()) }) } + + /// Spawns the [`SparseTrieTask`] for this payload processor. + fn spawn_sparse_trie_task( + &self, + sparse_trie_rx: mpsc::Receiver, + proof_task_handle: BPF, + state_root_tx: mpsc::Sender>, + ) where + BPF: TrieNodeProviderFactory + Clone + Send + Sync + 'static, + BPF::AccountNodeProvider: TrieNodeProvider + Send + Sync, + BPF::StorageNodeProvider: TrieNodeProvider + Send + Sync, + { + // Reuse a stored SparseStateTrie, or create a new one using the desired configuration if + // there's none to reuse. + let cleared_sparse_trie = Arc::clone(&self.sparse_state_trie); + let sparse_state_trie = cleared_sparse_trie.lock().take().unwrap_or_else(|| { + let accounts_trie = if self.use_parallel_sparse_trie { + ConfiguredSparseTrie::Parallel(Default::default()) + } else { + ConfiguredSparseTrie::Serial(Default::default()) + }; + ClearedSparseStateTrie::from_state_trie( + SparseStateTrie::new() + .with_accounts_trie(SparseTrie::Blind(Some(Box::new(accounts_trie)))) + .with_updates(true), + ) + }); + + let task = + SparseTrieTask::<_, ConfiguredSparseTrie, SerialSparseTrie>::new_with_cleared_trie( + self.executor.clone(), + sparse_trie_rx, + proof_task_handle, + self.trie_metrics.clone(), + sparse_state_trie, + ); + + self.executor.spawn_blocking(move || { + let (result, trie) = task.run(); + // Send state root computation result + let _ = state_root_tx.send(result); + + // Clear the SparseStateTrie and replace it back into the mutex _after_ sending results + // to the next step, so that time spent clearing doesn't block the step after this one. + cleared_sparse_trie.lock().replace(ClearedSparseStateTrie::from_state_trie(trie)); + }); + } } /// Handle to all the spawned tasks. @@ -467,7 +505,6 @@ mod tests { use rand::Rng; use reth_chainspec::ChainSpec; use reth_db_common::init::init_genesis; - use reth_ethereum_primitives::EthPrimitives; use reth_evm::OnStateHook; use reth_evm_ethereum::EthEvmConfig; use reth_primitives_traits::{Account, StorageEntry}; @@ -582,7 +619,7 @@ mod tests { } } - let mut payload_processor = PayloadProcessor::::new( + let mut payload_processor = PayloadProcessor::new( WorkloadExecutor::default(), EthEvmConfig::new(factory.chain_spec()), &TreeConfig::default(), diff --git a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs index c8de07c1ec5..9879a2c58bf 100644 --- a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs +++ b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs @@ -9,9 +9,9 @@ use rayon::iter::{ParallelBridge, ParallelIterator}; use reth_trie::{updates::TrieUpdates, Nibbles}; use reth_trie_parallel::root::ParallelStateRootError; use reth_trie_sparse::{ - blinded::{BlindedProvider, BlindedProviderFactory}, errors::{SparseStateTrieResult, SparseTrieErrorKind}, - SparseStateTrie, SparseTrieState, + provider::{TrieNodeProvider, TrieNodeProviderFactory}, + ClearedSparseStateTrie, SerialSparseTrie, SparseStateTrie, SparseTrieInterface, }; use std::{ sync::mpsc, @@ -19,96 +19,72 @@ use std::{ }; use tracing::{debug, trace, trace_span}; -/// The level below which the sparse trie hashes are calculated in -/// [`update_sparse_trie`]. -const SPARSE_TRIE_INCREMENTAL_LEVEL: usize = 2; - /// A task responsible for populating the sparse trie. 
-pub(super) struct SparseTrieTask +pub(super) struct SparseTrieTask where - BPF: BlindedProviderFactory + Send + Sync, - BPF::AccountNodeProvider: BlindedProvider + Send + Sync, - BPF::StorageNodeProvider: BlindedProvider + Send + Sync, + BPF: TrieNodeProviderFactory + Send + Sync, + BPF::AccountNodeProvider: TrieNodeProvider + Send + Sync, + BPF::StorageNodeProvider: TrieNodeProvider + Send + Sync, { /// Executor used to spawn subtasks. #[expect(unused)] // TODO use this for spawning trie tasks pub(super) executor: WorkloadExecutor, /// Receives updates from the state root task. pub(super) updates: mpsc::Receiver, - /// Sparse Trie initialized with the blinded provider factory. - /// - /// It's kept as a field on the struct to prevent blocking on de-allocation in [`Self::run`]. - pub(super) trie: SparseStateTrie, + /// `SparseStateTrie` used for computing the state root. + pub(super) trie: SparseStateTrie, pub(super) metrics: MultiProofTaskMetrics, + /// Trie node provider factory. + blinded_provider_factory: BPF, } -impl SparseTrieTask +impl SparseTrieTask where - BPF: BlindedProviderFactory + Send + Sync, - BPF::AccountNodeProvider: BlindedProvider + Send + Sync, - BPF::StorageNodeProvider: BlindedProvider + Send + Sync, + BPF: TrieNodeProviderFactory + Send + Sync + Clone, + BPF::AccountNodeProvider: TrieNodeProvider + Send + Sync, + BPF::StorageNodeProvider: TrieNodeProvider + Send + Sync, + A: SparseTrieInterface + Send + Sync + Default, + S: SparseTrieInterface + Send + Sync + Default, { - /// Creates a new sparse trie task. - pub(super) fn new( + /// Creates a new sparse trie, pre-populating with a [`ClearedSparseStateTrie`]. + pub(super) fn new_with_cleared_trie( executor: WorkloadExecutor, updates: mpsc::Receiver, blinded_provider_factory: BPF, metrics: MultiProofTaskMetrics, + sparse_state_trie: ClearedSparseStateTrie, ) -> Self { Self { executor, updates, metrics, - trie: SparseStateTrie::new(blinded_provider_factory).with_updates(true), + trie: sparse_state_trie.into_inner(), + blinded_provider_factory, } } - /// Creates a new sparse trie, populating the accounts trie with the given cleared - /// `SparseTrieState` if it exists. - pub(super) fn new_with_stored_trie( - executor: WorkloadExecutor, - updates: mpsc::Receiver, - blinded_provider_factory: BPF, - trie_metrics: MultiProofTaskMetrics, - sparse_trie_state: Option, - ) -> Self { - if let Some(sparse_trie_state) = sparse_trie_state { - Self::with_accounts_trie( - executor, - updates, - blinded_provider_factory, - trie_metrics, - sparse_trie_state, - ) - } else { - Self::new(executor, updates, blinded_provider_factory, trie_metrics) - } - } - - /// Creates a new sparse trie task, using the given cleared `SparseTrieState` for the accounts - /// trie. - pub(super) fn with_accounts_trie( - executor: WorkloadExecutor, - updates: mpsc::Receiver, - blinded_provider_factory: BPF, - metrics: MultiProofTaskMetrics, - sparse_trie_state: SparseTrieState, - ) -> Self { - let mut trie = SparseStateTrie::new(blinded_provider_factory).with_updates(true); - trie.populate_from(sparse_trie_state); - - Self { executor, updates, metrics, trie } - } - /// Runs the sparse trie task to completion. /// /// This waits for new incoming [`SparseTrieUpdate`]. /// /// This concludes once the last trie update has been received. /// - /// NOTE: This function does not take `self` by value to prevent blocking on [`SparseStateTrie`] - /// drop. - pub(super) fn run(&mut self) -> Result { + /// # Returns + /// + /// - State root computation outcome. 
+ /// - `SparseStateTrie` that needs to be cleared and reused to avoid reallocations. + pub(super) fn run( + mut self, + ) -> (Result, SparseStateTrie) { + // run the main loop to completion + let result = self.run_inner(); + (result, self.trie) + } + + /// Inner function to run the sparse trie task to completion. + /// + /// See [`Self::run`] for more information. + fn run_inner(&mut self) -> Result { let now = Instant::now(); let mut num_iterations = 0; @@ -129,9 +105,13 @@ where "Updating sparse trie" ); - let elapsed = update_sparse_trie(&mut self.trie, update).map_err(|e| { - ParallelStateRootError::Other(format!("could not calculate state root: {e:?}")) - })?; + let elapsed = + update_sparse_trie(&mut self.trie, update, &self.blinded_provider_factory) + .map_err(|e| { + ParallelStateRootError::Other(format!( + "could not calculate state root: {e:?}" + )) + })?; self.metrics.sparse_trie_update_duration_histogram.record(elapsed); trace!(target: "engine::root", ?elapsed, num_iterations, "Root calculation completed"); } @@ -139,17 +119,15 @@ where debug!(target: "engine::root", num_iterations, "All proofs processed, ending calculation"); let start = Instant::now(); - let (state_root, trie_updates) = self.trie.root_with_updates().map_err(|e| { - ParallelStateRootError::Other(format!("could not calculate state root: {e:?}")) - })?; + let (state_root, trie_updates) = + self.trie.root_with_updates(&self.blinded_provider_factory).map_err(|e| { + ParallelStateRootError::Other(format!("could not calculate state root: {e:?}")) + })?; self.metrics.sparse_trie_final_update_duration_histogram.record(start.elapsed()); self.metrics.sparse_trie_total_duration_histogram.record(now.elapsed()); - // take the account trie - let trie = self.trie.take_cleared_account_trie_state(); - - Ok(StateRootComputeOutcome { state_root, trie_updates, trie }) + Ok(StateRootComputeOutcome { state_root, trie_updates }) } } @@ -161,19 +139,20 @@ pub struct StateRootComputeOutcome { pub state_root: B256, /// The trie updates. pub trie_updates: TrieUpdates, - /// The account state trie. - pub trie: SparseTrieState, } /// Updates the sparse trie with the given proofs and state, and returns the elapsed time. 
-pub(crate) fn update_sparse_trie( - trie: &mut SparseStateTrie, +pub(crate) fn update_sparse_trie( + trie: &mut SparseStateTrie, SparseTrieUpdate { mut state, multiproof }: SparseTrieUpdate, + blinded_provider_factory: &BPF, ) -> SparseStateTrieResult where - BPF: BlindedProviderFactory + Send + Sync, - BPF::AccountNodeProvider: BlindedProvider + Send + Sync, - BPF::StorageNodeProvider: BlindedProvider + Send + Sync, + BPF: TrieNodeProviderFactory + Send + Sync, + BPF::AccountNodeProvider: TrieNodeProvider + Send + Sync, + BPF::StorageNodeProvider: TrieNodeProvider + Send + Sync, + A: SparseTrieInterface + Send + Sync + Default, + S: SparseTrieInterface + Send + Sync + Default, { trace!(target: "engine::root::sparse", "Updating sparse trie"); let started_at = Instant::now(); @@ -198,6 +177,7 @@ where let span = trace_span!(target: "engine::root::sparse", "Storage trie", ?address); let _enter = span.enter(); trace!(target: "engine::root::sparse", "Updating storage"); + let storage_provider = blinded_provider_factory.storage_node_provider(address); let mut storage_trie = storage_trie.ok_or(SparseTrieErrorKind::Blind)?; if storage.wiped { @@ -208,11 +188,14 @@ where let slot_nibbles = Nibbles::unpack(slot); if value.is_zero() { trace!(target: "engine::root::sparse", ?slot, "Removing storage slot"); - storage_trie.remove_leaf(&slot_nibbles)?; + storage_trie.remove_leaf(&slot_nibbles, &storage_provider)?; } else { trace!(target: "engine::root::sparse", ?slot, "Updating storage slot"); - storage_trie - .update_leaf(slot_nibbles, alloy_rlp::encode_fixed_size(&value).to_vec())?; + storage_trie.update_leaf( + slot_nibbles, + alloy_rlp::encode_fixed_size(&value).to_vec(), + &storage_provider, + )?; } } @@ -232,33 +215,31 @@ where // If the account itself has an update, remove it from the state update and update in // one go instead of doing it down below. trace!(target: "engine::root::sparse", ?address, "Updating account and its storage root"); - trie.update_account(address, account.unwrap_or_default())?; + trie.update_account(address, account.unwrap_or_default(), blinded_provider_factory)?; } else if trie.is_account_revealed(address) { // Otherwise, if the account is revealed, only update its storage root. trace!(target: "engine::root::sparse", ?address, "Updating account storage root"); - trie.update_account_storage_root(address)?; + trie.update_account_storage_root(address, blinded_provider_factory)?; } } // Update accounts for (address, account) in state.accounts { trace!(target: "engine::root::sparse", ?address, "Updating account"); - trie.update_account(address, account.unwrap_or_default())?; + trie.update_account(address, account.unwrap_or_default(), blinded_provider_factory)?; } let elapsed_before = started_at.elapsed(); trace!( target: "engine::root::sparse", - level=SPARSE_TRIE_INCREMENTAL_LEVEL, - "Calculating intermediate nodes below trie level" + "Calculating subtries" ); - trie.calculate_below_level(SPARSE_TRIE_INCREMENTAL_LEVEL); + trie.calculate_subtries(); let elapsed = started_at.elapsed(); let below_level_elapsed = elapsed - elapsed_before; trace!( target: "engine::root::sparse", - level=SPARSE_TRIE_INCREMENTAL_LEVEL, ?below_level_elapsed, "Intermediate nodes calculated" ); diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs new file mode 100644 index 00000000000..4677845fc0b --- /dev/null +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -0,0 +1,892 @@ +//! Types and traits for validating blocks and payloads. 
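+//!
+//! The main pieces are [`TreeCtx`], which exposes tree state to the validator,
+//! [`BasicEngineValidator`], which bundles consensus validation, block execution and state root
+//! computation, and the [`EngineValidator`] trait that network-specific validators implement.
+//!
+//! A rough usage sketch (hypothetical wiring; `provider`, `consensus`, `evm_config`,
+//! `payload_validator`, `config`, `hook` and the tree state handles are assumed to exist):
+//! ```ignore
+//! let mut validator =
+//!     BasicEngineValidator::new(provider, consensus, evm_config, payload_validator, config, hook);
+//! let ctx = TreeCtx::new(&mut tree_state, &persistence_state, &canonical_in_memory_state, is_fork);
+//! let executed = validator.validate_block_with_state(block, ctx)?;
+//! ```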
+ +use crate::tree::{ + cached_state::CachedStateProvider, + error::{InsertBlockError, InsertBlockErrorKind, InsertPayloadError}, + executor::WorkloadExecutor, + instrumented_state::InstrumentedStateProvider, + payload_processor::PayloadProcessor, + persistence_state::CurrentPersistenceAction, + precompile_cache::{CachedPrecompile, CachedPrecompileMetrics, PrecompileCacheMap}, + sparse_trie::StateRootComputeOutcome, + ConsistentDbView, EngineApiMetrics, EngineApiTreeState, PayloadHandle, PersistenceState, + PersistingKind, StateProviderBuilder, StateProviderDatabase, TreeConfig, +}; +use alloy_evm::{block::BlockExecutor, Evm}; +use alloy_primitives::B256; +use reth_chain_state::{ + CanonicalInMemoryState, ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates, +}; +use reth_consensus::{ConsensusError, FullConsensus, HeaderValidator}; +use reth_engine_primitives::{InvalidBlockHook, PayloadValidator}; +use reth_errors::ProviderResult; +use reth_evm::{ConfigureEvm, SpecFor}; +use reth_payload_primitives::{ + BuiltPayload, InvalidPayloadAttributesError, NewPayloadError, PayloadTypes, +}; +use reth_primitives_traits::{ + AlloyBlockHeader, BlockTy, GotExpected, NodePrimitives, RecoveredBlock, SealedHeader, +}; +use reth_provider::{ + BlockExecutionOutput, BlockNumReader, BlockReader, DBProvider, DatabaseProviderFactory, + ExecutionOutcome, HashedPostStateProvider, ProviderError, StateCommitmentProvider, + StateProvider, StateProviderFactory, StateReader, StateRootProvider, +}; +use reth_revm::db::State; +use reth_trie::{updates::TrieUpdates, HashedPostState, TrieInput}; +use reth_trie_db::{DatabaseHashedPostState, StateCommitment}; +use reth_trie_parallel::root::{ParallelStateRoot, ParallelStateRootError}; +use std::{collections::HashMap, sync::Arc, time::Instant}; +use tracing::{debug, error, info, trace, warn}; + +/// Context providing access to tree state during validation. +/// +/// This context is provided to the [`EngineValidator`] and includes the state of the tree's +/// internals +pub struct TreeCtx<'a, N: NodePrimitives> { + /// The engine API tree state + state: &'a mut EngineApiTreeState, + /// Information about the current persistence state + persistence: &'a PersistenceState, + /// Reference to the canonical in-memory state + canonical_in_memory_state: &'a CanonicalInMemoryState, + /// Whether the currently validated block is on a fork chain. 
+ is_fork: bool, +} + +impl<'a, N: NodePrimitives> std::fmt::Debug for TreeCtx<'a, N> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("TreeCtx") + .field("state", &"EngineApiTreeState") + .field("persistence_info", &self.persistence) + .field("canonical_in_memory_state", &self.canonical_in_memory_state) + .finish() + } +} + +impl<'a, N: NodePrimitives> TreeCtx<'a, N> { + /// Creates a new tree context + pub const fn new( + state: &'a mut EngineApiTreeState, + persistence: &'a PersistenceState, + canonical_in_memory_state: &'a CanonicalInMemoryState, + is_fork: bool, + ) -> Self { + Self { state, persistence, canonical_in_memory_state, is_fork } + } + + /// Returns a reference to the engine tree state + pub const fn state(&self) -> &EngineApiTreeState { + &*self.state + } + + /// Returns a mutable reference to the engine tree state + pub const fn state_mut(&mut self) -> &mut EngineApiTreeState { + self.state + } + + /// Returns a reference to the persistence info + pub const fn persistence(&self) -> &PersistenceState { + self.persistence + } + + /// Returns a reference to the canonical in-memory state + pub const fn canonical_in_memory_state(&self) -> &'a CanonicalInMemoryState { + self.canonical_in_memory_state + } + + /// Returns whether the currently validated block is on a fork chain. + pub const fn is_fork(&self) -> bool { + self.is_fork + } + + /// Determines the persisting kind for the given block based on persistence info. + /// + /// Based on the given header it returns whether any conflicting persistence operation is + /// currently in progress. + /// + /// This is adapted from the `persisting_kind_for` method in `EngineApiTreeHandler`. + pub fn persisting_kind_for(&self, block: &N::BlockHeader) -> PersistingKind { + // Check that we're currently persisting. + let Some(action) = self.persistence().current_action() else { + return PersistingKind::NotPersisting + }; + // Check that the persistence action is saving blocks, not removing them. + let CurrentPersistenceAction::SavingBlocks { highest } = action else { + return PersistingKind::PersistingNotDescendant + }; + + // The block being validated can only be a descendant if its number is higher than + // the highest block persisting. Otherwise, it's likely a fork of a lower block. + if block.number() > highest.number && self.state().tree_state.is_descendant(*highest, block) + { + return PersistingKind::PersistingDescendant + } + + // In all other cases, the block is not a descendant. + PersistingKind::PersistingNotDescendant + } +} + +/// A helper type that provides reusable payload validation logic for network-specific validators. +/// +/// This type satisfies [`EngineValidator`] and is responsible for executing blocks/payloads. +/// +/// This type contains common validation, execution, and state root computation logic that can be +/// used by network-specific payload validators (e.g., Ethereum, Optimism). It is not meant to be +/// used as a standalone component, but rather as a building block for concrete implementations. +#[derive(derive_more::Debug)] +pub struct BasicEngineValidator +where + Evm: ConfigureEvm, +{ + /// Provider for database access. + provider: P, + /// Consensus implementation for validation. + consensus: Arc>, + /// EVM configuration. + evm_config: Evm, + /// Configuration for the tree. + config: TreeConfig, + /// Payload processor for state root computation. + payload_processor: PayloadProcessor, + /// Precompile cache map.
+ precompile_cache_map: PrecompileCacheMap>, + /// Precompile cache metrics. + precompile_cache_metrics: HashMap, + /// Hook to call when invalid blocks are encountered. + #[debug(skip)] + invalid_block_hook: Box>, + /// Metrics for the engine api. + metrics: EngineApiMetrics, + /// Validator for the payload. + validator: V, +} + +impl BasicEngineValidator +where + N: NodePrimitives, + P: DatabaseProviderFactory + + BlockReader

+ + StateProviderFactory + + StateReader + + StateCommitmentProvider + + HashedPostStateProvider + + Clone + + 'static, + Evm: ConfigureEvm + 'static, +{ + /// Creates a new `BasicEngineValidator`. + #[allow(clippy::too_many_arguments)] + pub fn new( + provider: P, + consensus: Arc>, + evm_config: Evm, + validator: V, + config: TreeConfig, + invalid_block_hook: Box>, + ) -> Self { + let precompile_cache_map = PrecompileCacheMap::default(); + let payload_processor = PayloadProcessor::new( + WorkloadExecutor::default(), + evm_config.clone(), + &config, + precompile_cache_map.clone(), + ); + Self { + provider, + consensus, + evm_config, + payload_processor, + precompile_cache_map, + precompile_cache_metrics: HashMap::new(), + config, + invalid_block_hook, + metrics: EngineApiMetrics::default(), + validator, + } + } + + /// Validates a block that has already been converted from a payload. + /// + /// This method performs: + /// - Consensus validation + /// - Block execution + /// - State root computation + /// - Fork detection + pub fn validate_block_with_state>>( + &mut self, + block: RecoveredBlock, + mut ctx: TreeCtx<'_, N>, + ) -> ValidationOutcome)> + where + V: PayloadValidator, + { + /// A helper macro that returns the block in case there was an error + macro_rules! ensure_ok { + ($expr:expr) => { + match $expr { + Ok(val) => val, + Err(e) => return Err((e.into(), block)), + } + }; + } + + let block_num_hash = block.num_hash(); + + trace!(target: "engine::tree", block=?block_num_hash, "Validating block consensus"); + // validate block consensus rules + ensure_ok!(self.validate_block_inner(&block)); + + trace!(target: "engine::tree", block=?block_num_hash, parent=?block.parent_hash(), "Fetching block state provider"); + let Some(provider_builder) = + ensure_ok!(self.state_provider_builder(block.parent_hash(), ctx.state())) + else { + // this is pre-validated in the tree + return Err(( + InsertBlockErrorKind::Provider(ProviderError::HeaderNotFound( + block.parent_hash().into(), + )), + block, + )) + }; + + // now validate against the parent + let Some(parent_block) = + ensure_ok!(self.sealed_header_by_hash(block.parent_hash(), ctx.state())) + else { + return Err(( + InsertBlockErrorKind::Provider(ProviderError::HeaderNotFound( + block.parent_hash().into(), + )), + block, + )) + }; + + if let Err(e) = + self.consensus.validate_header_against_parent(block.sealed_header(), &parent_block) + { + warn!(target: "engine::tree", ?block, "Failed to validate header {} against parent: {e}", block.hash()); + return Err((e.into(), block)) + } + + let state_provider = ensure_ok!(provider_builder.build()); + + // We only run the parallel state root if we are not currently persisting any blocks or + // persisting blocks that are all ancestors of the one we are executing. + // + // If we're committing ancestor blocks, then: any trie updates being committed are a subset + // of the in-memory trie updates collected before fetching reverts. So any diff in + // reverts (pre vs post commit) is already covered by the in-memory trie updates we + // collect in `compute_state_root_parallel`. + // + // See https://github.com/paradigmxyz/reth/issues/12688 for more details + let persisting_kind = ctx.persisting_kind_for(block.header()); + // don't run parallel if state root fallback is set + let run_parallel_state_root = + persisting_kind.can_run_parallel_state_root() && !self.config.state_root_fallback(); + + // Use state root task only if: + // 1. No persistence is in progress + // 2. Config allows it + // 3.
No ancestors with missing trie updates. If any exist, it will mean that every state + // root task proof calculation will include a lot of unrelated paths in the prefix sets. + // It's cheaper to run a parallel state root that does one walk over trie tables while + // accounting for the prefix sets. + let has_ancestors_with_missing_trie_updates = + self.has_ancestors_with_missing_trie_updates(block.sealed_header(), ctx.state()); + let mut use_state_root_task = run_parallel_state_root && + self.config.use_state_root_task() && + !has_ancestors_with_missing_trie_updates; + + debug!( + target: "engine::tree", + block=?block_num_hash, + run_parallel_state_root, + has_ancestors_with_missing_trie_updates, + use_state_root_task, + config_allows_state_root_task=self.config.use_state_root_task(), + "Deciding which state root algorithm to run" + ); + + // use prewarming background task + let header = block.clone_sealed_header(); + let txs = block.clone_transactions_recovered().collect(); + let mut handle = if use_state_root_task { + // use background tasks for state root calc + let consistent_view = + ensure_ok!(ConsistentDbView::new_with_latest_tip(self.provider.clone())); + + // Compute trie input + let trie_input_start = Instant::now(); + let res = self.compute_trie_input( + persisting_kind, + ensure_ok!(consistent_view.provider_ro()), + block.header().parent_hash(), + ctx.state(), + ); + let trie_input = match res { + Ok(val) => val, + Err(e) => return Err((InsertBlockErrorKind::Other(Box::new(e)), block)), + }; + + self.metrics + .block_validation + .trie_input_duration + .record(trie_input_start.elapsed().as_secs_f64()); + + // Use state root task only if prefix sets are empty, otherwise proof generation is too + // expensive because it requires walking over the paths in the prefix set in every + // proof. 
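+ // (The prefix sets are populated from the revert state and the in-memory ancestor blocks + // gathered in `compute_trie_input`, so they are typically only empty when the parent + // state sits at the database tip with no in-memory overlay.)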
+ if trie_input.prefix_sets.is_empty() { + self.payload_processor.spawn( + header, + txs, + provider_builder, + consistent_view, + trie_input, + &self.config, + ) + } else { + debug!(target: "engine::tree", block=?block_num_hash, "Disabling state root task due to non-empty prefix sets"); + use_state_root_task = false; + self.payload_processor.spawn_cache_exclusive(header, txs, provider_builder) + } + } else { + self.payload_processor.spawn_cache_exclusive(header, txs, provider_builder) + }; + + // Use cached state provider before executing, used in execution after prewarming threads + // complete + let state_provider = CachedStateProvider::new_with_caches( + state_provider, + handle.caches(), + handle.cache_metrics(), + ); + + let (output, execution_finish) = if self.config.state_provider_metrics() { + let state_provider = InstrumentedStateProvider::from_state_provider(&state_provider); + let (output, execution_finish) = + ensure_ok!(self.execute_block(&state_provider, &block, &handle)); + state_provider.record_total_latency(); + (output, execution_finish) + } else { + let (output, execution_finish) = + ensure_ok!(self.execute_block(&state_provider, &block, &handle)); + (output, execution_finish) + }; + + // after executing the block we can stop executing transactions + handle.stop_prewarming_execution(); + + if let Err(err) = self.consensus.validate_block_post_execution(&block, &output) { + // call post-block hook + self.on_invalid_block(&parent_block, &block, &output, None, ctx.state_mut()); + return Err((err.into(), block)) + } + + let hashed_state = self.provider.hashed_post_state(&output.state); + + if let Err(err) = + self.validator.validate_block_post_execution_with_hashed_state(&hashed_state, &block) + { + // call post-block hook + self.on_invalid_block(&parent_block, &block, &output, None, ctx.state_mut()); + return Err((err.into(), block)) + } + + debug!(target: "engine::tree", block=?block_num_hash, "Calculating block state root"); + + let root_time = Instant::now(); + + let mut maybe_state_root = None; + + if run_parallel_state_root { + // if the new payload extends the current canonical chain we attempt to use the + // background task or try to compute it in parallel + if use_state_root_task { + debug!(target: "engine::tree", block=?block_num_hash, "Using sparse trie state root algorithm"); + match handle.state_root() { + Ok(StateRootComputeOutcome { state_root, trie_updates }) => { + let elapsed = execution_finish.elapsed(); + info!(target: "engine::tree", ?state_root, ?elapsed, "State root task finished"); + // we double check the state root here for good measure + if self.consensus.validate_state_root(block.header(), state_root).is_ok() { + maybe_state_root = Some((state_root, trie_updates, elapsed)) + } else { + warn!( + target: "engine::tree", + ?state_root, + block_state_root = ?block.header().state_root(), + "State root task returned incorrect state root" + ); + } + } + Err(error) => { + debug!(target: "engine::tree", %error, "Background parallel state root computation failed"); + } + } + } else { + debug!(target: "engine::tree", block=?block_num_hash, "Using parallel state root algorithm"); + match self.compute_state_root_parallel( + persisting_kind, + block.header().parent_hash(), + &hashed_state, + ctx.state(), + ) { + Ok(result) => { + info!( + target: "engine::tree", + block = ?block_num_hash, + regular_state_root = ?result.0, + "Regular root task finished" + ); + maybe_state_root = Some((result.0, result.1, root_time.elapsed())); + }
Err(ParallelStateRootError::Provider(ProviderError::ConsistentView(error))) => { + debug!(target: "engine::tree", %error, "Parallel state root computation failed consistency check, falling back"); + } + Err(error) => return Err((InsertBlockErrorKind::Other(Box::new(error)), block)), + } + } + } + + let (state_root, trie_output, root_elapsed) = if let Some(maybe_state_root) = + maybe_state_root + { + maybe_state_root + } else { + // fallback is to compute the state root regularly in sync + if self.config.state_root_fallback() { + debug!(target: "engine::tree", block=?block_num_hash, "Using state root fallback for testing"); + } else { + warn!(target: "engine::tree", block=?block_num_hash, ?persisting_kind, "Failed to compute state root in parallel"); + self.metrics.block_validation.state_root_parallel_fallback_total.increment(1); + } + + let (root, updates) = + ensure_ok!(state_provider.state_root_with_updates(hashed_state.clone())); + (root, updates, root_time.elapsed()) + }; + + self.metrics.block_validation.record_state_root(&trie_output, root_elapsed.as_secs_f64()); + debug!(target: "engine::tree", ?root_elapsed, block=?block_num_hash, "Calculated state root"); + + // ensure state root matches + if self.consensus.validate_state_root(block.header(), state_root).is_err() { + // call post-block hook + self.on_invalid_block( + &parent_block, + &block, + &output, + Some((&trie_output, state_root)), + ctx.state_mut(), + ); + return Err(( + ConsensusError::BodyStateRootDiff( + GotExpected { got: state_root, expected: block.header().state_root() }.into(), + ) + .into(), + block, + )) + } + + // terminate prewarming task with good state output + handle.terminate_caching(Some(output.state.clone())); + + // If the block is a fork, we don't save the trie updates, because they may be incorrect. + // Instead, they will be recomputed on persistence. + let trie_updates = if ctx.is_fork() { + ExecutedTrieUpdates::Missing + } else { + ExecutedTrieUpdates::Present(Arc::new(trie_output)) + }; + + Ok(ExecutedBlockWithTrieUpdates { + block: ExecutedBlock { + recovered_block: Arc::new(block), + execution_output: Arc::new(ExecutionOutcome::from((output, block_num_hash.number))), + hashed_state: Arc::new(hashed_state), + }, + trie: trie_updates, + }) + } + + /// Return sealed block from database or in-memory state by hash. + fn sealed_header_by_hash( + &self, + hash: B256, + state: &EngineApiTreeState, + ) -> ProviderResult>> { + // check memory first + let block = + state.tree_state.block_by_hash(hash).map(|block| block.as_ref().clone_sealed_header()); + + if block.is_some() { + Ok(block) + } else { + self.provider.sealed_header_by_hash(hash) + } + } + + /// Validate if block is correct and satisfies all the consensus rules that concern the header + /// and block body itself. 
+ fn validate_block_inner(&self, block: &RecoveredBlock) -> Result<(), ConsensusError> { + if let Err(e) = self.consensus.validate_header(block.sealed_header()) { + error!(target: "engine::tree", ?block, "Failed to validate header {}: {e}", block.hash()); + return Err(e) + } + + if let Err(e) = self.consensus.validate_block_pre_execution(block.sealed_block()) { + error!(target: "engine::tree", ?block, "Failed to validate block {}: {e}", block.hash()); + return Err(e) + } + + Ok(()) + } + + /// Executes a block with the given state provider + fn execute_block( + &mut self, + state_provider: S, + block: &RecoveredBlock, + handle: &PayloadHandle, + ) -> Result<(BlockExecutionOutput, Instant), InsertBlockErrorKind> { + debug!(target: "engine::tree", block=?block.num_hash(), "Executing block"); + let mut db = State::builder() + .with_database(StateProviderDatabase::new(&state_provider)) + .with_bundle_update() + .without_state_clear() + .build(); + let mut executor = self.evm_config.executor_for_block(&mut db, block); + + if !self.config.precompile_cache_disabled() { + executor.evm_mut().precompiles_mut().map_precompiles(|address, precompile| { + let metrics = self + .precompile_cache_metrics + .entry(*address) + .or_insert_with(|| CachedPrecompileMetrics::new_with_address(*address)) + .clone(); + CachedPrecompile::wrap( + precompile, + self.precompile_cache_map.cache_for_address(*address), + *self.evm_config.evm_env(block.header()).spec_id(), + Some(metrics), + ) + }); + } + + let execution_start = Instant::now(); + let output = self.metrics.executor.execute_metered( + executor, + block, + Box::new(handle.state_hook()), + )?; + let execution_finish = Instant::now(); + let execution_time = execution_finish.duration_since(execution_start); + debug!(target: "engine::tree", elapsed = ?execution_time, number=?block.number(), "Executed block"); + Ok((output, execution_finish)) + } + + /// Compute state root for the given hashed post state in parallel. + /// + /// # Returns + /// + /// Returns `Ok(_)` if computed successfully. + /// Returns `Err(_)` if error was encountered during computation. + /// `Err(ProviderError::ConsistentView(_))` can be safely ignored and fallback computation + /// should be used instead. + fn compute_state_root_parallel( + &self, + persisting_kind: PersistingKind, + parent_hash: B256, + hashed_state: &HashedPostState, + state: &EngineApiTreeState, + ) -> Result<(B256, TrieUpdates), ParallelStateRootError> { + let consistent_view = ConsistentDbView::new_with_latest_tip(self.provider.clone())?; + + let mut input = self.compute_trie_input( + persisting_kind, + consistent_view.provider_ro()?, + parent_hash, + state, + )?; + // Extend with block we are validating root for. + input.append_ref(hashed_state); + + ParallelStateRoot::new(consistent_view, input).incremental_root_with_updates() + } + + /// Check if the given block has any ancestors with missing trie updates. + fn has_ancestors_with_missing_trie_updates( + &self, + target_header: &SealedHeader, + state: &EngineApiTreeState, + ) -> bool { + // Walk back through the chain starting from the parent of the target block + let mut current_hash = target_header.parent_hash(); + while let Some(block) = state.tree_state.blocks_by_hash.get(¤t_hash) { + // Check if this block is missing trie updates + if block.trie.is_missing() { + return true; + } + + // Move to the parent block + current_hash = block.recovered_block().parent_hash(); + } + + false + } + + /// Creates a `StateProviderBuilder` for the given parent hash. 
+ /// + /// This method checks if the parent is in the tree state (in-memory) or persisted to disk, + /// and creates the appropriate provider builder. + fn state_provider_builder( + &self, + hash: B256, + state: &EngineApiTreeState, + ) -> ProviderResult>> { + if let Some((historical, blocks)) = state.tree_state.blocks_by_hash(hash) { + debug!(target: "engine::tree", %hash, %historical, "found canonical state for block in memory, creating provider builder"); + // the block leads back to the canonical chain + return Ok(Some(StateProviderBuilder::new( + self.provider.clone(), + historical, + Some(blocks), + ))) + } + + // Check if the block is persisted + if let Some(header) = self.provider.header(&hash)? { + debug!(target: "engine::tree", %hash, number = %header.number(), "found canonical state for block in database, creating provider builder"); + // For persisted blocks, we create a builder that will fetch state directly from the + // database + return Ok(Some(StateProviderBuilder::new(self.provider.clone(), hash, None))) + } + + debug!(target: "engine::tree", %hash, "no canonical state found for block"); + Ok(None) + } + + /// Called when an invalid block is encountered during validation. + fn on_invalid_block( + &self, + parent_header: &SealedHeader, + block: &RecoveredBlock, + output: &BlockExecutionOutput, + trie_updates: Option<(&TrieUpdates, B256)>, + state: &mut EngineApiTreeState, + ) { + if state.invalid_headers.get(&block.hash()).is_some() { + // we already marked this block as invalid + return; + } + self.invalid_block_hook.on_invalid_block(parent_header, block, output, trie_updates); + } + + /// Computes the trie input at the provided parent hash. + /// + /// The goal of this function is to take in-memory blocks and generate a [`TrieInput`] that + /// serves as an overlay to the database blocks. + /// + /// It works as follows: + /// 1. Collect in-memory blocks that are descendants of the provided parent hash using + /// [`crate::tree::TreeState::blocks_by_hash`]. + /// 2. If the persistence is in progress, and the block that we're computing the trie input for + /// is a descendant of the currently persisting blocks, we need to be sure that in-memory + /// blocks are not overlapping with the database blocks that may have been already persisted. + /// To do that, we're filtering out in-memory blocks that are lower than the highest database + /// block. + /// 3. Once in-memory blocks are collected and optionally filtered, we compute the + /// [`HashedPostState`] from them. + fn compute_trie_input( + &self, + persisting_kind: PersistingKind, + provider: TP, + parent_hash: B256, + state: &EngineApiTreeState, + ) -> ProviderResult { + let mut input = TrieInput::default(); + + let best_block_number = provider.best_block_number()?; + + let (mut historical, mut blocks) = state + .tree_state + .blocks_by_hash(parent_hash) + .map_or_else(|| (parent_hash.into(), vec![]), |(hash, blocks)| (hash.into(), blocks)); + + // If the current block is a descendant of the currently persisting blocks, then we need to + // filter in-memory blocks, so that none of them are already persisted in the database. + if persisting_kind.is_descendant() { + // Iterate over the blocks from oldest to newest. + while let Some(block) = blocks.last() { + let recovered_block = block.recovered_block(); + if recovered_block.number() <= best_block_number { + // Remove those blocks that are lower than or equal to the highest database + // block.
+ blocks.pop(); + } else { + // If the block is higher than the best block number, stop filtering, as it's + // the first block that's not in the database. + break + } + } + + historical = if let Some(block) = blocks.last() { + // If there are any in-memory blocks left after filtering, set the anchor to the + // parent of the oldest block. + (block.recovered_block().number() - 1).into() + } else { + // Otherwise, set the anchor to the original provided parent hash. + parent_hash.into() + }; + } + + if blocks.is_empty() { + debug!(target: "engine::tree", %parent_hash, "Parent found on disk"); + } else { + debug!(target: "engine::tree", %parent_hash, %historical, blocks = blocks.len(), "Parent found in memory"); + } + + // Convert the historical block to the block number. + let block_number = provider + .convert_hash_or_number(historical)? + .ok_or_else(|| ProviderError::BlockHashNotFound(historical.as_hash().unwrap()))?; + + // Retrieve revert state for historical block. + let revert_state = if block_number == best_block_number { + // We do not check against the `last_block_number` here because + // `HashedPostState::from_reverts` only uses the database tables, and not static files. + debug!(target: "engine::tree", block_number, best_block_number, "Empty revert state"); + HashedPostState::default() + } else { + let revert_state = HashedPostState::from_reverts::< + ::KeyHasher, + >(provider.tx_ref(), block_number + 1) + .map_err(ProviderError::from)?; + debug!( + target: "engine::tree", + block_number, + best_block_number, + accounts = revert_state.accounts.len(), + storages = revert_state.storages.len(), + "Non-empty revert state" + ); + revert_state + }; + input.append(revert_state); + + // Extend with contents of parent in-memory blocks. + input.extend_with_blocks( + blocks.iter().rev().map(|block| (block.hashed_state(), block.trie_updates())), + ); + + Ok(input) + } +} + +/// Output of block or payload validation. +pub type ValidationOutcome>> = + Result, E>; + +/// Type that validates the payloads processed by the engine. +/// +/// This provides the necessary functions for validating/executing payloads/blocks. +pub trait EngineValidator< + Types: PayloadTypes, + N: NodePrimitives = <::BuiltPayload as BuiltPayload>::Primitives, +>: Send + Sync + 'static +{ + /// Validates the payload attributes with respect to the header. + /// + /// By default, this enforces that the payload attributes timestamp is greater than the + /// timestamp according to: + /// > 7. Client software MUST ensure that payloadAttributes.timestamp is greater than + /// > timestamp + /// > of a block referenced by forkchoiceState.headBlockHash. + /// + /// See also: + fn validate_payload_attributes_against_header( + &self, + attr: &Types::PayloadAttributes, + header: &N::BlockHeader, + ) -> Result<(), InvalidPayloadAttributesError>; + + /// Ensures that the given payload does not violate any consensus rules that concern the block's + /// layout. + /// + /// This function must convert the payload into the executable block and pre-validate its + /// fields. + /// + /// Implementers should ensure that the checks are done in the order that conforms with the + /// engine-API specification. + fn ensure_well_formed_payload( + &self, + payload: Types::ExecutionData, + ) -> Result, NewPayloadError>; + + /// Validates a payload received from engine API. + fn validate_payload( + &mut self, + payload: Types::ExecutionData, + ctx: TreeCtx<'_, N>, + ) -> ValidationOutcome>; + + /// Validates a block downloaded from the network. 
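+ /// + /// Unlike [`Self::validate_payload`], the input here is an already-recovered block, so no + /// payload conversion is performed before validation.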
+
+/// Output of block or payload validation.
+pub type ValidationOutcome<N, E = InsertBlockError<BlockTy<N>>> =
+    Result<ExecutedBlockWithTrieUpdates<N>, E>;
+
+/// Type that validates the payloads processed by the engine.
+///
+/// This provides the necessary functions for validating/executing payloads/blocks.
+pub trait EngineValidator<
+    Types: PayloadTypes,
+    N: NodePrimitives = <<Types as PayloadTypes>::BuiltPayload as BuiltPayload>::Primitives,
+>: Send + Sync + 'static
+{
+    /// Validates the payload attributes with respect to the header.
+    ///
+    /// By default, this enforces that the payload attributes timestamp is greater than the
+    /// timestamp according to:
+    /// > 7. Client software MUST ensure that payloadAttributes.timestamp is greater than timestamp
+    /// >    of a block referenced by forkchoiceState.headBlockHash.
+    ///
+    /// See also: <https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification-1>
+    fn validate_payload_attributes_against_header(
+        &self,
+        attr: &Types::PayloadAttributes,
+        header: &N::BlockHeader,
+    ) -> Result<(), InvalidPayloadAttributesError>;
+
+    /// Ensures that the given payload does not violate any consensus rules that concern the
+    /// block's layout.
+    ///
+    /// This function must convert the payload into the executable block and pre-validate its
+    /// fields.
+    ///
+    /// Implementers should ensure that the checks are done in the order that conforms with the
+    /// engine-API specification.
+    fn ensure_well_formed_payload(
+        &self,
+        payload: Types::ExecutionData,
+    ) -> Result<RecoveredBlock<N::Block>, NewPayloadError>;
+
+    /// Validates a payload received from engine API.
+    fn validate_payload(
+        &mut self,
+        payload: Types::ExecutionData,
+        ctx: TreeCtx<'_, N>,
+    ) -> ValidationOutcome<N, InsertPayloadError<N::Block>>;
+
+    /// Validates a block downloaded from the network.
+    fn validate_block(
+        &mut self,
+        block: RecoveredBlock<N::Block>,
+        ctx: TreeCtx<'_, N>,
+    ) -> ValidationOutcome<N>;
+}
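To make the trait's division of labor concrete, here is a hedged sketch of how a caller might drive it (reth imports elided; the string error mapping is invented purely for illustration — the real handler propagates typed errors):

```rust
// `validate_payload` performs both the well-formedness check and the stateful
// block validation, so a caller only needs a single entry point.
fn on_new_payload<Types, N, V>(
    validator: &mut V,
    payload: Types::ExecutionData,
    ctx: TreeCtx<'_, N>,
) -> Result<ExecutedBlockWithTrieUpdates<N>, String>
where
    Types: PayloadTypes,
    N: NodePrimitives,
    V: EngineValidator<Types, N>,
{
    validator
        .validate_payload(payload, ctx)
        .map_err(|err| format!("payload rejected: {err:?}"))
}
```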
+
+impl<P, N, Evm, Types, V> EngineValidator<Types, N> for BasicEngineValidator<P, Evm, V>
+where
+    P: DatabaseProviderFactory
+        + BlockReader<Block = N::Block, Header = N::BlockHeader>
+        + StateProviderFactory
+        + StateReader
+        + StateCommitmentProvider
+        + HashedPostStateProvider
+        + Clone
+        + 'static,
+    N: NodePrimitives,
+    Evm: ConfigureEvm + 'static,
+    Types: PayloadTypes<BuiltPayload: BuiltPayload<Primitives = N>>,
+    V: PayloadValidator<Types, Block = N::Block>,
+{
+    fn validate_payload_attributes_against_header(
+        &self,
+        attr: &Types::PayloadAttributes,
+        header: &N::BlockHeader,
+    ) -> Result<(), InvalidPayloadAttributesError> {
+        self.validator.validate_payload_attributes_against_header(attr, header)
+    }
+
+    fn ensure_well_formed_payload(
+        &self,
+        payload: Types::ExecutionData,
+    ) -> Result<RecoveredBlock<N::Block>, NewPayloadError> {
+        let block = self.validator.ensure_well_formed_payload(payload)?;
+        Ok(block)
+    }
+
+    fn validate_payload(
+        &mut self,
+        payload: Types::ExecutionData,
+        ctx: TreeCtx<'_, N>,
+    ) -> ValidationOutcome<N, InsertPayloadError<N::Block>> {
+        let block = self.validator.ensure_well_formed_payload(payload)?;
+        Ok(EngineValidator::<Types, N>::validate_block(self, block, ctx)?)
+    }
+
+    fn validate_block(
+        &mut self,
+        block: RecoveredBlock<N::Block>,
+        ctx: TreeCtx<'_, N>,
+    ) -> ValidationOutcome<N> {
+        self.validate_block_with_state(block, ctx)
+            .map_err(|(kind, block)| InsertBlockError::new(block.into_sealed_block(), kind))
+    }
+}
diff --git a/crates/engine/tree/src/tree/precompile_cache.rs b/crates/engine/tree/src/tree/precompile_cache.rs
index a3eb3a5ba2b..cc3d173fb84 100644
--- a/crates/engine/tree/src/tree/precompile_cache.rs
+++ b/crates/engine/tree/src/tree/precompile_cache.rs
@@ -191,11 +191,12 @@ where
             }
         }
 
+        let calldata = input.data;
         let result = self.precompile.call(input);
 
         match &result {
             Ok(output) => {
-                let key = CacheKey::new(self.spec_id.clone(), Bytes::copy_from_slice(input.data));
+                let key = CacheKey::new(self.spec_id.clone(), Bytes::copy_from_slice(calldata));
                 let size = self.cache.insert(key, CacheEntry(output.clone()));
                 self.set_precompile_cache_size_metric(size as f64);
                 self.increment_by_one_precompile_cache_misses();
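The `calldata` binding introduced in the hunk above exists because `call` takes its input by value. A miniature, self-contained reproduction of the ownership issue (illustrative types only):

```rust
// `Input` mirrors the shape of a precompile input: it holds a borrowed slice.
struct Input<'a> {
    data: &'a [u8],
    gas: u64,
}

// Stand-in for the real precompile invocation; consumes `input` by value.
fn call(input: Input<'_>) -> u64 {
    input.gas
}

fn cached_call(input: Input<'_>) -> (u64, Vec<u8>) {
    // Copy the slice reference out *before* the move. `&'a [u8]` is `Copy`,
    // and its lifetime is tied to the underlying bytes, not to the `Input`
    // struct that gets moved into `call`.
    let calldata = input.data;
    let result = call(input); // `input` is consumed here
    // `calldata` is still valid: it borrows the caller's bytes, not `input`.
    (result, calldata.to_vec())
}
```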
@@ -240,8 +241,10 @@ mod tests {
     use std::hash::DefaultHasher;
 
     use super::*;
-    use revm::precompile::PrecompileOutput;
-    use revm_primitives::{hardfork::SpecId, U256};
+    use reth_evm::{EthEvmFactory, Evm, EvmEnv, EvmFactory};
+    use reth_revm::db::EmptyDB;
+    use revm::{context::TxEnv, precompile::PrecompileOutput};
+    use revm_primitives::hardfork::SpecId;
 
     #[test]
     fn test_cache_key_ref_hash() {
@@ -263,7 +266,7 @@
     #[test]
     fn test_precompile_cache_basic() {
         let dyn_precompile: DynPrecompile = |_input: PrecompileInput<'_>| -> PrecompileResult {
-            Ok(PrecompileOutput { gas_used: 0, bytes: Bytes::default() })
+            Ok(PrecompileOutput { gas_used: 0, bytes: Bytes::default(), reverted: false })
         }
         .into();
@@ -273,6 +276,7 @@
         let output = PrecompileOutput {
             gas_used: 50,
             bytes: alloy_primitives::Bytes::copy_from_slice(b"cached_result"),
+            reverted: false,
         };
 
         let key = CacheKey::new(SpecId::PRAGUE, b"test_input".into());
@@ -287,6 +291,7 @@
 
     #[test]
     fn test_precompile_cache_map_separate_addresses() {
+        let mut evm = EthEvmFactory::default().create_evm(EmptyDB::default(), EvmEnv::default());
         let input_data = b"same_input";
         let gas_limit = 100_000;
 
@@ -303,6 +308,7 @@
             Ok(PrecompileOutput {
                 gas_used: 5000,
                 bytes: alloy_primitives::Bytes::copy_from_slice(b"output_from_precompile_1"),
+                reverted: false,
             })
         }
     }
@@ -316,6 +322,7 @@
             Ok(PrecompileOutput {
                 gas_used: 7000,
                 bytes: alloy_primitives::Bytes::copy_from_slice(b"output_from_precompile_2"),
+                reverted: false,
             })
         }
     }
@@ -334,38 +341,56 @@
         None,
     );
 
+        let precompile1_address = Address::with_last_byte(1);
+        let precompile2_address = Address::with_last_byte(2);
+
+        evm.precompiles_mut().apply_precompile(&precompile1_address, |_| Some(wrapped_precompile1));
+        evm.precompiles_mut().apply_precompile(&precompile2_address, |_| Some(wrapped_precompile2));
+
         // first invocation of precompile1 (cache miss)
-        let result1 = wrapped_precompile1
-            .call(PrecompileInput {
-                data: input_data,
-                gas: gas_limit,
+        let result1 = evm
+            .transact_raw(TxEnv {
                 caller: Address::ZERO,
-                value: U256::ZERO,
+                gas_limit,
+                data: input_data.into(),
+                kind: precompile1_address.into(),
+                ..Default::default()
             })
+            .unwrap()
+            .result
+            .into_output()
             .unwrap();
-        assert_eq!(result1.bytes.as_ref(), b"output_from_precompile_1");
+        assert_eq!(result1.as_ref(), b"output_from_precompile_1");
 
         // first invocation of precompile2 with the same input (should be a cache miss)
         // if cache was incorrectly shared, we'd get precompile1's result
-        let result2 = wrapped_precompile2
-            .call(PrecompileInput {
-                data: input_data,
-                gas: gas_limit,
+        let result2 = evm
+            .transact_raw(TxEnv {
                 caller: Address::ZERO,
-                value: U256::ZERO,
+                gas_limit,
+                data: input_data.into(),
+                kind: precompile2_address.into(),
+                ..Default::default()
             })
+            .unwrap()
+            .result
+            .into_output()
            .unwrap();
-        assert_eq!(result2.bytes.as_ref(), b"output_from_precompile_2");
+        assert_eq!(result2.as_ref(), b"output_from_precompile_2");
 
         // second invocation of precompile1 (should be a cache hit)
-        let result3 = wrapped_precompile1
-            .call(PrecompileInput {
-                data: input_data,
-                gas: gas_limit,
+        let result3 = evm
+            .transact_raw(TxEnv {
                 caller: Address::ZERO,
-                value: U256::ZERO,
+                gas_limit,
+                data: input_data.into(),
+                kind: precompile1_address.into(),
+                ..Default::default()
             })
+            .unwrap()
+            .result
+            .into_output()
             .unwrap();
-        assert_eq!(result3.bytes.as_ref(), b"output_from_precompile_1");
+        assert_eq!(result3.as_ref(), b"output_from_precompile_1");
     }
 }
diff --git a/crates/engine/tree/src/tree/state.rs b/crates/engine/tree/src/tree/state.rs
index 7bc443db935..380e100b475 100644
--- a/crates/engine/tree/src/tree/state.rs
+++ b/crates/engine/tree/src/tree/state.rs
@@ -362,13 +362,15 @@ impl TreeState {
         }
 
         // iterate through parents of the second until we reach the number
-        let Some(mut current_block) = self.block_by_hash(second.parent_hash()) else {
+        let Some(mut current_block) = self.blocks_by_hash.get(&second.parent_hash()) else {
             // If we can't find its parent in the tree, we can't continue, so return false
             return false
         };
 
-        while current_block.number() > first.number + 1 {
-            let Some(block) = self.block_by_hash(current_block.header().parent_hash()) else {
+        while current_block.recovered_block().number() > first.number + 1 {
+            let Some(block) =
+                self.blocks_by_hash.get(&current_block.recovered_block().parent_hash())
+            else {
                 // If we can't find its parent in the tree, we can't continue, so return false
                 return false
             };
@@ -377,7 +379,7 @@ impl TreeState {
         }
 
         // Now the block numbers should be equal, so we compare hashes.
-        current_block.parent_hash() == first.hash
+        current_block.recovered_block().parent_hash() == first.hash
     }
 
     /// Updates the canonical head to the given block.
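The ancestor check this `state.rs` hunk touches follows a simple pattern: walk the second block's parents until the numbers line up, then a single hash comparison decides whether the first block is on that chain. A std-only sketch (`SimpleBlock` and the `HashMap` table are assumptions for illustration, not reth's `TreeState`):

```rust
use std::collections::HashMap;

// Illustrative stand-in for an executed block kept in the tree.
struct SimpleBlock {
    number: u64,
    hash: u64,
    parent_hash: u64,
}

/// Returns whether `first` is an ancestor of `second`, given a table of
/// in-memory blocks keyed by hash. Unknown parents end the walk early.
fn connects_to(blocks: &HashMap<u64, SimpleBlock>, first: &SimpleBlock, second: &SimpleBlock) -> bool {
    let Some(mut current) = blocks.get(&second.parent_hash) else {
        return false; // parent not in the tree: cannot continue
    };
    while current.number > first.number + 1 {
        let Some(parent) = blocks.get(&current.parent_hash) else {
            return false;
        };
        current = parent;
    }
    // Numbers now align; one hash comparison settles it.
    current.parent_hash == first.hash
}
```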
diff --git a/crates/engine/tree/src/tree/tests.rs b/crates/engine/tree/src/tree/tests.rs
index 9922d29ff1d..fde19023ece 100644
--- a/crates/engine/tree/src/tree/tests.rs
+++ b/crates/engine/tree/src/tree/tests.rs
@@ -10,14 +10,13 @@ use alloy_rpc_types_engine::{ExecutionData, ExecutionPayloadSidecar, ExecutionPa
 use assert_matches::assert_matches;
 use reth_chain_state::{test_utils::TestBlockBuilder, BlockState};
 use reth_chainspec::{ChainSpec, HOLESKY, MAINNET};
-use reth_engine_primitives::ForkchoiceStatus;
+use reth_engine_primitives::{EngineValidator, ForkchoiceStatus};
 use reth_ethereum_consensus::EthBeaconConsensus;
 use reth_ethereum_engine_primitives::EthEngineTypes;
 use reth_ethereum_primitives::{Block, EthPrimitives};
 use reth_evm_ethereum::MockEvmConfig;
-use reth_node_ethereum::EthereumEngineValidator;
 use reth_primitives_traits::Block as _;
-use reth_provider::test_utils::MockEthProvider;
+use reth_provider::{test_utils::MockEthProvider, ExecutionOutcome};
 use reth_trie::HashedPostState;
 use std::{
     collections::BTreeMap,
@@ -25,6 +24,53 @@ use std::{
     sync::mpsc::{channel, Sender},
 };
 
+/// Mock engine validator for tests
+#[derive(Debug, Clone)]
+struct MockEngineValidator;
+
+impl reth_engine_primitives::PayloadValidator<EthEngineTypes> for MockEngineValidator {
+    type Block = Block;
+
+    fn ensure_well_formed_payload(
+        &self,
+        payload: ExecutionData,
+    ) -> Result<
+        reth_primitives_traits::RecoveredBlock<Block>,
+        reth_payload_primitives::NewPayloadError,
+    > {
+        // For tests, convert the execution payload to a block
+        let block = reth_ethereum_primitives::Block::try_from(payload.payload).map_err(|e| {
+            reth_payload_primitives::NewPayloadError::Other(format!("{e:?}").into())
+        })?;
+        let sealed = block.seal_slow();
+        sealed.try_recover().map_err(|e| reth_payload_primitives::NewPayloadError::Other(e.into()))
+    }
+}
+
+impl EngineValidator<EthEngineTypes> for MockEngineValidator {
+    fn validate_version_specific_fields(
+        &self,
+        _version: reth_payload_primitives::EngineApiMessageVersion,
+        _payload_or_attrs: reth_payload_primitives::PayloadOrAttributes<
+            '_,
+            alloy_rpc_types_engine::ExecutionData,
+            alloy_rpc_types_engine::PayloadAttributes,
+        >,
+    ) -> Result<(), reth_payload_primitives::EngineObjectValidationError> {
+        // Mock implementation - always valid
+        Ok(())
+    }
+
+    fn ensure_well_formed_attributes(
+        &self,
+        _version: reth_payload_primitives::EngineApiMessageVersion,
+        _attributes: &alloy_rpc_types_engine::PayloadAttributes,
+    ) -> Result<(), reth_payload_primitives::EngineObjectValidationError> {
+        // Mock implementation - always valid
+        Ok(())
+    }
+}
+
 /// This is a test channel that allows you to `release` any value that is in the channel.
 ///
 /// If nothing has been sent, then the next value will be immediately sent.
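One plausible shape for such a gated channel — a sketch matching only what the doc comment describes, not the harness's actual implementation:

```rust
use std::sync::mpsc::Sender;

// Values are parked until the test calls `release`; if `release` is called
// while nothing is parked, the next `send` passes straight through.
struct TestChannel<T> {
    inner: Sender<T>,
    parked: Vec<T>,
    pass_through: bool,
}

impl<T> TestChannel<T> {
    fn send(&mut self, value: T) {
        if self.pass_through {
            self.pass_through = false;
            let _ = self.inner.send(value);
        } else {
            self.parked.push(value);
        }
    }

    /// Forward one parked value, or arm the next send to pass through.
    fn release(&mut self) {
        if self.parked.is_empty() {
            self.pass_through = true;
        } else {
            let _ = self.inner.send(self.parked.remove(0));
        }
    }
}
```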
@@ -83,7 +129,7 @@ struct TestHarness {
         EthPrimitives,
         MockEthProvider,
         EthEngineTypes,
-        EthereumEngineValidator,
+        BasicEngineValidator<MockEthProvider, MockEvmConfig, MockEngineValidator>,
         MockEvmConfig,
     >,
     to_tree_tx: Sender<FromEngine<EngineApiRequest<EthEngineTypes, EthPrimitives>, Block>>,
@@ -117,7 +163,7 @@ impl TestHarness {
 
         let provider = MockEthProvider::default();
 
-        let payload_validator = EthereumEngineValidator::new(chain_spec.clone());
+        let payload_validator = MockEngineValidator;
 
         let (from_tree_tx, from_tree_rx) = unbounded_channel();
 
@@ -131,11 +177,19 @@ impl TestHarness {
         let payload_builder = PayloadBuilderHandle::new(to_payload_service);
 
         let evm_config = MockEvmConfig::default();
+        let engine_validator = BasicEngineValidator::new(
+            provider.clone(),
+            consensus.clone(),
+            evm_config.clone(),
+            payload_validator,
+            TreeConfig::default(),
+            Box::new(NoopInvalidBlockHook::default()),
+        );
 
         let tree = EngineApiTreeHandler::new(
             provider.clone(),
             consensus,
-            payload_validator,
+            engine_validator,
             from_tree_tx,
             engine_api_tree_state,
             canonical_in_memory_state,
diff --git a/crates/engine/tree/src/tree/e2e_tests.rs b/crates/engine/tree/tests/e2e-testsuite/main.rs
similarity index 98%
rename from crates/engine/tree/src/tree/e2e_tests.rs
rename to crates/engine/tree/tests/e2e-testsuite/main.rs
index 9eb6a64c885..0b9162ab8c2 100644
--- a/crates/engine/tree/src/tree/e2e_tests.rs
+++ b/crates/engine/tree/tests/e2e-testsuite/main.rs
@@ -1,6 +1,5 @@
 //! E2E test implementations using the e2e test framework for engine tree functionality.
 
-use crate::tree::TreeConfig;
 use eyre::Result;
 use reth_chainspec::{ChainSpecBuilder, MAINNET};
 use reth_e2e_test_utils::testsuite::{
@@ -12,6 +11,7 @@ use reth_e2e_test_utils::testsuite::{
     setup::{NetworkSetup, Setup},
     TestBuilder,
 };
+use reth_engine_tree::tree::TreeConfig;
 use reth_ethereum_engine_primitives::EthEngineTypes;
 use reth_node_ethereum::EthereumNode;
 use std::sync::Arc;
@@ -33,7 +33,10 @@ fn default_engine_tree_setup() -> Setup<EthEngineTypes> {
         ))
         .with_network(NetworkSetup::single_node())
         .with_tree_config(
-            TreeConfig::default().with_legacy_state_root(false).with_has_enough_parallelism(true),
+            TreeConfig::default()
+                .with_legacy_state_root(false)
+                .with_has_enough_parallelism(true)
+                .with_enable_parallel_sparse_trie(true),
         )
 }
diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs
index 050a384c446..269c9eb1500 100644
--- a/crates/engine/util/src/reorg.rs
+++ b/crates/engine/util/src/reorg.rs
@@ -103,7 +103,7 @@ where
         + StateProviderFactory
         + ChainSpecProvider,
     Evm: ConfigureEvm,
-    Validator: PayloadValidator<Block = BlockTy<Evm::Primitives>>,
+    Validator: PayloadValidator<Engine, Block = BlockTy<Evm::Primitives>>,
 {
     type Item = S::Item;
 
@@ -236,19 +236,20 @@
     }
 }
 
-fn create_reorg_head<Provider, Evm, Validator>(
+fn create_reorg_head<Provider, Evm, Validator, T>(
     provider: &Provider,
     evm_config: &Evm,
     payload_validator: &Validator,
     mut depth: usize,
-    next_payload: Validator::ExecutionData,
+    next_payload: T::ExecutionData,
 ) -> RethResult<Option<ExecutionData>>
 where
     Provider: BlockReader<Header = HeaderTy<Evm::Primitives>, Block = BlockTy<Evm::Primitives>>
         + StateProviderFactory
         + ChainSpecProvider,
     Evm: ConfigureEvm,
-    Validator: PayloadValidator<Block = BlockTy<Evm::Primitives>>,
+    T: PayloadTypes,
+    Validator: PayloadValidator<T, Block = BlockTy<Evm::Primitives>>,
 {
     // Ensure next payload is valid.
     let next_block =
diff --git a/crates/era-downloader/Cargo.toml b/crates/era-downloader/Cargo.toml
index 84a5187a70f..54ae581813a 100644
--- a/crates/era-downloader/Cargo.toml
+++ b/crates/era-downloader/Cargo.toml
@@ -35,8 +35,6 @@ sha2.workspace = true
 sha2.features = ["std"]
 
 [dev-dependencies]
-tokio.workspace = true
-tokio.features = ["fs", "io-util", "macros"]
 tempfile.workspace = true
 test-case.workspace = true
 futures.workspace = true
diff --git a/crates/era-utils/Cargo.toml b/crates/era-utils/Cargo.toml
index 6d48e338386..731a9bb9242 100644
--- a/crates/era-utils/Cargo.toml
+++ b/crates/era-utils/Cargo.toml
@@ -28,8 +28,7 @@ reth-storage-api.workspace = true
 reth-primitives-traits.workspace = true
 
 # async
-tokio.workspace = true
-tokio.features = ["fs", "io-util"]
+tokio = { workspace = true, features = ["fs", "io-util", "macros", "rt-multi-thread"] }
 futures-util.workspace = true
 
 # errors
@@ -43,8 +42,6 @@ reth-provider.features = ["test-utils"]
 reth-db-common.workspace = true
 
 # async
-tokio.workspace = true
-tokio.features = ["fs", "io-util", "macros", "rt-multi-thread"]
 tokio-util.workspace = true
 futures.workspace = true
 bytes.workspace = true
diff --git a/crates/era-utils/src/export.rs b/crates/era-utils/src/export.rs
index 2eba464e509..49909d80958 100644
--- a/crates/era-utils/src/export.rs
+++ b/crates/era-utils/src/export.rs
@@ -1,10 +1,11 @@
 //! Logic to export from database era1 block history
 //! and injecting them into era1 files with `Era1Writer`.
 
-use alloy_consensus::{BlockBody, BlockHeader, Header};
+use alloy_consensus::BlockHeader;
 use alloy_primitives::{BlockNumber, B256, U256};
 use eyre::{eyre, Result};
 use reth_era::{
+    e2s_types::IndexEntry,
     era1_file::Era1Writer,
     era1_types::{BlockIndex, Era1Id},
     execution_types::{
@@ -18,7 +19,7 @@ use std::{
     path::PathBuf,
     time::{Duration, Instant},
 };
-use tracing::{info, warn};
+use tracing::{debug, info, warn};
 
 const REPORT_INTERVAL_SECS: u64 = 10;
 const ENTRY_HEADER_SIZE: usize = 8;
@@ -38,7 +39,7 @@ pub struct ExportConfig {
     /// It can never be larger than `MAX_BLOCKS_PER_ERA1 = 8192`
     /// See also <`https://github.com/eth-clients/e2store-format-specs/blob/main/formats/era1.md`>
     pub max_blocks_per_file: u64,
-    /// Network name
+    /// Network name.
     pub network: String,
 }
 
@@ -76,11 +77,9 @@ impl ExportConfig {
 /// Fetches block history data from the provider
 /// and prepares it for export to era1 files
 /// for a given number of blocks then writes them to disk.
-pub fn export<P, B>(provider: &P, config: &ExportConfig) -> Result<Vec<PathBuf>>
+pub fn export<P>(provider: &P, config: &ExportConfig) -> Result<Vec<PathBuf>>
 where
-    P: BlockReader<Block = B>,
-    B: Into<BlockBody<P::Transaction>>,
-    P::Header: Into<Header>
, + P: BlockReader, { config.validate()?; info!( @@ -135,14 +134,26 @@ where let headers = provider.headers_range(start_block..=end_block)?; - let era1_id = Era1Id::new(&config.network, start_block, block_count as u32); + // Extract first 4 bytes of last block's state root as historical identifier + let historical_root = headers + .last() + .map(|header| { + let state_root = header.state_root(); + [state_root[0], state_root[1], state_root[2], state_root[3]] + }) + .unwrap_or([0u8; 4]); + + let era1_id = Era1Id::new(&config.network, start_block, block_count as u32) + .with_hash(historical_root); + + debug!("Final file name {}", era1_id.to_file_name()); let file_path = config.dir.join(era1_id.to_file_name()); let file = std::fs::File::create(&file_path)?; let mut writer = Era1Writer::new(file); writer.write_version()?; - let mut offsets = Vec::with_capacity(block_count); - let mut position = VERSION_ENTRY_SIZE as i64; + let mut offsets = Vec::::with_capacity(block_count); + let mut position = VERSION_ENTRY_SIZE as u64; let mut blocks_written = 0; let mut final_header_data = Vec::new(); @@ -167,7 +178,7 @@ where let body_size = compressed_body.data.len() + ENTRY_HEADER_SIZE; let receipts_size = compressed_receipts.data.len() + ENTRY_HEADER_SIZE; let difficulty_size = 32 + ENTRY_HEADER_SIZE; // U256 is 32 + 8 bytes header overhead - let total_size = header_size + body_size + receipts_size + difficulty_size; + let total_size = (header_size + body_size + receipts_size + difficulty_size) as u64; let block_tuple = BlockTuple::new( compressed_header, @@ -177,7 +188,7 @@ where ); offsets.push(position); - position += total_size as i64; + position += total_size; writer.write_block(&block_tuple)?; blocks_written += 1; @@ -259,16 +270,14 @@ where } // Compresses block data and returns compressed components with metadata -fn compress_block_data( +fn compress_block_data
<P>
(
    provider: &P,
    header: P::Header,
    expected_block_number: BlockNumber,
    total_difficulty: &mut U256,
) -> Result<(CompressedHeader, CompressedBody, CompressedReceipts)>
 where
-    P: BlockReader<Block = B>,
-    B: Into<BlockBody<P::Transaction>>,
-    P::Header: Into<Header>
, + P: BlockReader, { let actual_block_number = header.number(); @@ -286,8 +295,8 @@ where *total_difficulty += header.difficulty(); - let compressed_header = CompressedHeader::from_header(&header.into())?; - let compressed_body = CompressedBody::from_body(&body.into())?; + let compressed_header = CompressedHeader::from_header(&header)?; + let compressed_body = CompressedBody::from_body(&body)?; let compressed_receipts = CompressedReceipts::from_encodable_list(&receipts) .map_err(|e| eyre!("Failed to compress receipts: {}", e))?; diff --git a/crates/era-utils/src/history.rs b/crates/era-utils/src/history.rs index 75eaa4591cf..5d212c1694c 100644 --- a/crates/era-utils/src/history.rs +++ b/crates/era-utils/src/history.rs @@ -10,7 +10,8 @@ use reth_db_api::{ use reth_era::{ e2s_types::E2sError, era1_file::{BlockTupleIterator, Era1Reader}, - execution_types::{BlockTuple, DecodeCompressed}, + execution_types::BlockTuple, + DecodeCompressed, }; use reth_era_downloader::EraMeta; use reth_etl::Collector; diff --git a/crates/era-utils/tests/it/genesis.rs b/crates/era-utils/tests/it/genesis.rs index dacef15eeac..0c35c458aac 100644 --- a/crates/era-utils/tests/it/genesis.rs +++ b/crates/era-utils/tests/it/genesis.rs @@ -23,7 +23,10 @@ fn test_export_with_genesis_only() { let file_path = &exported_files[0]; assert!(file_path.exists(), "Exported file should exist on disk"); let file_name = file_path.file_name().unwrap().to_str().unwrap(); - assert!(file_name.starts_with("mainnet-0-"), "File should have correct prefix"); + assert!( + file_name.starts_with("mainnet-00000-00001-"), + "File should have correct prefix with era format" + ); assert!(file_name.ends_with(".era1"), "File should have correct extension"); let metadata = fs::metadata(file_path).unwrap(); assert!(metadata.len() > 0, "Exported file should not be empty"); diff --git a/crates/era-utils/tests/it/history.rs b/crates/era-utils/tests/it/history.rs index 4811e729539..8e720f1001b 100644 --- a/crates/era-utils/tests/it/history.rs +++ b/crates/era-utils/tests/it/history.rs @@ -1,6 +1,7 @@ use crate::{ClientWithFakeIndex, ITHACA_ERA_INDEX_URL}; use reqwest::{Client, Url}; use reth_db_common::init::init_genesis; +use reth_era::execution_types::MAX_BLOCKS_PER_ERA1; use reth_era_downloader::{EraClient, EraStream, EraStreamConfig}; use reth_era_utils::{export, import, ExportConfig}; use reth_etl::Collector; @@ -129,10 +130,30 @@ async fn test_roundtrip_export_after_import() { blocks_numbers_per_file ); - // Verify exact ERA1 naming convention: `mainnet-{start_block}-{block_count}.era1` + // Verify format: mainnet-{era_number:05}-{era_count:05}-{8hexchars}.era1 + let era_number = file_start_block / MAX_BLOCKS_PER_ERA1 as u64; + + // Era count is always 1 for this test, as we are only exporting one era + let expected_prefix = format!("mainnet-{:05}-{:05}-", era_number, 1); + let file_name = file_path.file_name().unwrap().to_str().unwrap(); - let expected_filename = - format!("mainnet-{file_start_block}-{blocks_numbers_per_file}.era1"); - assert_eq!(file_name, expected_filename, "File {} should have correct name", i + 1); + assert!( + file_name.starts_with(&expected_prefix), + "File {} should start with '{expected_prefix}', got '{file_name}'", + i + 1 + ); + + // Verify the hash part is 8 characters + let hash_start = expected_prefix.len(); + let hash_end = file_name.len() - 5; // remove ".era1" + let hash_part = &file_name[hash_start..hash_end]; + assert_eq!( + hash_part.len(), + 8, + "File {} hash should be 8 characters, got {} in '{}'", + i + 1, + 
hash_part.len(), + file_name + ); } } diff --git a/crates/era/src/consensus_types.rs b/crates/era/src/consensus_types.rs new file mode 100644 index 00000000000..cdcc77ce57a --- /dev/null +++ b/crates/era/src/consensus_types.rs @@ -0,0 +1,235 @@ +//! Consensus types for Era post-merge history files + +use crate::{ + e2s_types::{E2sError, Entry}, + DecodeCompressedSsz, +}; +use snap::{read::FrameDecoder, write::FrameEncoder}; +use ssz::Decode; +use std::io::{Read, Write}; + +/// `CompressedSignedBeaconBlock` record type: [0x01, 0x00] +pub const COMPRESSED_SIGNED_BEACON_BLOCK: [u8; 2] = [0x01, 0x00]; + +/// `CompressedBeaconState` record type: [0x02, 0x00] +pub const COMPRESSED_BEACON_STATE: [u8; 2] = [0x02, 0x00]; + +/// Compressed signed beacon block +/// +/// See also . +#[derive(Debug, Clone)] +pub struct CompressedSignedBeaconBlock { + /// Snappy-compressed ssz-encoded `SignedBeaconBlock` + pub data: Vec, +} + +impl CompressedSignedBeaconBlock { + /// Create a new [`CompressedSignedBeaconBlock`] from compressed data + pub const fn new(data: Vec) -> Self { + Self { data } + } + + /// Create from ssz-encoded block by compressing it with snappy + pub fn from_ssz(ssz_data: &[u8]) -> Result { + let mut compressed = Vec::new(); + { + let mut encoder = FrameEncoder::new(&mut compressed); + + Write::write_all(&mut encoder, ssz_data).map_err(|e| { + E2sError::SnappyCompression(format!("Failed to compress signed beacon block: {e}")) + })?; + + encoder.flush().map_err(|e| { + E2sError::SnappyCompression(format!("Failed to flush encoder: {e}")) + })?; + } + Ok(Self { data: compressed }) + } + + /// Decompress to get the original ssz-encoded signed beacon block + pub fn decompress(&self) -> Result, E2sError> { + let mut decoder = FrameDecoder::new(self.data.as_slice()); + let mut decompressed = Vec::new(); + Read::read_to_end(&mut decoder, &mut decompressed).map_err(|e| { + E2sError::SnappyDecompression(format!("Failed to decompress signed beacon block: {e}")) + })?; + + Ok(decompressed) + } + + /// Convert to an [`Entry`] + pub fn to_entry(&self) -> Entry { + Entry::new(COMPRESSED_SIGNED_BEACON_BLOCK, self.data.clone()) + } + + /// Create from an [`Entry`] + pub fn from_entry(entry: &Entry) -> Result { + if entry.entry_type != COMPRESSED_SIGNED_BEACON_BLOCK { + return Err(E2sError::Ssz(format!( + "Invalid entry type for CompressedSignedBeaconBlock: expected {:02x}{:02x}, got {:02x}{:02x}", + COMPRESSED_SIGNED_BEACON_BLOCK[0], + COMPRESSED_SIGNED_BEACON_BLOCK[1], + entry.entry_type[0], + entry.entry_type[1] + ))); + } + + Ok(Self { data: entry.data.clone() }) + } + + /// Decode the compressed signed beacon block into ssz bytes + pub fn decode_to_ssz(&self) -> Result, E2sError> { + self.decompress() + } +} + +impl DecodeCompressedSsz for CompressedSignedBeaconBlock { + fn decode(&self) -> Result { + let ssz_bytes = self.decompress()?; + T::from_ssz_bytes(&ssz_bytes).map_err(|e| { + E2sError::Ssz(format!("Failed to decode SSZ data into target type: {e:?}")) + }) + } +} + +/// Compressed beacon state +/// +/// See also . 
+#[derive(Debug, Clone)] +pub struct CompressedBeaconState { + /// Snappy-compressed ssz-encoded `BeaconState` + pub data: Vec, +} + +impl CompressedBeaconState { + /// Create a new [`CompressedBeaconState`] from compressed data + pub const fn new(data: Vec) -> Self { + Self { data } + } + + /// Compress with snappy from ssz-encoded state + pub fn from_ssz(ssz_data: &[u8]) -> Result { + let mut compressed = Vec::new(); + { + let mut encoder = FrameEncoder::new(&mut compressed); + + Write::write_all(&mut encoder, ssz_data).map_err(|e| { + E2sError::SnappyCompression(format!("Failed to compress beacon state: {e}")) + })?; + + encoder.flush().map_err(|e| { + E2sError::SnappyCompression(format!("Failed to flush encoder: {e}")) + })?; + } + Ok(Self { data: compressed }) + } + + /// Decompress to get the original ssz-encoded beacon state + pub fn decompress(&self) -> Result, E2sError> { + let mut decoder = FrameDecoder::new(self.data.as_slice()); + let mut decompressed = Vec::new(); + Read::read_to_end(&mut decoder, &mut decompressed).map_err(|e| { + E2sError::SnappyDecompression(format!("Failed to decompress beacon state: {e}")) + })?; + + Ok(decompressed) + } + + /// Convert to an [`Entry`] + pub fn to_entry(&self) -> Entry { + Entry::new(COMPRESSED_BEACON_STATE, self.data.clone()) + } + + /// Create from an [`Entry`] + pub fn from_entry(entry: &Entry) -> Result { + if entry.entry_type != COMPRESSED_BEACON_STATE { + return Err(E2sError::Ssz(format!( + "Invalid entry type for CompressedBeaconState: expected {:02x}{:02x}, got {:02x}{:02x}", + COMPRESSED_BEACON_STATE[0], + COMPRESSED_BEACON_STATE[1], + entry.entry_type[0], + entry.entry_type[1] + ))); + } + + Ok(Self { data: entry.data.clone() }) + } + + /// Decode the compressed beacon state into ssz bytes + pub fn decode_to_ssz(&self) -> Result, E2sError> { + self.decompress() + } +} + +impl DecodeCompressedSsz for CompressedBeaconState { + fn decode(&self) -> Result { + let ssz_bytes = self.decompress()?; + T::from_ssz_bytes(&ssz_bytes).map_err(|e| { + E2sError::Ssz(format!("Failed to decode SSZ data into target type: {e:?}")) + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_signed_beacon_block_compression_roundtrip() { + let ssz_data = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; + + let compressed_block = CompressedSignedBeaconBlock::from_ssz(&ssz_data).unwrap(); + let decompressed = compressed_block.decompress().unwrap(); + + assert_eq!(decompressed, ssz_data); + } + + #[test] + fn test_beacon_state_compression_roundtrip() { + let ssz_data = vec![10, 9, 8, 7, 6, 5, 4, 3, 2, 1]; + + let compressed_state = CompressedBeaconState::from_ssz(&ssz_data).unwrap(); + let decompressed = compressed_state.decompress().unwrap(); + + assert_eq!(decompressed, ssz_data); + } + + #[test] + fn test_entry_conversion_signed_beacon_block() { + let ssz_data = vec![1, 2, 3, 4, 5]; + let compressed_block = CompressedSignedBeaconBlock::from_ssz(&ssz_data).unwrap(); + + let entry = compressed_block.to_entry(); + assert_eq!(entry.entry_type, COMPRESSED_SIGNED_BEACON_BLOCK); + + let recovered = CompressedSignedBeaconBlock::from_entry(&entry).unwrap(); + let recovered_ssz = recovered.decode_to_ssz().unwrap(); + + assert_eq!(recovered_ssz, ssz_data); + } + + #[test] + fn test_entry_conversion_beacon_state() { + let ssz_data = vec![5, 4, 3, 2, 1]; + let compressed_state = CompressedBeaconState::from_ssz(&ssz_data).unwrap(); + + let entry = compressed_state.to_entry(); + assert_eq!(entry.entry_type, COMPRESSED_BEACON_STATE); + + let recovered = 
CompressedBeaconState::from_entry(&entry).unwrap(); + let recovered_ssz = recovered.decode_to_ssz().unwrap(); + + assert_eq!(recovered_ssz, ssz_data); + } + + #[test] + fn test_invalid_entry_type() { + let invalid_entry = Entry::new([0xFF, 0xFF], vec![1, 2, 3]); + + let result = CompressedSignedBeaconBlock::from_entry(&invalid_entry); + assert!(result.is_err()); + + let result = CompressedBeaconState::from_entry(&invalid_entry); + assert!(result.is_err()); + } +} diff --git a/crates/era/src/e2s_types.rs b/crates/era/src/e2s_types.rs index c2d4734c2e7..3e5681eb119 100644 --- a/crates/era/src/e2s_types.rs +++ b/crates/era/src/e2s_types.rs @@ -165,3 +165,96 @@ impl Entry { self.entry_type == SLOT_INDEX } } + +/// Serialize and deserialize index entries with format: +/// `starting-number | offsets... | count` +pub trait IndexEntry: Sized { + /// Get the entry type identifier for this index + fn entry_type() -> [u8; 2]; + + /// Create a new instance with starting number and offsets + fn new(starting_number: u64, offsets: Vec) -> Self; + + /// Get the starting number - can be starting slot or block number for example + fn starting_number(&self) -> u64; + + /// Get the offsets vector + fn offsets(&self) -> &[u64]; + + /// Convert to an [`Entry`] for storage in an e2store file + /// Format: starting-number | offset1 | offset2 | ... | count + fn to_entry(&self) -> Entry { + let mut data = Vec::with_capacity(8 + self.offsets().len() * 8 + 8); + + // Add starting number + data.extend_from_slice(&self.starting_number().to_le_bytes()); + + // Add all offsets + data.extend(self.offsets().iter().flat_map(|offset| offset.to_le_bytes())); + + // Encode count - 8 bytes again + let count = self.offsets().len() as u64; + data.extend_from_slice(&count.to_le_bytes()); + + Entry::new(Self::entry_type(), data) + } + + /// Create from an [`Entry`] + fn from_entry(entry: &Entry) -> Result { + let expected_type = Self::entry_type(); + + if entry.entry_type != expected_type { + return Err(E2sError::Ssz(format!( + "Invalid entry type: expected {:02x}{:02x}, got {:02x}{:02x}", + expected_type[0], expected_type[1], entry.entry_type[0], entry.entry_type[1] + ))); + } + + if entry.data.len() < 16 { + return Err(E2sError::Ssz( + "Index entry too short: need at least 16 bytes for starting_number and count" + .to_string(), + )); + } + + // Extract count from last 8 bytes + let count_bytes = &entry.data[entry.data.len() - 8..]; + let count = u64::from_le_bytes( + count_bytes + .try_into() + .map_err(|_| E2sError::Ssz("Failed to read count bytes".to_string()))?, + ) as usize; + + // Verify entry has correct size + let expected_len = 8 + count * 8 + 8; + if entry.data.len() != expected_len { + return Err(E2sError::Ssz(format!( + "Index entry has incorrect length: expected {expected_len}, got {}", + entry.data.len() + ))); + } + + // Extract starting number from first 8 bytes + let starting_number = u64::from_le_bytes( + entry.data[0..8] + .try_into() + .map_err(|_| E2sError::Ssz("Failed to read starting_number bytes".to_string()))?, + ); + + // Extract all offsets + let mut offsets = Vec::with_capacity(count); + for i in 0..count { + let start = 8 + i * 8; + let end = start + 8; + let offset_bytes = &entry.data[start..end]; + let offset = u64::from_le_bytes( + offset_bytes + .try_into() + .map_err(|_| E2sError::Ssz(format!("Failed to read offset {i} bytes")))?, + ); + offsets.push(offset); + } + + Ok(Self::new(starting_number, offsets)) + } +} diff --git a/crates/era/src/era1_file.rs b/crates/era/src/era1_file.rs index 
547d770f06d..b665b481766 100644 --- a/crates/era/src/era1_file.rs +++ b/crates/era/src/era1_file.rs @@ -3,11 +3,11 @@ //! The structure of an Era1 file follows the specification: //! `Version | block-tuple* | other-entries* | Accumulator | BlockIndex` //! -//! See also +//! See also . use crate::{ e2s_file::{E2StoreReader, E2StoreWriter}, - e2s_types::{E2sError, Entry, Version}, + e2s_types::{E2sError, Entry, IndexEntry, Version}, era1_types::{BlockIndex, Era1Group, Era1Id, BLOCK_INDEX}, execution_types::{ self, Accumulator, BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts, @@ -43,13 +43,13 @@ impl Era1File { /// Get a block by its number, if present in this file pub fn get_block_by_number(&self, number: BlockNumber) -> Option<&BlockTuple> { - let index = (number - self.group.block_index.starting_number) as usize; + let index = (number - self.group.block_index.starting_number()) as usize; (index < self.group.blocks.len()).then(|| &self.group.blocks[index]) } /// Get the range of block numbers contained in this file pub fn block_range(&self) -> std::ops::RangeInclusive { - let start = self.group.block_index.starting_number; + let start = self.group.block_index.starting_number(); let end = start + (self.group.blocks.len() as u64) - 1; start..=end } @@ -59,6 +59,7 @@ impl Era1File { self.block_range().contains(&number) } } + /// Reader for Era1 files that builds on top of [`E2StoreReader`] #[derive(Debug)] pub struct Era1Reader { @@ -215,8 +216,8 @@ impl Era1Reader { let id = Era1Id::new( network_name, - block_index.starting_number, - block_index.offsets.len() as u32, + block_index.starting_number(), + block_index.offsets().len() as u32, ); Ok(Era1File::new(group, id)) @@ -445,7 +446,7 @@ mod tests { let mut offsets = Vec::with_capacity(block_count); for i in 0..block_count { - offsets.push(i as i64 * 100); + offsets.push(i as u64 * 100); } let block_index = BlockIndex::new(start_block, offsets); let group = Era1Group::new(blocks, accumulator, block_index); diff --git a/crates/era/src/era1_types.rs b/crates/era/src/era1_types.rs index 135f7225f60..58f51b42419 100644 --- a/crates/era/src/era1_types.rs +++ b/crates/era/src/era1_types.rs @@ -3,8 +3,8 @@ //! 
See also use crate::{ - e2s_types::{E2sError, Entry}, - execution_types::{Accumulator, BlockTuple}, + e2s_types::{Entry, IndexEntry}, + execution_types::{Accumulator, BlockTuple, MAX_BLOCKS_PER_ERA1}, }; use alloy_primitives::BlockNumber; @@ -38,6 +38,7 @@ impl Era1Group { ) -> Self { Self { blocks, accumulator, block_index, other_entries: Vec::new() } } + /// Add another entry to this group pub fn add_entry(&mut self, entry: Entry) { self.other_entries.push(entry); @@ -52,20 +53,15 @@ impl Era1Group { #[derive(Debug, Clone)] pub struct BlockIndex { /// Starting block number - pub starting_number: BlockNumber, + starting_number: BlockNumber, /// Offsets to data at each block number - pub offsets: Vec, + offsets: Vec, } impl BlockIndex { - /// Create a new [`BlockIndex`] - pub const fn new(starting_number: BlockNumber, offsets: Vec) -> Self { - Self { starting_number, offsets } - } - /// Get the offset for a specific block number - pub fn offset_for_block(&self, block_number: BlockNumber) -> Option { + pub fn offset_for_block(&self, block_number: BlockNumber) -> Option { if block_number < self.starting_number { return None; } @@ -73,72 +69,23 @@ impl BlockIndex { let index = (block_number - self.starting_number) as usize; self.offsets.get(index).copied() } +} - /// Convert to an [`Entry`] for storage in an e2store file - pub fn to_entry(&self) -> Entry { - // Format: starting-(block)-number | index | index | index ... | count - let mut data = Vec::with_capacity(8 + self.offsets.len() * 8 + 8); - - // Add starting block number - data.extend_from_slice(&self.starting_number.to_le_bytes()); - - // Add all offsets - for offset in &self.offsets { - data.extend_from_slice(&offset.to_le_bytes()); - } - - // Add count - data.extend_from_slice(&(self.offsets.len() as i64).to_le_bytes()); - - Entry::new(BLOCK_INDEX, data) +impl IndexEntry for BlockIndex { + fn new(starting_number: u64, offsets: Vec) -> Self { + Self { starting_number, offsets } } - /// Create from an [`Entry`] - pub fn from_entry(entry: &Entry) -> Result { - if entry.entry_type != BLOCK_INDEX { - return Err(E2sError::Ssz(format!( - "Invalid entry type for BlockIndex: expected {:02x}{:02x}, got {:02x}{:02x}", - BLOCK_INDEX[0], BLOCK_INDEX[1], entry.entry_type[0], entry.entry_type[1] - ))); - } - - if entry.data.len() < 16 { - return Err(E2sError::Ssz(String::from( - "BlockIndex entry too short to contain starting block number and count", - ))); - } - - // Extract starting block number = first 8 bytes - let mut starting_number_bytes = [0u8; 8]; - starting_number_bytes.copy_from_slice(&entry.data[0..8]); - let starting_number = u64::from_le_bytes(starting_number_bytes); - - // Extract count = last 8 bytes - let mut count_bytes = [0u8; 8]; - count_bytes.copy_from_slice(&entry.data[entry.data.len() - 8..]); - let count = u64::from_le_bytes(count_bytes) as usize; - - // Verify that the entry has the correct size - let expected_size = 8 + count * 8 + 8; - if entry.data.len() != expected_size { - return Err(E2sError::Ssz(format!( - "BlockIndex entry has incorrect size: expected {}, got {}", - expected_size, - entry.data.len() - ))); - } + fn entry_type() -> [u8; 2] { + BLOCK_INDEX + } - // Extract all offsets - let mut offsets = Vec::with_capacity(count); - for i in 0..count { - let start = 8 + i * 8; - let end = start + 8; - let mut offset_bytes = [0u8; 8]; - offset_bytes.copy_from_slice(&entry.data[start..end]); - offsets.push(i64::from_le_bytes(offset_bytes)); - } + fn starting_number(&self) -> u64 { + self.starting_number + } - Ok(Self { 
starting_number, offsets }) + fn offsets(&self) -> &[u64] { + &self.offsets } } @@ -155,6 +102,7 @@ pub struct Era1Id { pub block_count: u32, /// Optional hash identifier for this file + /// First 4 bytes of the last historical root in the last state in the era file pub hash: Option<[u8; 4]>, } @@ -174,52 +122,74 @@ impl Era1Id { self } - /// Convert to file name following the era1 file naming: - /// `--.era1` - /// inspired from era file naming convention in + /// Convert to file name following the era file naming: + /// `---.era(1)` /// /// See also pub fn to_file_name(&self) -> String { + // Find which era the first block belongs to + let era_number = self.start_block / MAX_BLOCKS_PER_ERA1 as u64; + let era_count = self.calculate_era_count(era_number); if let Some(hash) = self.hash { - // Format with zero-padded era number and hash: - // For example network-00000-5ec1ffb8.era1 format!( - "{}-{:05}-{:02x}{:02x}{:02x}{:02x}.era1", - self.network_name, self.start_block, hash[0], hash[1], hash[2], hash[3] + "{}-{:05}-{:05}-{:02x}{:02x}{:02x}{:02x}.era1", + self.network_name, era_number, era_count, hash[0], hash[1], hash[2], hash[3] ) } else { - // Original format without hash - format!("{}-{}-{}.era1", self.network_name, self.start_block, self.block_count) + // era spec format with placeholder hash when no hash available + // Format: `---00000000.era1` + format!("{}-{:05}-{:05}-00000000.era1", self.network_name, era_number, era_count) } } + + // Helper function to calculate the number of eras per era1 file, + // If the user can decide how many blocks per era1 file there are, we need to calculate it. + // Most of the time it should be 1, but it can never be more than 2 eras per file + // as there is a maximum of 8192 blocks per era1 file. + const fn calculate_era_count(&self, first_era: u64) -> u64 { + // Calculate the actual last block number in the range + let last_block = self.start_block + self.block_count as u64 - 1; + // Find which era the last block belongs to + let last_era = last_block / MAX_BLOCKS_PER_ERA1 as u64; + // Count how many eras we span + last_era - first_era + 1 + } } #[cfg(test)] mod tests { use super::*; - use crate::execution_types::{ - CompressedBody, CompressedHeader, CompressedReceipts, TotalDifficulty, + use crate::{ + test_utils::{create_sample_block, create_test_block_with_compressed_data}, + DecodeCompressed, }; + use alloy_consensus::ReceiptWithBloom; use alloy_primitives::{B256, U256}; - /// Helper function to create a sample block tuple - fn create_sample_block(data_size: usize) -> BlockTuple { - // Create a compressed header with very sample data - let header_data = vec![0xAA; data_size]; - let header = CompressedHeader::new(header_data); - - // Create a compressed body - let body_data = vec![0xBB; data_size * 2]; - let body = CompressedBody::new(body_data); - - // Create compressed receipts - let receipts_data = vec![0xCC; data_size]; - let receipts = CompressedReceipts::new(receipts_data); - - let difficulty = TotalDifficulty::new(U256::from(data_size)); - - // Create and return the block tuple - BlockTuple::new(header, body, receipts, difficulty) + #[test] + fn test_alloy_components_decode_and_receipt_in_bloom() { + // Create a block tuple from compressed data + let block: BlockTuple = create_test_block_with_compressed_data(30); + + // Decode and decompress the block header + let header: alloy_consensus::Header = block.header.decode().unwrap(); + assert_eq!(header.number, 30, "Header block number should match"); + assert_eq!(header.difficulty, 
U256::from(30 * 1000), "Header difficulty should match"); + assert_eq!(header.gas_limit, 5000000, "Gas limit should match"); + assert_eq!(header.gas_used, 21000, "Gas used should match"); + assert_eq!(header.timestamp, 1609459200 + 30, "Timestamp should match"); + assert_eq!(header.base_fee_per_gas, Some(10), "Base fee per gas should match"); + assert!(header.withdrawals_root.is_some(), "Should have withdrawals root"); + assert!(header.blob_gas_used.is_none(), "Should not have blob gas used"); + assert!(header.excess_blob_gas.is_none(), "Should not have excess blob gas"); + + let body: alloy_consensus::BlockBody = + block.body.decode().unwrap(); + assert_eq!(body.ommers.len(), 0, "Should have no ommers"); + assert!(body.withdrawals.is_some(), "Should have withdrawals field"); + + let receipts: Vec = block.receipts.decode().unwrap(); + assert_eq!(receipts.len(), 1, "Should have exactly 1 receipt"); } #[test] @@ -330,33 +300,33 @@ mod tests { #[test_case::test_case( Era1Id::new("mainnet", 0, 8192).with_hash([0x5e, 0xc1, 0xff, 0xb8]), - "mainnet-00000-5ec1ffb8.era1"; - "Mainnet 00000" + "mainnet-00000-00001-5ec1ffb8.era1"; + "Mainnet era 0" )] #[test_case::test_case( - Era1Id::new("mainnet", 12, 8192).with_hash([0x5e, 0xcb, 0x9b, 0xf9]), - "mainnet-00012-5ecb9bf9.era1"; - "Mainnet 00012" + Era1Id::new("mainnet", 8192, 8192).with_hash([0x5e, 0xcb, 0x9b, 0xf9]), + "mainnet-00001-00001-5ecb9bf9.era1"; + "Mainnet era 1" )] #[test_case::test_case( - Era1Id::new("sepolia", 5, 8192).with_hash([0x90, 0x91, 0x84, 0x72]), - "sepolia-00005-90918472.era1"; - "Sepolia 00005" + Era1Id::new("sepolia", 0, 8192).with_hash([0x90, 0x91, 0x84, 0x72]), + "sepolia-00000-00001-90918472.era1"; + "Sepolia era 0" )] #[test_case::test_case( - Era1Id::new("sepolia", 19, 8192).with_hash([0xfa, 0x77, 0x00, 0x19]), - "sepolia-00019-fa770019.era1"; - "Sepolia 00019" + Era1Id::new("sepolia", 155648, 8192).with_hash([0xfa, 0x77, 0x00, 0x19]), + "sepolia-00019-00001-fa770019.era1"; + "Sepolia era 19" )] #[test_case::test_case( Era1Id::new("mainnet", 1000, 100), - "mainnet-1000-100.era1"; + "mainnet-00000-00001-00000000.era1"; "ID without hash" )] #[test_case::test_case( - Era1Id::new("sepolia", 12345, 8192).with_hash([0xab, 0xcd, 0xef, 0x12]), - "sepolia-12345-abcdef12.era1"; - "Large block number" + Era1Id::new("sepolia", 101130240, 8192).with_hash([0xab, 0xcd, 0xef, 0x12]), + "sepolia-12345-00001-abcdef12.era1"; + "Large block number era 12345" )] fn test_era1id_file_naming(id: Era1Id, expected_file_name: &str) { let actual_file_name = id.to_file_name(); diff --git a/crates/era/src/era_types.rs b/crates/era/src/era_types.rs new file mode 100644 index 00000000000..65b80f5b384 --- /dev/null +++ b/crates/era/src/era_types.rs @@ -0,0 +1,275 @@ +//! Era types for `.era` files +//! +//! See also + +use crate::{ + consensus_types::{CompressedBeaconState, CompressedSignedBeaconBlock}, + e2s_types::{Entry, IndexEntry, SLOT_INDEX}, +}; + +/// Era file content group +/// +/// Format: `Version | block* | era-state | other-entries* | slot-index(block)? 
| slot-index(state)` +/// See also +#[derive(Debug)] +pub struct EraGroup { + /// Group including all blocks leading up to the era transition in slot order + pub blocks: Vec, + + /// State in the era transition slot + pub era_state: CompressedBeaconState, + + /// Other entries that don't fit into standard categories + pub other_entries: Vec, + + /// Block slot index, omitted for genesis era + pub slot_index: Option, + + /// State slot index + pub state_slot_index: SlotIndex, +} + +impl EraGroup { + /// Create a new era group + pub const fn new( + blocks: Vec, + era_state: CompressedBeaconState, + state_slot_index: SlotIndex, + ) -> Self { + Self { blocks, era_state, other_entries: Vec::new(), slot_index: None, state_slot_index } + } + + /// Create a new era group with block slot index + pub const fn with_block_index( + blocks: Vec, + era_state: CompressedBeaconState, + slot_index: SlotIndex, + state_slot_index: SlotIndex, + ) -> Self { + Self { + blocks, + era_state, + other_entries: Vec::new(), + slot_index: Some(slot_index), + state_slot_index, + } + } + + /// Check if this is a genesis era - no blocks yet + pub fn is_genesis(&self) -> bool { + self.blocks.is_empty() && self.slot_index.is_none() + } + + /// Add another entry to this group + pub fn add_entry(&mut self, entry: Entry) { + self.other_entries.push(entry); + } +} + +/// [`SlotIndex`] records store offsets to data at specific slots +/// from the beginning of the index record to the beginning of the corresponding data. +/// +/// Format: `starting-slot | index | index | index ... | count` +/// +/// See also . +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct SlotIndex { + /// Starting slot number + pub starting_slot: u64, + + /// Offsets to data at each slot + /// 0 indicates no data for that slot + pub offsets: Vec, +} + +impl SlotIndex { + /// Create a new slot index + pub const fn new(starting_slot: u64, offsets: Vec) -> Self { + Self { starting_slot, offsets } + } + + /// Get the number of slots covered by this index + pub fn slot_count(&self) -> usize { + self.offsets.len() + } + + /// Get the offset for a specific slot + pub fn get_offset(&self, slot_index: usize) -> Option { + self.offsets.get(slot_index).copied() + } + + /// Check if a slot has data - non-zero offset + pub fn has_data_at_slot(&self, slot_index: usize) -> bool { + self.get_offset(slot_index).is_some_and(|offset| offset != 0) + } +} + +impl IndexEntry for SlotIndex { + fn new(starting_number: u64, offsets: Vec) -> Self { + Self { starting_slot: starting_number, offsets } + } + + fn entry_type() -> [u8; 2] { + SLOT_INDEX + } + + fn starting_number(&self) -> u64 { + self.starting_slot + } + + fn offsets(&self) -> &[u64] { + &self.offsets + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + e2s_types::{Entry, IndexEntry}, + test_utils::{create_beacon_block, create_beacon_state}, + }; + + #[test] + fn test_slot_index_roundtrip() { + let starting_slot = 1000; + let offsets = vec![100, 200, 300, 400, 500]; + + let slot_index = SlotIndex::new(starting_slot, offsets.clone()); + + let entry = slot_index.to_entry(); + + // Validate entry type + assert_eq!(entry.entry_type, SLOT_INDEX); + + // Convert back to slot index + let recovered = SlotIndex::from_entry(&entry).unwrap(); + + // Verify fields match + assert_eq!(recovered.starting_slot, starting_slot); + assert_eq!(recovered.offsets, offsets); + } + #[test] + fn test_slot_index_basic_operations() { + let starting_slot = 2000; + let offsets = vec![100, 200, 300]; + + let slot_index = 
SlotIndex::new(starting_slot, offsets); + + assert_eq!(slot_index.slot_count(), 3); + assert_eq!(slot_index.starting_slot, 2000); + } + + #[test] + fn test_slot_index_empty_slots() { + let starting_slot = 1000; + let offsets = vec![100, 0, 300, 0, 500]; + + let slot_index = SlotIndex::new(starting_slot, offsets); + + // Test that empty slots return false for has_data_at_slot + // slot 1000: offset 100 + assert!(slot_index.has_data_at_slot(0)); + // slot 1001: offset 0 - empty + assert!(!slot_index.has_data_at_slot(1)); + // slot 1002: offset 300 + assert!(slot_index.has_data_at_slot(2)); + // slot 1003: offset 0 - empty + assert!(!slot_index.has_data_at_slot(3)); + // slot 1004: offset 500 + assert!(slot_index.has_data_at_slot(4)); + } + + #[test] + fn test_era_group_basic_construction() { + let blocks = + vec![create_beacon_block(10), create_beacon_block(15), create_beacon_block(20)]; + let era_state = create_beacon_state(50); + let state_slot_index = SlotIndex::new(1000, vec![100, 200, 300]); + + let era_group = EraGroup::new(blocks, era_state, state_slot_index); + + // Verify initial state + assert_eq!(era_group.blocks.len(), 3); + assert_eq!(era_group.other_entries.len(), 0); + assert_eq!(era_group.slot_index, None); + assert_eq!(era_group.state_slot_index.starting_slot, 1000); + assert_eq!(era_group.state_slot_index.offsets, vec![100, 200, 300]); + } + + #[test] + fn test_era_group_with_block_index() { + let blocks = vec![create_beacon_block(10), create_beacon_block(15)]; + let era_state = create_beacon_state(50); + let block_slot_index = SlotIndex::new(500, vec![50, 100]); + let state_slot_index = SlotIndex::new(1000, vec![200, 300]); + + let era_group = + EraGroup::with_block_index(blocks, era_state, block_slot_index, state_slot_index); + + // Verify state with block index + assert_eq!(era_group.blocks.len(), 2); + assert_eq!(era_group.other_entries.len(), 0); + assert!(era_group.slot_index.is_some()); + + let block_index = era_group.slot_index.as_ref().unwrap(); + assert_eq!(block_index.starting_slot, 500); + assert_eq!(block_index.offsets, vec![50, 100]); + + assert_eq!(era_group.state_slot_index.starting_slot, 1000); + assert_eq!(era_group.state_slot_index.offsets, vec![200, 300]); + } + + #[test] + fn test_era_group_genesis_check() { + // Genesis era - no blocks, no block slot index + let era_state = create_beacon_state(50); + let state_slot_index = SlotIndex::new(0, vec![100]); + + let genesis_era = EraGroup::new(vec![], era_state, state_slot_index); + assert!(genesis_era.is_genesis()); + + // Non-genesis era - has blocks + let blocks = vec![create_beacon_block(10)]; + let era_state = create_beacon_state(50); + let state_slot_index = SlotIndex::new(1000, vec![100]); + + let normal_era = EraGroup::new(blocks, era_state, state_slot_index); + assert!(!normal_era.is_genesis()); + + // Non-genesis era - has block slot index + let era_state = create_beacon_state(50); + let block_slot_index = SlotIndex::new(500, vec![50]); + let state_slot_index = SlotIndex::new(1000, vec![100]); + + let era_with_index = + EraGroup::with_block_index(vec![], era_state, block_slot_index, state_slot_index); + assert!(!era_with_index.is_genesis()); + } + + #[test] + fn test_era_group_add_entries() { + let blocks = vec![create_beacon_block(10)]; + let era_state = create_beacon_state(50); + let state_slot_index = SlotIndex::new(1000, vec![100]); + + // Create and verify group + let mut era_group = EraGroup::new(blocks, era_state, state_slot_index); + assert_eq!(era_group.other_entries.len(), 0); + + // 
Create custom entries with different types + let entry1 = Entry::new([0x01, 0x01], vec![1, 2, 3, 4]); + let entry2 = Entry::new([0x02, 0x02], vec![5, 6, 7, 8]); + + // Add those entries + era_group.add_entry(entry1); + era_group.add_entry(entry2); + + // Verify entries were added correctly + assert_eq!(era_group.other_entries.len(), 2); + assert_eq!(era_group.other_entries[0].entry_type, [0x01, 0x01]); + assert_eq!(era_group.other_entries[0].data, vec![1, 2, 3, 4]); + assert_eq!(era_group.other_entries[1].entry_type, [0x02, 0x02]); + assert_eq!(era_group.other_entries[1].data, vec![5, 6, 7, 8]); + } +} diff --git a/crates/era/src/execution_types.rs b/crates/era/src/execution_types.rs index 27030b112a1..6feb2873fbd 100644 --- a/crates/era/src/execution_types.rs +++ b/crates/era/src/execution_types.rs @@ -1,4 +1,4 @@ -//! Execution layer specific types for era1 files +//! Execution layer specific types for `.era1` files //! //! Contains implementations for compressed execution layer data structures: //! - [`CompressedHeader`] - Block header @@ -9,8 +9,72 @@ //! These types use Snappy compression to match the specification. //! //! See also - -use crate::e2s_types::{E2sError, Entry}; +//! +//! # Examples +//! +//! ## [`CompressedHeader`] +//! +//! ```rust +//! use alloy_consensus::Header; +//! use reth_era::{execution_types::CompressedHeader, DecodeCompressed}; +//! +//! let header = Header { number: 100, ..Default::default() }; +//! // Compress the header: rlp encoding and Snappy compression +//! let compressed = CompressedHeader::from_header(&header)?; +//! // Decompressed and decode typed compressed header +//! let decoded_header: Header = compressed.decode_header()?; +//! assert_eq!(decoded_header.number, 100); +//! # Ok::<(), reth_era::e2s_types::E2sError>(()) +//! ``` +//! +//! ## [`CompressedBody`] +//! +//! ```rust +//! use alloy_consensus::{BlockBody, Header}; +//! use alloy_primitives::Bytes; +//! use reth_era::{execution_types::CompressedBody, DecodeCompressed}; +//! use reth_ethereum_primitives::TransactionSigned; +//! +//! let body: BlockBody = BlockBody { +//! transactions: vec![Bytes::from(vec![1, 2, 3])], +//! ommers: vec![], +//! withdrawals: None, +//! }; +//! // Compress the body: rlp encoding and snappy compression +//! let compressed_body = CompressedBody::from_body(&body)?; +//! // Decode back to typed body by decompressing and decoding +//! let decoded_body: alloy_consensus::BlockBody = +//! compressed_body.decode()?; +//! assert_eq!(decoded_body.transactions.len(), 1); +//! # Ok::<(), reth_era::e2s_types::E2sError>(()) +//! ``` +//! +//! ## [`CompressedReceipts`] +//! +//! ```rust +//! use alloy_consensus::ReceiptWithBloom; +//! use reth_era::{execution_types::CompressedReceipts, DecodeCompressed}; +//! use reth_ethereum_primitives::{Receipt, TxType}; +//! +//! let receipt = Receipt { +//! tx_type: TxType::Legacy, +//! success: true, +//! cumulative_gas_used: 21000, +//! logs: vec![], +//! }; +//! let receipt_with_bloom = ReceiptWithBloom { receipt, logs_bloom: Default::default() }; +//! // Compress the receipt: rlp encoding and snappy compression +//! let compressed_receipt_data = CompressedReceipts::from_encodable(&receipt_with_bloom)?; +//! // Get raw receipt by decoding and decompressing compressed and encoded receipt +//! let decompressed_receipt = compressed_receipt_data.decode::()?; +//! assert_eq!(decompressed_receipt.receipt.cumulative_gas_used, 21000); +//! # Ok::<(), reth_era::e2s_types::E2sError>(()) +//! 
`````` + +use crate::{ + e2s_types::{E2sError, Entry}, + DecodeCompressed, +}; use alloy_consensus::{Block, BlockBody, Header}; use alloy_primitives::{B256, U256}; use alloy_rlp::{Decodable, Encodable}; @@ -96,12 +160,6 @@ pub struct CompressedHeader { pub data: Vec, } -/// Extension trait for generic decoding from compressed data -pub trait DecodeCompressed { - /// Decompress and decode the data into the given type - fn decode(&self) -> Result; -} - impl CompressedHeader { /// Create a new [`CompressedHeader`] from compressed data pub const fn new(data: Vec) -> Self { @@ -161,9 +219,9 @@ impl CompressedHeader { self.decode() } - /// Create a [`CompressedHeader`] from an `alloy_consensus::Header` - pub fn from_header(header: &Header) -> Result { - let encoder = SnappyRlpCodec::
::new(); + /// Create a [`CompressedHeader`] from a header + pub fn from_header(header: &H) -> Result { + let encoder = SnappyRlpCodec::new(); let compressed = encoder.encode(header)?; Ok(Self::new(compressed)) } @@ -248,9 +306,9 @@ impl CompressedBody { .map_err(|e| E2sError::Rlp(format!("Failed to decode RLP data: {e}"))) } - /// Create a [`CompressedBody`] from an `alloy_consensus::BlockBody` - pub fn from_body(body: &BlockBody) -> Result { - let encoder = SnappyRlpCodec::>::new(); + /// Create a [`CompressedBody`] from a block body (e.g. `alloy_consensus::BlockBody`) + pub fn from_body(body: &B) -> Result { + let encoder = SnappyRlpCodec::new(); let compressed = encoder.encode(body)?; Ok(Self::new(compressed)) } @@ -502,34 +560,14 @@ impl BlockTuple { #[cfg(test)] mod tests { use super::*; + use crate::test_utils::{create_header, create_test_receipt, create_test_receipts}; use alloy_eips::eip4895::Withdrawals; - use alloy_primitives::{Address, Bytes, B64}; + use alloy_primitives::{Bytes, U256}; + use reth_ethereum_primitives::{Receipt, TxType}; #[test] fn test_header_conversion_roundtrip() { - let header = Header { - parent_hash: B256::default(), - ommers_hash: B256::default(), - beneficiary: Address::default(), - state_root: B256::default(), - transactions_root: B256::default(), - receipts_root: B256::default(), - logs_bloom: Default::default(), - difficulty: U256::from(123456u64), - number: 100, - gas_limit: 5000000, - gas_used: 21000, - timestamp: 1609459200, - extra_data: Bytes::default(), - mix_hash: B256::default(), - nonce: B64::default(), - base_fee_per_gas: Some(10), - withdrawals_root: None, - blob_gas_used: None, - excess_blob_gas: None, - parent_beacon_block_root: None, - requests_hash: None, - }; + let header = create_header(); let compressed_header = CompressedHeader::from_header(&header).unwrap(); @@ -595,29 +633,7 @@ mod tests { #[test] fn test_block_tuple_with_data() { // Create block with transactions and withdrawals - let header = Header { - parent_hash: B256::default(), - ommers_hash: B256::default(), - beneficiary: Address::default(), - state_root: B256::default(), - transactions_root: B256::default(), - receipts_root: B256::default(), - logs_bloom: Default::default(), - difficulty: U256::from(123456u64), - number: 100, - gas_limit: 5000000, - gas_used: 21000, - timestamp: 1609459200, - extra_data: Bytes::default(), - mix_hash: B256::default(), - nonce: B64::default(), - base_fee_per_gas: Some(10), - withdrawals_root: Some(B256::default()), - blob_gas_used: None, - excess_blob_gas: None, - parent_beacon_block_root: None, - requests_hash: None, - }; + let header = create_header(); let transactions = vec![Bytes::from(vec![1, 2, 3, 4]), Bytes::from(vec![5, 6, 7, 8])]; @@ -642,4 +658,63 @@ mod tests { assert_eq!(decoded_block.body.transactions[1], Bytes::from(vec![5, 6, 7, 8])); assert!(decoded_block.body.withdrawals.is_some()); } + + #[test] + fn test_single_receipt_compression_roundtrip() { + let test_receipt = create_test_receipt(TxType::Eip1559, true, 21000, 2); + + // Compress the receipt + let compressed_receipts = + CompressedReceipts::from_encodable(&test_receipt).expect("Failed to compress receipt"); + + // Verify compression + assert!(!compressed_receipts.data.is_empty()); + + // Decode the compressed receipt back + let decoded_receipt: Receipt = + compressed_receipts.decode().expect("Failed to decode compressed receipt"); + + // Verify that the decoded receipt matches the original + assert_eq!(decoded_receipt.tx_type, test_receipt.tx_type); + 
+        assert_eq!(decoded_receipt.success, test_receipt.success);
+        assert_eq!(decoded_receipt.cumulative_gas_used, test_receipt.cumulative_gas_used);
+        assert_eq!(decoded_receipt.logs.len(), test_receipt.logs.len());
+
+        // Verify each log
+        for (original_log, decoded_log) in test_receipt.logs.iter().zip(decoded_receipt.logs.iter())
+        {
+            assert_eq!(decoded_log.address, original_log.address);
+            assert_eq!(decoded_log.data.topics(), original_log.data.topics());
+        }
+    }
+
+    #[test]
+    fn test_receipt_list_compression() {
+        let receipts = create_test_receipts();
+
+        // Compress the list of receipts
+        let compressed_receipts = CompressedReceipts::from_encodable_list(&receipts)
+            .expect("Failed to compress receipt list");
+
+        // Decode the compressed receipts back
+        // Note: decoding of real era files will most likely target `Vec<ReceiptWithBloom>`
+        let decoded_receipts: Vec<Receipt> =
+            compressed_receipts.decode().expect("Failed to decode compressed receipt list");
+
+        // Verify that the decoded receipts match the original
+        assert_eq!(decoded_receipts.len(), receipts.len());
+
+        for (original, decoded) in receipts.iter().zip(decoded_receipts.iter()) {
+            assert_eq!(decoded.tx_type, original.tx_type);
+            assert_eq!(decoded.success, original.success);
+            assert_eq!(decoded.cumulative_gas_used, original.cumulative_gas_used);
+            assert_eq!(decoded.logs.len(), original.logs.len());
+
+            for (original_log, decoded_log) in original.logs.iter().zip(decoded.logs.iter()) {
+                assert_eq!(decoded_log.address, original_log.address);
+                assert_eq!(decoded_log.data.topics(), original_log.data.topics());
+            }
+        }
+    }
 }
diff --git a/crates/era/src/lib.rs b/crates/era/src/lib.rs
index 6007da18738..45383e3eead 100644
--- a/crates/era/src/lib.rs
+++ b/crates/era/src/lib.rs
@@ -1,19 +1,39 @@
 //! Era and Era1 files support for Ethereum history expiry.
 //!
-//!
-//! Era files are special instances of .e2s files with a strict content format
-//! optimized for reading and long-term storage and distribution.
-//!
 //! Era1 files use the same e2store foundation but are specialized for
 //! execution layer block history, following the format:
 //! Version | block-tuple* | other-entries* | Accumulator | `BlockIndex`
 //!
+//! Era files are special instances of `.e2s` files with a strict content format
+//! optimized for reading and long-term storage and distribution.
+//!
 //! See also:
 //! - E2store format:
+//! - Era format:
 //! - Era1 format:
 
+pub mod consensus_types;
 pub mod e2s_file;
 pub mod e2s_types;
 pub mod era1_file;
 pub mod era1_types;
+pub mod era_types;
 pub mod execution_types;
+#[cfg(test)]
+pub(crate) mod test_utils;
+
+use crate::e2s_types::E2sError;
+use alloy_rlp::Decodable;
+use ssz::Decode;
+
+/// Extension trait for generic decoding from compressed data
+pub trait DecodeCompressed {
+    /// Decompress and decode the data into the given type
+    fn decode<T: Decodable>(&self) -> Result<T, E2sError>;
+}
+
+/// Extension trait for generic decoding from compressed ssz data
+pub trait DecodeCompressedSsz {
+    /// Decompress and decode the SSZ data into the given type
+    fn decode<T: Decode>(&self) -> Result<T, E2sError>;
+}
diff --git a/crates/era/src/test_utils.rs b/crates/era/src/test_utils.rs
new file mode 100644
index 00000000000..96b2545be16
--- /dev/null
+++ b/crates/era/src/test_utils.rs
@@ -0,0 +1,177 @@
+//! Utility helpers to create era data structures for testing purposes.
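For orientation, a minimal sketch of how the relocated `DecodeCompressed` trait is meant to fit together with the Era1 types touched above. It leans only on APIs visible in this diff (`Era1File`, the `IndexEntry::starting_number()` accessor, and the compressed block-tuple fields); the decode target types mirror the crate's tests, and the `eyre` error handling is an assumption rather than prescribed usage:

```rust
use alloy_consensus::{BlockBody, Header, ReceiptWithBloom};
use reth_era::{e2s_types::IndexEntry, era1_file::Era1File, DecodeCompressed};
use reth_ethereum_primitives::TransactionSigned;

/// Decode the first block tuple of an already-read Era1 file.
/// Assumes `DecodeCompressed` is implemented for each compressed field,
/// as the tests in this diff suggest, and that the file is non-empty.
fn inspect_first_block(file: &Era1File) -> eyre::Result<()> {
    let block = &file.group.blocks[0];
    // `starting_number` is now a trait method (see the `IndexEntry` changes below).
    let number = file.group.block_index.starting_number();

    // Every field of a block tuple is snappy-compressed RLP.
    let header: Header = block.header.decode()?;
    let body: BlockBody<TransactionSigned> = block.body.decode()?;
    let receipts: Vec<ReceiptWithBloom> = block.receipts.decode()?;

    println!(
        "block {number}: {} txs, {} receipts, gas used {}",
        body.transactions.len(),
        receipts.len(),
        header.gas_used
    );
    Ok(())
}
```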
+
+use crate::{
+    consensus_types::{CompressedBeaconState, CompressedSignedBeaconBlock},
+    execution_types::{
+        BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts, TotalDifficulty,
+    },
+};
+use alloy_consensus::{Header, ReceiptWithBloom};
+use alloy_primitives::{Address, BlockNumber, Bytes, Log, LogData, B256, B64, U256};
+use reth_ethereum_primitives::{Receipt, TxType};
+
+// Helper function to create a test header
+pub(crate) fn create_header() -> Header {
+    Header {
+        parent_hash: B256::default(),
+        ommers_hash: B256::default(),
+        beneficiary: Address::default(),
+        state_root: B256::default(),
+        transactions_root: B256::default(),
+        receipts_root: B256::default(),
+        logs_bloom: Default::default(),
+        difficulty: U256::from(123456u64),
+        number: 100,
+        gas_limit: 5000000,
+        gas_used: 21000,
+        timestamp: 1609459200,
+        extra_data: Bytes::default(),
+        mix_hash: B256::default(),
+        nonce: B64::default(),
+        base_fee_per_gas: Some(10),
+        withdrawals_root: Some(B256::default()),
+        blob_gas_used: None,
+        excess_blob_gas: None,
+        parent_beacon_block_root: None,
+        requests_hash: None,
+    }
+}
+
+// Helper function to create a test receipt with customizable parameters
+pub(crate) fn create_test_receipt(
+    tx_type: TxType,
+    success: bool,
+    cumulative_gas_used: u64,
+    log_count: usize,
+) -> Receipt {
+    let mut logs = Vec::new();
+
+    for i in 0..log_count {
+        let address_byte = (i + 1) as u8;
+        let topic_byte = (i + 10) as u8;
+        let data_byte = (i + 100) as u8;
+
+        logs.push(Log {
+            address: Address::from([address_byte; 20]),
+            data: LogData::new_unchecked(
+                vec![B256::from([topic_byte; 32]), B256::from([topic_byte + 1; 32])],
+                alloy_primitives::Bytes::from(vec![data_byte, data_byte + 1, data_byte + 2]),
+            ),
+        });
+    }
+
+    Receipt { tx_type, success, cumulative_gas_used, logs }
+}
+
+// Helper function to create a list of test receipts with different characteristics
+pub(crate) fn create_test_receipts() -> Vec<Receipt> {
+    vec![
+        // Legacy transaction, successful, no logs
+        create_test_receipt(TxType::Legacy, true, 21000, 0),
+        // EIP-2930 transaction, failed, one log
+        create_test_receipt(TxType::Eip2930, false, 42000, 1),
+        // EIP-1559 transaction, successful, multiple logs
+        create_test_receipt(TxType::Eip1559, true, 63000, 3),
+        // EIP-4844 transaction, successful, two logs
+        create_test_receipt(TxType::Eip4844, true, 84000, 2),
+        // EIP-7702 transaction, failed, no logs
+        create_test_receipt(TxType::Eip7702, false, 105000, 0),
+    ]
+}
+
+pub(crate) fn create_test_receipt_with_bloom(
+    tx_type: TxType,
+    success: bool,
+    cumulative_gas_used: u64,
+    log_count: usize,
+) -> ReceiptWithBloom {
+    let receipt = create_test_receipt(tx_type, success, cumulative_gas_used, log_count);
+    ReceiptWithBloom { receipt: receipt.into(), logs_bloom: Default::default() }
+}
+
+// Helper function to create a sample block tuple
+pub(crate) fn create_sample_block(data_size: usize) -> BlockTuple {
+    // Create a compressed header with sample data - not compressed for simplicity
+    let header_data = vec![0xAA; data_size];
+    let header = CompressedHeader::new(header_data);
+
+    // Create a compressed body with sample data - not compressed for simplicity
+    let body_data = vec![0xBB; data_size * 2];
+    let body = CompressedBody::new(body_data);
+
+    // Create compressed receipts with sample data - not compressed for simplicity
+    let receipts_data = vec![0xCC; data_size];
+    let receipts = CompressedReceipts::new(receipts_data);
+
+    let difficulty = TotalDifficulty::new(U256::from(data_size));
+
+    // Create and return the block tuple
+    BlockTuple::new(header, body, receipts, difficulty)
+}
+
+// Helper function to create a test block with compressed data
+pub(crate) fn create_test_block_with_compressed_data(number: BlockNumber) -> BlockTuple {
+    use alloy_consensus::{BlockBody, Header};
+    use alloy_eips::eip4895::Withdrawals;
+    use alloy_primitives::{Address, Bytes, B256, B64, U256};
+
+    // Create test header
+    let header = Header {
+        parent_hash: B256::default(),
+        ommers_hash: B256::default(),
+        beneficiary: Address::default(),
+        state_root: B256::default(),
+        transactions_root: B256::default(),
+        receipts_root: B256::default(),
+        logs_bloom: Default::default(),
+        difficulty: U256::from(number * 1000),
+        number,
+        gas_limit: 5000000,
+        gas_used: 21000,
+        timestamp: 1609459200 + number,
+        extra_data: Bytes::default(),
+        mix_hash: B256::default(),
+        nonce: B64::default(),
+        base_fee_per_gas: Some(10),
+        withdrawals_root: Some(B256::default()),
+        blob_gas_used: None,
+        excess_blob_gas: None,
+        parent_beacon_block_root: None,
+        requests_hash: None,
+    };
+
+    // Create test body
+    let body: BlockBody<Bytes> = BlockBody {
+        transactions: vec![Bytes::from(vec![(number % 256) as u8; 10])],
+        ommers: vec![],
+        withdrawals: Some(Withdrawals(vec![])),
+    };
+
+    // Create test receipt list with bloom
+    let receipts_list: Vec<ReceiptWithBloom> = vec![create_test_receipt_with_bloom(
+        reth_ethereum_primitives::TxType::Legacy,
+        true,
+        21000,
+        0,
+    )];
+
+    // Compress the test data
+    let compressed_header = CompressedHeader::from_header(&header).unwrap();
+    let compressed_body = CompressedBody::from_body(&body).unwrap();
+    let compressed_receipts = CompressedReceipts::from_encodable_list(&receipts_list).unwrap();
+    let total_difficulty = TotalDifficulty::new(U256::from(number * 1000));
+
+    BlockTuple::new(compressed_header, compressed_body, compressed_receipts, total_difficulty)
+}
+
+/// Helper function to create a simple beacon block
+pub(crate) fn create_beacon_block(data_size: usize) -> CompressedSignedBeaconBlock {
+    let block_data = vec![0xAA; data_size];
+    CompressedSignedBeaconBlock::new(block_data)
+}
+
+/// Helper function to create a simple beacon state
+pub(crate) fn create_beacon_state(data_size: usize) -> CompressedBeaconState {
+    let state_data = vec![0xBB; data_size];
+    CompressedBeaconState::new(state_data)
+}
diff --git a/crates/era/tests/it/dd.rs b/crates/era/tests/it/dd.rs
index 7aa0afb6e20..0c656a512f9 100644
--- a/crates/era/tests/it/dd.rs
+++ b/crates/era/tests/it/dd.rs
@@ -4,6 +4,7 @@
 use alloy_consensus::{BlockBody, Header};
 use alloy_primitives::U256;
 use reth_era::{
+    e2s_types::IndexEntry,
     era1_file::{Era1Reader, Era1Writer},
     execution_types::CompressedBody,
 };
@@ -30,7 +31,7 @@ async fn test_mainnet_era1_only_file_decompression_and_decoding() -> eyre::Resul
     for &block_idx in &test_block_indices {
         let block = &file.group.blocks[block_idx];
-        let block_number = file.group.block_index.starting_number + block_idx as u64;
+        let block_number = file.group.block_index.starting_number() + block_idx as u64;
 
         println!(
             "\n Testing block {}, compressed body size: {} bytes",
@@ -110,7 +111,7 @@ async fn test_mainnet_era1_only_file_decompression_and_decoding() -> eyre::Resul
     for &idx in &test_block_indices {
         let original_block = &file.group.blocks[idx];
         let read_back_block = &read_back_file.group.blocks[idx];
-        let block_number = file.group.block_index.starting_number + idx as u64;
+        let block_number = file.group.block_index.starting_number() + idx as u64;
 
         println!("Block {block_number} details:");
         println!("
Header size: {} bytes", original_block.header.data.len()); diff --git a/crates/era/tests/it/genesis.rs b/crates/era/tests/it/genesis.rs index 1812a77798a..80869f97fa0 100644 --- a/crates/era/tests/it/genesis.rs +++ b/crates/era/tests/it/genesis.rs @@ -3,13 +3,12 @@ //! These tests verify proper decompression and decoding of genesis blocks //! from different networks. -use alloy_consensus::{BlockBody, Header}; -use reth_era::execution_types::CompressedBody; -use reth_ethereum_primitives::TransactionSigned; - use crate::{ Era1TestDownloader, ERA1_MAINNET_FILES_NAMES, ERA1_SEPOLIA_FILES_NAMES, MAINNET, SEPOLIA, }; +use alloy_consensus::{BlockBody, Header}; +use reth_era::{e2s_types::IndexEntry, execution_types::CompressedBody}; +use reth_ethereum_primitives::TransactionSigned; #[tokio::test(flavor = "multi_thread")] #[ignore = "download intensive"] @@ -23,7 +22,7 @@ async fn test_mainnet_genesis_block_decompression() -> eyre::Result<()> { for &block_idx in &test_blocks { let block = &file.group.blocks[block_idx]; - let block_number = file.group.block_index.starting_number + block_idx as u64; + let block_number = file.group.block_index.starting_number() + block_idx as u64; println!( "Testing block {}, compressed body size: {} bytes", @@ -75,7 +74,7 @@ async fn test_sepolia_genesis_block_decompression() -> eyre::Result<()> { for &block_idx in &test_blocks { let block = &file.group.blocks[block_idx]; - let block_number = file.group.block_index.starting_number + block_idx as u64; + let block_number = file.group.block_index.starting_number() + block_idx as u64; println!( "Testing block {}, compressed body size: {} bytes", diff --git a/crates/era/tests/it/main.rs b/crates/era/tests/it/main.rs index fa939819189..86bfb3b3ac5 100644 --- a/crates/era/tests/it/main.rs +++ b/crates/era/tests/it/main.rs @@ -49,7 +49,7 @@ const ERA1_MAINNET_FILES_NAMES: [&str; 6] = [ /// Sepolia network name const SEPOLIA: &str = "sepolia"; -/// Default sepolia mainnet url +/// Default sepolia url /// for downloading sepolia `.era1` files const SEPOLIA_URL: &str = "https://era.ithaca.xyz/sepolia-era1/"; diff --git a/crates/era/tests/it/roundtrip.rs b/crates/era/tests/it/roundtrip.rs index a444fe9c570..0689ef383e2 100644 --- a/crates/era/tests/it/roundtrip.rs +++ b/crates/era/tests/it/roundtrip.rs @@ -7,12 +7,15 @@ //! - Writing the data back to a new file //! 
- Confirming that all original data is preserved throughout the process
 
-use alloy_consensus::{BlockBody, BlockHeader, Header};
+use alloy_consensus::{BlockBody, BlockHeader, Header, ReceiptWithBloom};
 use rand::{prelude::IndexedRandom, rng};
 use reth_era::{
+    e2s_types::IndexEntry,
     era1_file::{Era1File, Era1Reader, Era1Writer},
     era1_types::{Era1Group, Era1Id},
-    execution_types::{BlockTuple, CompressedBody, CompressedHeader, TotalDifficulty},
+    execution_types::{
+        BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts, TotalDifficulty,
+    },
 };
 use reth_ethereum_primitives::TransactionSigned;
 use std::io::Cursor;
@@ -71,7 +74,7 @@ async fn test_file_roundtrip(
     for &block_id in &test_block_indices {
         let original_block = &original_file.group.blocks[block_id];
         let roundtrip_block = &roundtrip_file.group.blocks[block_id];
-        let block_number = original_file.group.block_index.starting_number + block_id as u64;
+        let block_number = original_file.group.block_index.starting_number() + block_id as u64;
 
         println!("Testing roundtrip for block {block_number}");
 
@@ -143,6 +146,21 @@ async fn test_file_roundtrip(
             "Ommers count should match after roundtrip"
         );
 
+        // Decode receipts
+        let original_receipts_decoded =
+            original_block.receipts.decode::<Vec<ReceiptWithBloom>>()?;
+        let roundtrip_receipts_decoded =
+            roundtrip_block.receipts.decode::<Vec<ReceiptWithBloom>>()?;
+
+        assert_eq!(
+            original_receipts_decoded, roundtrip_receipts_decoded,
+            "Block {block_number} decoded receipts should be identical after roundtrip"
+        );
+        assert_eq!(
+            original_receipts_data, roundtrip_receipts_data,
+            "Block {block_number} receipts data should be identical after roundtrip"
+        );
+
         // Check withdrawals presence/absence matches
         assert_eq!(
             original_decoded_body.withdrawals.is_some(),
@@ -178,11 +196,21 @@ async fn test_file_roundtrip(
             "Transaction count should match after re-compression"
         );
 
+        // Re-encode and re-compress the receipts
+        let recompressed_receipts =
+            CompressedReceipts::from_encodable(&roundtrip_receipts_decoded)?;
+        let recompressed_receipts_data = recompressed_receipts.decompress()?;
+
+        assert_eq!(
+            original_receipts_data.len(),
+            recompressed_receipts_data.len(),
+            "Receipts length should match after re-compression"
+        );
+
         let recompressed_block = BlockTuple::new(
             recompressed_header,
             recompressed_body,
-            original_block.receipts.clone(), /* reuse original receipts directly as it not
-                                              * possible to decode them */
+            recompressed_receipts,
             TotalDifficulty::new(original_block.total_difficulty.value),
         );
 
diff --git a/crates/ethereum/cli/Cargo.toml b/crates/ethereum/cli/Cargo.toml
index 78080bcbc42..a32ead66fba 100644
--- a/crates/ethereum/cli/Cargo.toml
+++ b/crates/ethereum/cli/Cargo.toml
@@ -17,60 +17,15 @@ reth-cli-commands.workspace = true
 reth-cli-runner.workspace = true
 reth-chainspec.workspace = true
 reth-db.workspace = true
-reth-ethereum-primitives.workspace = true
-reth-network.workspace = true
 reth-node-builder.workspace = true
 reth-node-core.workspace = true
 reth-node-ethereum.workspace = true
 reth-node-metrics.workspace = true
 reth-tracing.workspace = true
-reth-db-api.workspace = true
-reth-consensus.workspace = true
-reth-errors.workspace = true
-reth-ethereum-payload-builder.workspace = true
-reth-evm.workspace = true
-reth-execution-types.workspace = true
-reth-fs-util.workspace = true
 reth-node-api.workspace = true
-reth-basic-payload-builder.workspace = true
-reth-primitives-traits.workspace = true
-reth-provider.workspace = true
-reth-revm.workspace = true
-reth-stages.workspace = true
-reth-transaction-pool.workspace
= true -reth-trie.workspace = true -reth-trie-db.workspace = true -reth-cli-util.workspace = true -reth-config.workspace = true -reth-downloaders.workspace = true -reth-exex.workspace = true -reth-network-api.workspace = true -reth-network-p2p.workspace = true -reth-node-events.workspace = true -reth-prune.workspace = true -reth-static-file.workspace = true -reth-tasks.workspace = true -reth-payload-builder.workspace = true - -# serde -serde_json.workspace = true - -# backoff -backon.workspace = true - -# test -similar-asserts.workspace = true - -# async -tokio.workspace = true -futures.workspace = true # alloy -alloy-eips = { workspace = true, features = ["kzg"] } -alloy-rlp.workspace = true -alloy-rpc-types = { workspace = true, features = ["engine"] } alloy-consensus.workspace = true -alloy-primitives.workspace = true # misc clap.workspace = true @@ -78,38 +33,33 @@ eyre.workspace = true tracing.workspace = true [dev-dependencies] -# reth -reth-cli-commands.workspace = true - # fs tempfile.workspace = true [features] -default = ["jemalloc", "reth-revm/portable"] +default = ["jemalloc"] dev = ["reth-cli-commands/arbitrary"] asm-keccak = [ "reth-node-core/asm-keccak", - "alloy-primitives/asm-keccak", + "reth-node-ethereum/asm-keccak", ] jemalloc = [ - "reth-cli-util/jemalloc", "reth-node-core/jemalloc", "reth-node-metrics/jemalloc", ] jemalloc-prof = [ - "reth-cli-util/jemalloc", - "reth-cli-util/jemalloc-prof", + "reth-node-core/jemalloc", ] -tracy-allocator = ["reth-cli-util/tracy-allocator"] +tracy-allocator = [] # Because jemalloc is default and preferred over snmalloc when both features are # enabled, `--no-default-features` should be used when enabling snmalloc or # snmalloc-native. -snmalloc = ["reth-cli-util/snmalloc"] -snmalloc-native = ["reth-cli-util/snmalloc-native"] +snmalloc = [] +snmalloc-native = [] min-error-logs = ["tracing/release_max_level_error"] min-warn-logs = ["tracing/release_max_level_warn"] diff --git a/crates/ethereum/cli/src/debug_cmd/build_block.rs b/crates/ethereum/cli/src/debug_cmd/build_block.rs deleted file mode 100644 index 22260f7e337..00000000000 --- a/crates/ethereum/cli/src/debug_cmd/build_block.rs +++ /dev/null @@ -1,270 +0,0 @@ -//! Command for debugging block building. 
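The `reth debug build-block` command whose deleted listing follows accepted raw transactions as comma-separated hex strings and RLP-decoded them before inserting them into the pool. A self-contained sketch of just that decoding step, mirroring the imports the deleted code used; the input string is a placeholder, not a valid transaction payload:

```rust
use alloy_primitives::hex;
use alloy_rlp::Decodable;
use reth_ethereum_primitives::TransactionSigned;

/// Decode one raw transaction supplied as a hex string, as the deleted
/// command's `--transactions` flag did.
fn decode_raw_tx(tx_hex: &str) -> eyre::Result<TransactionSigned> {
    // Strip the hex encoding first, then RLP-decode the resulting bytes.
    let raw = hex::decode(tx_hex)?;
    Ok(TransactionSigned::decode(&mut raw.as_slice())?)
}
```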
-use alloy_consensus::BlockHeader; -use alloy_eips::{ - eip2718::Encodable2718, eip4844::env_settings::EnvKzgSettings, - eip7594::BlobTransactionSidecarVariant, -}; -use alloy_primitives::{Address, Bytes, B256}; -use alloy_rlp::Decodable; -use alloy_rpc_types::engine::{BlobsBundleV1, PayloadAttributes}; -use clap::Parser; -use eyre::Context; -use reth_basic_payload_builder::{BuildArguments, BuildOutcome, PayloadBuilder, PayloadConfig}; -use reth_chainspec::{ChainSpec, EthereumHardforks}; -use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; -use reth_cli_runner::CliContext; -use reth_consensus::{Consensus, FullConsensus}; -use reth_errors::{ConsensusError, RethResult}; -use reth_ethereum_payload_builder::EthereumBuilderConfig; -use reth_ethereum_primitives::{EthPrimitives, TransactionSigned}; -use reth_evm::{execute::Executor, ConfigureEvm}; -use reth_execution_types::ExecutionOutcome; -use reth_fs_util as fs; -use reth_node_api::{BlockTy, EngineApiMessageVersion, PayloadBuilderAttributes}; -use reth_node_ethereum::{consensus::EthBeaconConsensus, EthEvmConfig}; -use reth_primitives_traits::{Block as _, SealedBlock, SealedHeader, SignedTransaction}; -use reth_provider::{ - providers::{BlockchainProvider, ProviderNodeTypes}, - BlockHashReader, BlockReader, BlockWriter, ChainSpecProvider, ProviderFactory, - StageCheckpointReader, StateProviderFactory, -}; -use reth_revm::{cached::CachedReads, cancelled::CancelOnDrop, database::StateProviderDatabase}; -use reth_stages::StageId; -use reth_transaction_pool::{ - blobstore::InMemoryBlobStore, BlobStore, EthPooledTransaction, PoolConfig, TransactionOrigin, - TransactionPool, TransactionValidationTaskExecutor, -}; -use std::{path::PathBuf, str::FromStr, sync::Arc}; -use tracing::*; - -/// `reth debug build-block` command -/// This debug routine requires that the node is positioned at the block before the target. -/// The script will then parse the block and attempt to build a similar one. -#[derive(Debug, Parser)] -pub struct Command { - #[command(flatten)] - env: EnvironmentArgs, - - #[arg(long)] - parent_beacon_block_root: Option, - - #[arg(long)] - prev_randao: B256, - - #[arg(long)] - timestamp: u64, - - #[arg(long)] - suggested_fee_recipient: Address, - - /// Array of transactions. - /// NOTE: 4844 transactions must be provided in the same order as they appear in the blobs - /// bundle. - #[arg(long, value_delimiter = ',')] - transactions: Vec, - - /// Path to the file that contains a corresponding blobs bundle. - #[arg(long)] - blobs_bundle_path: Option, -} - -impl> Command { - /// Fetches the best block from the database. - /// - /// If the database is empty, returns the genesis block. - fn lookup_best_block>( - &self, - factory: ProviderFactory, - ) -> RethResult>>> { - let provider = factory.provider()?; - - let best_number = - provider.get_stage_checkpoint(StageId::Finish)?.unwrap_or_default().block_number; - let best_hash = provider - .block_hash(best_number)? - .expect("the hash for the latest block is missing, database is corrupt"); - - Ok(Arc::new( - provider - .block(best_number.into())? 
- .expect("the header for the latest block is missing, database is corrupt") - .seal_unchecked(best_hash), - )) - } - - /// Returns the default KZG settings - const fn kzg_settings(&self) -> eyre::Result { - Ok(EnvKzgSettings::Default) - } - - /// Execute `debug build-block` command - pub async fn execute>( - self, - ctx: CliContext, - ) -> eyre::Result<()> { - let Environment { provider_factory, .. } = self.env.init::(AccessRights::RW)?; - - let consensus: Arc> = - Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec())); - - // fetch the best block from the database - let best_block = self - .lookup_best_block(provider_factory.clone()) - .wrap_err("the head block is missing")?; - - let blockchain_db = BlockchainProvider::new(provider_factory.clone())?; - let blob_store = InMemoryBlobStore::default(); - - let validator = TransactionValidationTaskExecutor::eth_builder(blockchain_db.clone()) - .with_head_timestamp(best_block.timestamp) - .kzg_settings(self.kzg_settings()?) - .with_additional_tasks(1) - .build_with_tasks(ctx.task_executor.clone(), blob_store.clone()); - - let transaction_pool = reth_transaction_pool::Pool::eth_pool( - validator, - blob_store.clone(), - PoolConfig::default(), - ); - info!(target: "reth::cli", "Transaction pool initialized"); - - let mut blobs_bundle = self - .blobs_bundle_path - .map(|path| -> eyre::Result { - let contents = fs::read_to_string(&path) - .wrap_err(format!("could not read {}", path.display()))?; - serde_json::from_str(&contents).wrap_err("failed to deserialize blobs bundle") - }) - .transpose()?; - - for tx_bytes in &self.transactions { - debug!(target: "reth::cli", bytes = ?tx_bytes, "Decoding transaction"); - let transaction = TransactionSigned::decode(&mut &Bytes::from_str(tx_bytes)?[..])? - .try_into_recovered() - .map_err(|tx| eyre::eyre!("failed to recover tx: {}", tx.tx_hash()))?; - - let encoded_length = match transaction.inner() { - TransactionSigned::Eip4844(tx) => { - let blobs_bundle = blobs_bundle.as_mut().ok_or_else(|| { - eyre::eyre!("encountered a blob tx. 
`--blobs-bundle-path` must be provided") - })?; - - let sidecar: BlobTransactionSidecarVariant = - BlobTransactionSidecarVariant::Eip4844( - blobs_bundle.pop_sidecar(tx.tx().blob_versioned_hashes.len()), - ); - - let pooled = transaction - .clone() - .into_inner() - .try_into_pooled_eip4844(sidecar.clone()) - .expect("should not fail to convert blob tx if it is already eip4844"); - let encoded_length = pooled.encode_2718_len(); - - // insert the blob into the store - blob_store.insert(*transaction.tx_hash(), sidecar)?; - - encoded_length - } - _ => transaction.encode_2718_len(), - }; - - debug!(target: "reth::cli", ?transaction, "Adding transaction to the pool"); - transaction_pool - .add_transaction( - TransactionOrigin::External, - EthPooledTransaction::new(transaction, encoded_length), - ) - .await?; - } - - let payload_attrs = PayloadAttributes { - parent_beacon_block_root: self.parent_beacon_block_root, - prev_randao: self.prev_randao, - timestamp: self.timestamp, - suggested_fee_recipient: self.suggested_fee_recipient, - // Set empty withdrawals vector if Shanghai is active, None otherwise - withdrawals: provider_factory - .chain_spec() - .is_shanghai_active_at_timestamp(self.timestamp) - .then(Vec::new), - }; - let payload_config = PayloadConfig::new( - Arc::new(SealedHeader::new(best_block.header().clone(), best_block.hash())), - reth_payload_builder::EthPayloadBuilderAttributes::try_new( - best_block.hash(), - payload_attrs, - EngineApiMessageVersion::default() as u8, - )?, - ); - - let args = BuildArguments::new( - CachedReads::default(), - payload_config, - CancelOnDrop::default(), - None, - ); - - let payload_builder = reth_ethereum_payload_builder::EthereumPayloadBuilder::new( - blockchain_db.clone(), - transaction_pool, - EthEvmConfig::new(provider_factory.chain_spec()), - EthereumBuilderConfig::new(), - ); - - match payload_builder.try_build(args)? { - BuildOutcome::Better { payload, .. } => { - let block = payload.block(); - debug!(target: "reth::cli", ?block, "Built new payload"); - - consensus.validate_header(block.sealed_header())?; - consensus.validate_block_pre_execution(block)?; - - let block_with_senders = block.clone().try_recover().unwrap(); - - let state_provider = blockchain_db.latest()?; - let db = StateProviderDatabase::new(&state_provider); - let evm_config = EthEvmConfig::ethereum(provider_factory.chain_spec()); - let executor = evm_config.batch_executor(db); - - let block_execution_output = executor.execute(&block_with_senders)?; - let execution_outcome = - ExecutionOutcome::from((block_execution_output, block.number)); - debug!(target: "reth::cli", ?execution_outcome, "Executed block"); - - let hashed_post_state = state_provider.hashed_post_state(execution_outcome.state()); - let (state_root, trie_updates) = - state_provider.state_root_with_updates(hashed_post_state.clone())?; - - if state_root != block_with_senders.state_root() { - eyre::bail!( - "state root mismatch. expected: {}. 
got: {}", - block_with_senders.state_root, - state_root - ); - } - - // Attempt to insert new block without committing - let provider_rw = provider_factory.provider_rw()?; - provider_rw.append_blocks_with_state( - Vec::from([block_with_senders]), - &execution_outcome, - hashed_post_state.into_sorted(), - trie_updates, - )?; - info!(target: "reth::cli", "Successfully appended built block"); - } - _ => unreachable!("other outcomes are unreachable"), - }; - - Ok(()) - } -} - -impl Command { - /// Returns the underlying chain being used to run this command - pub const fn chain_spec(&self) -> Option<&Arc> { - Some(&self.env.chain) - } -} diff --git a/crates/ethereum/cli/src/debug_cmd/execution.rs b/crates/ethereum/cli/src/debug_cmd/execution.rs deleted file mode 100644 index 63a9cc3a80e..00000000000 --- a/crates/ethereum/cli/src/debug_cmd/execution.rs +++ /dev/null @@ -1,253 +0,0 @@ -//! Command for debugging execution. - -use alloy_eips::BlockHashOrNumber; -use alloy_primitives::{BlockNumber, B256}; -use clap::Parser; -use futures::StreamExt; -use reth_chainspec::ChainSpec; -use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; -use reth_cli_runner::CliContext; -use reth_cli_util::get_secret_key; -use reth_config::Config; -use reth_consensus::FullConsensus; -use reth_db::DatabaseEnv; -use reth_downloaders::{ - bodies::bodies::BodiesDownloaderBuilder, - headers::reverse_headers::ReverseHeadersDownloaderBuilder, -}; -use reth_errors::ConsensusError; -use reth_ethereum_primitives::EthPrimitives; -use reth_exex::ExExManagerHandle; -use reth_network::{BlockDownloaderProvider, NetworkHandle}; -use reth_network_api::NetworkInfo; -use reth_network_p2p::{headers::client::HeadersClient, EthBlockClient}; -use reth_node_api::NodeTypesWithDBAdapter; -use reth_node_core::{args::NetworkArgs, utils::get_single_header}; -use reth_node_ethereum::{consensus::EthBeaconConsensus, EthEvmConfig}; -use reth_node_events::node::NodeEvent; -use reth_provider::{ - providers::ProviderNodeTypes, ChainSpecProvider, ProviderFactory, StageCheckpointReader, -}; -use reth_prune::PruneModes; -use reth_stages::{ - sets::DefaultStages, stages::ExecutionStage, ExecutionStageThresholds, Pipeline, StageId, - StageSet, -}; -use reth_static_file::StaticFileProducer; -use reth_tasks::TaskExecutor; -use std::{path::PathBuf, sync::Arc}; -use tokio::sync::watch; -use tracing::*; - -/// `reth debug execution` command -#[derive(Debug, Parser)] -pub struct Command { - #[command(flatten)] - env: EnvironmentArgs, - - #[command(flatten)] - network: NetworkArgs, - - /// The maximum block height. - #[arg(long)] - pub to: u64, - - /// The block interval for sync and unwind. - /// Defaults to `1000`. 
- #[arg(long, default_value = "1000")] - pub interval: u64, -} - -impl> Command { - fn build_pipeline( - &self, - config: &Config, - client: Client, - consensus: Arc>, - provider_factory: ProviderFactory, - task_executor: &TaskExecutor, - static_file_producer: StaticFileProducer>, - ) -> eyre::Result> - where - N: ProviderNodeTypes, - Client: EthBlockClient + 'static, - { - // building network downloaders using the fetch client - let header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers) - .build(client.clone(), consensus.clone()) - .into_task_with(task_executor); - - let body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies) - .build(client, consensus.clone(), provider_factory.clone()) - .into_task_with(task_executor); - - let stage_conf = &config.stages; - let prune_modes = config.prune.clone().map(|prune| prune.segments).unwrap_or_default(); - - let (tip_tx, tip_rx) = watch::channel(B256::ZERO); - let executor = EthEvmConfig::ethereum(provider_factory.chain_spec()); - - let pipeline = Pipeline::::builder() - .with_tip_sender(tip_tx) - .add_stages( - DefaultStages::new( - provider_factory.clone(), - tip_rx, - consensus.clone(), - header_downloader, - body_downloader, - executor.clone(), - stage_conf.clone(), - prune_modes, - None, - ) - .set(ExecutionStage::new( - executor, - consensus.clone(), - ExecutionStageThresholds { - max_blocks: None, - max_changes: None, - max_cumulative_gas: None, - max_duration: None, - }, - stage_conf.execution_external_clean_threshold(), - ExExManagerHandle::empty(), - )), - ) - .build(provider_factory, static_file_producer); - - Ok(pipeline) - } - - async fn build_network< - N: CliNodeTypes, - >( - &self, - config: &Config, - task_executor: TaskExecutor, - provider_factory: ProviderFactory>>, - network_secret_path: PathBuf, - default_peers_path: PathBuf, - ) -> eyre::Result { - let secret_key = get_secret_key(&network_secret_path)?; - let network = self - .network - .network_config(config, provider_factory.chain_spec(), secret_key, default_peers_path) - .with_task_executor(Box::new(task_executor)) - .build(provider_factory) - .start_network() - .await?; - info!(target: "reth::cli", peer_id = %network.peer_id(), local_addr = %network.local_addr(), "Connected to P2P network"); - debug!(target: "reth::cli", peer_id = ?network.peer_id(), "Full peer ID"); - Ok(network) - } - - async fn fetch_block_hash( - &self, - client: Client, - block: BlockNumber, - ) -> eyre::Result - where - Client: HeadersClient, - { - info!(target: "reth::cli", ?block, "Fetching block from the network."); - loop { - match get_single_header(&client, BlockHashOrNumber::Number(block)).await { - Ok(tip_header) => { - info!(target: "reth::cli", ?block, "Successfully fetched block"); - return Ok(tip_header.hash()) - } - Err(error) => { - error!(target: "reth::cli", ?block, %error, "Failed to fetch the block. 
Retrying..."); - } - } - } - } - - /// Execute `execution-debug` command - pub async fn execute>( - self, - ctx: CliContext, - ) -> eyre::Result<()> { - let Environment { provider_factory, config, data_dir } = - self.env.init::(AccessRights::RW)?; - - let consensus: Arc> = - Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec())); - - // Configure and build network - let network_secret_path = - self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret()); - let network = self - .build_network( - &config, - ctx.task_executor.clone(), - provider_factory.clone(), - network_secret_path, - data_dir.known_peers(), - ) - .await?; - - let static_file_producer = - StaticFileProducer::new(provider_factory.clone(), PruneModes::default()); - - // Configure the pipeline - let fetch_client = network.fetch_client().await?; - let mut pipeline = self.build_pipeline( - &config, - fetch_client.clone(), - consensus.clone(), - provider_factory.clone(), - &ctx.task_executor, - static_file_producer, - )?; - - let provider = provider_factory.provider()?; - - let latest_block_number = - provider.get_stage_checkpoint(StageId::Finish)?.map(|ch| ch.block_number); - if latest_block_number.unwrap_or_default() >= self.to { - info!(target: "reth::cli", latest = latest_block_number, "Nothing to run"); - return Ok(()) - } - - ctx.task_executor.spawn_critical( - "events task", - reth_node_events::node::handle_events( - Some(Box::new(network)), - latest_block_number, - pipeline.events().map(Into::>::into), - ), - ); - - let mut current_max_block = latest_block_number.unwrap_or_default(); - while current_max_block < self.to { - let next_block = current_max_block + 1; - let target_block = self.to.min(current_max_block + self.interval); - let target_block_hash = - self.fetch_block_hash(fetch_client.clone(), target_block).await?; - - // Run the pipeline - info!(target: "reth::cli", from = next_block, to = target_block, tip = ?target_block_hash, "Starting pipeline"); - pipeline.set_tip(target_block_hash); - let result = pipeline.run_loop().await?; - trace!(target: "reth::cli", from = next_block, to = target_block, tip = ?target_block_hash, ?result, "Pipeline finished"); - - // Unwind the pipeline without committing. - provider_factory.provider_rw()?.unwind_trie_state_range(next_block..=target_block)?; - - // Update latest block - current_max_block = target_block; - } - - Ok(()) - } -} - -impl Command { - /// Returns the underlying chain being used to run this command - pub const fn chain_spec(&self) -> Option<&Arc> { - Some(&self.env.chain) - } -} diff --git a/crates/ethereum/cli/src/debug_cmd/in_memory_merkle.rs b/crates/ethereum/cli/src/debug_cmd/in_memory_merkle.rs deleted file mode 100644 index b45e712da29..00000000000 --- a/crates/ethereum/cli/src/debug_cmd/in_memory_merkle.rs +++ /dev/null @@ -1,243 +0,0 @@ -//! Command for debugging in-memory merkle trie calculation. 
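The `reth debug in-memory-merkle` command whose deleted listing follows fetches the target header and body with a constant backoff via `backon`, bounded by its `--retries` flag. A minimal sketch of that retry pattern around a hypothetical fallible fetch:

```rust
use backon::{ConstantBuilder, Retryable};

/// Hypothetical fallible operation standing in for a network fetch.
async fn fetch_header(block: u64) -> eyre::Result<u64> {
    Ok(block)
}

/// Retry the fetch up to `retries` times with a constant backoff,
/// logging each failure, as the deleted command does.
async fn fetch_with_retries(block: u64, retries: usize) -> eyre::Result<u64> {
    let backoff = ConstantBuilder::default().with_max_times(retries);
    (move || fetch_header(block))
        .retry(backoff)
        .notify(|err, _| eprintln!("Error requesting header: {err}. Retrying..."))
        .await
}
```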
- -use alloy_consensus::BlockHeader; -use alloy_eips::BlockHashOrNumber; -use backon::{ConstantBuilder, Retryable}; -use clap::Parser; -use reth_chainspec::ChainSpec; -use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; -use reth_cli_runner::CliContext; -use reth_cli_util::get_secret_key; -use reth_config::Config; -use reth_ethereum_primitives::EthPrimitives; -use reth_evm::{execute::Executor, ConfigureEvm}; -use reth_execution_types::ExecutionOutcome; -use reth_network::{BlockDownloaderProvider, NetworkHandle}; -use reth_network_api::NetworkInfo; -use reth_node_api::{BlockTy, NodePrimitives}; -use reth_node_core::{ - args::NetworkArgs, - utils::{get_single_body, get_single_header}, -}; -use reth_node_ethereum::{consensus::EthBeaconConsensus, EthEvmConfig}; -use reth_primitives_traits::SealedBlock; -use reth_provider::{ - providers::ProviderNodeTypes, AccountExtReader, ChainSpecProvider, DatabaseProviderFactory, - HashedPostStateProvider, HashingWriter, LatestStateProviderRef, OriginalValuesKnown, - ProviderFactory, StageCheckpointReader, StateWriter, StorageLocation, StorageReader, -}; -use reth_revm::database::StateProviderDatabase; -use reth_stages::StageId; -use reth_tasks::TaskExecutor; -use reth_trie::StateRoot; -use reth_trie_db::DatabaseStateRoot; -use std::{path::PathBuf, sync::Arc}; -use tracing::*; - -/// `reth debug in-memory-merkle` command -/// This debug routine requires that the node is positioned at the block before the target. -/// The script will then download the block from p2p network and attempt to calculate and verify -/// merkle root for it. -#[derive(Debug, Parser)] -pub struct Command { - #[command(flatten)] - env: EnvironmentArgs, - - #[command(flatten)] - network: NetworkArgs, - - /// The number of retries per request - #[arg(long, default_value = "5")] - retries: usize, - - /// The depth after which we should start comparing branch nodes - #[arg(long)] - skip_node_depth: Option, -} - -impl> Command { - async fn build_network< - N: ProviderNodeTypes< - ChainSpec = C::ChainSpec, - Primitives: NodePrimitives< - Block = reth_ethereum_primitives::Block, - Receipt = reth_ethereum_primitives::Receipt, - BlockHeader = alloy_consensus::Header, - >, - >, - >( - &self, - config: &Config, - task_executor: TaskExecutor, - provider_factory: ProviderFactory, - network_secret_path: PathBuf, - default_peers_path: PathBuf, - ) -> eyre::Result { - let secret_key = get_secret_key(&network_secret_path)?; - let network = self - .network - .network_config(config, provider_factory.chain_spec(), secret_key, default_peers_path) - .with_task_executor(Box::new(task_executor)) - .build(provider_factory) - .start_network() - .await?; - info!(target: "reth::cli", peer_id = %network.peer_id(), local_addr = %network.local_addr(), "Connected to P2P network"); - debug!(target: "reth::cli", peer_id = ?network.peer_id(), "Full peer ID"); - Ok(network) - } - - /// Execute `debug in-memory-merkle` command - pub async fn execute>( - self, - ctx: CliContext, - ) -> eyre::Result<()> { - let Environment { provider_factory, config, data_dir } = - self.env.init::(AccessRights::RW)?; - - let provider = provider_factory.provider()?; - - // Look up merkle checkpoint - let merkle_checkpoint = provider - .get_stage_checkpoint(StageId::MerkleExecute)? 
- .expect("merkle checkpoint exists"); - - let merkle_block_number = merkle_checkpoint.block_number; - - // Configure and build network - let network_secret_path = - self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret()); - let network = self - .build_network( - &config, - ctx.task_executor.clone(), - provider_factory.clone(), - network_secret_path, - data_dir.known_peers(), - ) - .await?; - - let target_block_number = merkle_block_number + 1; - - info!(target: "reth::cli", target_block_number, "Downloading full block"); - let fetch_client = network.fetch_client().await?; - - let retries = self.retries.max(1); - let backoff = ConstantBuilder::default().with_max_times(retries); - - let client = fetch_client.clone(); - let header = (move || { - get_single_header(client.clone(), BlockHashOrNumber::Number(target_block_number)) - }) - .retry(backoff) - .notify(|err, _| warn!(target: "reth::cli", "Error requesting header: {err}. Retrying...")) - .await?; - - let client = fetch_client.clone(); - let chain = provider_factory.chain_spec(); - let consensus = Arc::new(EthBeaconConsensus::new(chain.clone())); - let block: SealedBlock> = (move || { - get_single_body(client.clone(), header.clone(), consensus.clone()) - }) - .retry(backoff) - .notify(|err, _| warn!(target: "reth::cli", "Error requesting body: {err}. Retrying...")) - .await?; - - let state_provider = LatestStateProviderRef::new(&provider); - let db = StateProviderDatabase::new(&state_provider); - - let evm_config = EthEvmConfig::ethereum(provider_factory.chain_spec()); - let executor = evm_config.batch_executor(db); - let block_execution_output = executor.execute(&block.clone().try_recover()?)?; - let execution_outcome = ExecutionOutcome::from((block_execution_output, block.number())); - - // Unpacked `BundleState::state_root_slow` function - let (in_memory_state_root, in_memory_updates) = StateRoot::overlay_root_with_updates( - provider.tx_ref(), - state_provider.hashed_post_state(execution_outcome.state()), - )?; - - if in_memory_state_root == block.state_root() { - info!(target: "reth::cli", state_root = ?in_memory_state_root, "Computed in-memory state root matches"); - return Ok(()) - } - - let provider_rw = provider_factory.database_provider_rw()?; - - // Insert block, state and hashes - provider_rw.insert_historical_block(block.clone().try_recover()?)?; - provider_rw.write_state( - &execution_outcome, - OriginalValuesKnown::No, - StorageLocation::Database, - )?; - let storage_lists = - provider_rw.changed_storages_with_range(block.number..=block.number())?; - let storages = provider_rw.plain_state_storages(storage_lists)?; - provider_rw.insert_storage_for_hashing(storages)?; - let account_lists = - provider_rw.changed_accounts_with_range(block.number..=block.number())?; - let accounts = provider_rw.basic_accounts(account_lists)?; - provider_rw.insert_account_for_hashing(accounts)?; - - let (state_root, incremental_trie_updates) = StateRoot::incremental_root_with_updates( - provider_rw.tx_ref(), - block.number..=block.number(), - )?; - if state_root != block.state_root() { - eyre::bail!( - "Computed incremental state root mismatch. Expected: {:?}. 
Got: {:?}", - block.state_root, - state_root - ); - } - - // Compare updates - let mut in_mem_mismatched = Vec::new(); - let mut incremental_mismatched = Vec::new(); - let mut in_mem_updates_iter = in_memory_updates.account_nodes_ref().iter().peekable(); - let mut incremental_updates_iter = - incremental_trie_updates.account_nodes_ref().iter().peekable(); - - while in_mem_updates_iter.peek().is_some() || incremental_updates_iter.peek().is_some() { - match (in_mem_updates_iter.next(), incremental_updates_iter.next()) { - (Some(in_mem), Some(incr)) => { - similar_asserts::assert_eq!(in_mem.0, incr.0, "Nibbles don't match"); - if in_mem.1 != incr.1 && - in_mem.0.len() > self.skip_node_depth.unwrap_or_default() - { - in_mem_mismatched.push(in_mem); - incremental_mismatched.push(incr); - } - } - (Some(in_mem), None) => { - warn!(target: "reth::cli", next = ?in_mem, "In-memory trie updates have more entries"); - } - (None, Some(incr)) => { - tracing::warn!(target: "reth::cli", next = ?incr, "Incremental trie updates have more entries"); - } - (None, None) => { - tracing::info!(target: "reth::cli", "Exhausted all trie updates entries"); - } - } - } - - similar_asserts::assert_eq!( - incremental_mismatched, - in_mem_mismatched, - "Mismatched trie updates" - ); - - // Drop without committing. - drop(provider_rw); - - Ok(()) - } -} - -impl Command { - /// Returns the underlying chain being used to run this command - pub const fn chain_spec(&self) -> Option<&Arc> { - Some(&self.env.chain) - } -} diff --git a/crates/ethereum/cli/src/debug_cmd/merkle.rs b/crates/ethereum/cli/src/debug_cmd/merkle.rs deleted file mode 100644 index 09c435b6f36..00000000000 --- a/crates/ethereum/cli/src/debug_cmd/merkle.rs +++ /dev/null @@ -1,315 +0,0 @@ -//! Command for debugging merkle tree calculation. 
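The `reth debug merkle` command whose deleted listing follows walks the incremental and clean trie listings in lockstep with peekable iterators and records mismatches past a configurable depth. A self-contained sketch of that comparison pattern over hypothetical (path, node) pairs:

```rust
use std::fmt::Debug;

/// Walk two sorted (path, node) listings in lockstep and report mismatches,
/// mirroring the peekable-iterator comparison in the deleted command below.
/// The key/value types are stand-ins for trie nibbles and nodes.
fn compare_tries<K: PartialEq + Debug, V: PartialEq + Debug>(
    incremental: Vec<(K, V)>,
    clean: Vec<(K, V)>,
) {
    let mut incr = incremental.into_iter().peekable();
    let mut clean = clean.into_iter().peekable();

    while incr.peek().is_some() || clean.peek().is_some() {
        match (incr.next(), clean.next()) {
            (Some((ik, iv)), Some((ck, cv))) => {
                // Both listings are sorted, so keys must line up pairwise.
                assert_eq!(ik, ck, "Nibbles don't match");
                if iv != cv {
                    println!("mismatch at {ik:?}: incremental {iv:?} != clean {cv:?}");
                }
            }
            (Some((ik, _)), None) => println!("incremental trie has extra entry {ik:?}"),
            (None, Some((ck, _))) => println!("clean trie has extra entry {ck:?}"),
            (None, None) => {}
        }
    }
}
```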
-use alloy_eips::BlockHashOrNumber; -use backon::{ConstantBuilder, Retryable}; -use clap::Parser; -use reth_chainspec::ChainSpec; -use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; -use reth_cli_runner::CliContext; -use reth_cli_util::get_secret_key; -use reth_config::Config; -use reth_consensus::{Consensus, ConsensusError}; -use reth_db_api::{cursor::DbCursorRO, tables, transaction::DbTx}; -use reth_ethereum_primitives::EthPrimitives; -use reth_evm::{execute::Executor, ConfigureEvm}; -use reth_execution_types::ExecutionOutcome; -use reth_network::{BlockDownloaderProvider, NetworkHandle}; -use reth_network_api::NetworkInfo; -use reth_network_p2p::full_block::FullBlockClient; -use reth_node_api::{BlockTy, NodePrimitives}; -use reth_node_core::{args::NetworkArgs, utils::get_single_header}; -use reth_node_ethereum::{consensus::EthBeaconConsensus, EthEvmConfig}; -use reth_provider::{ - providers::ProviderNodeTypes, BlockNumReader, BlockWriter, ChainSpecProvider, - DatabaseProviderFactory, LatestStateProviderRef, OriginalValuesKnown, ProviderFactory, - StateWriter, StorageLocation, -}; -use reth_revm::database::StateProviderDatabase; -use reth_stages::{ - stages::{AccountHashingStage, MerkleStage, StorageHashingStage}, - ExecInput, Stage, StageCheckpoint, -}; -use reth_tasks::TaskExecutor; -use std::{path::PathBuf, sync::Arc}; -use tracing::*; - -/// `reth debug merkle` command -#[derive(Debug, Parser)] -pub struct Command { - #[command(flatten)] - env: EnvironmentArgs, - - #[command(flatten)] - network: NetworkArgs, - - /// The number of retries per request - #[arg(long, default_value = "5")] - retries: usize, - - /// The height to finish at - #[arg(long)] - to: u64, - - /// The depth after which we should start comparing branch nodes - #[arg(long)] - skip_node_depth: Option, -} - -impl> Command { - async fn build_network< - N: ProviderNodeTypes< - ChainSpec = C::ChainSpec, - Primitives: NodePrimitives< - Block = reth_ethereum_primitives::Block, - Receipt = reth_ethereum_primitives::Receipt, - BlockHeader = alloy_consensus::Header, - >, - >, - >( - &self, - config: &Config, - task_executor: TaskExecutor, - provider_factory: ProviderFactory, - network_secret_path: PathBuf, - default_peers_path: PathBuf, - ) -> eyre::Result { - let secret_key = get_secret_key(&network_secret_path)?; - let network = self - .network - .network_config(config, provider_factory.chain_spec(), secret_key, default_peers_path) - .with_task_executor(Box::new(task_executor)) - .build(provider_factory) - .start_network() - .await?; - info!(target: "reth::cli", peer_id = %network.peer_id(), local_addr = %network.local_addr(), "Connected to P2P network"); - debug!(target: "reth::cli", peer_id = ?network.peer_id(), "Full peer ID"); - Ok(network) - } - - /// Execute `merkle-debug` command - pub async fn execute>( - self, - ctx: CliContext, - ) -> eyre::Result<()> { - let Environment { provider_factory, config, data_dir } = - self.env.init::(AccessRights::RW)?; - - let provider_rw = provider_factory.database_provider_rw()?; - - // Configure and build network - let network_secret_path = - self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret()); - let network = self - .build_network( - &config, - ctx.task_executor.clone(), - provider_factory.clone(), - network_secret_path, - data_dir.known_peers(), - ) - .await?; - - let executor_provider = EthEvmConfig::ethereum(provider_factory.chain_spec()); - - // Initialize the fetch 
client - info!(target: "reth::cli", target_block_number = self.to, "Downloading tip of block range"); - let fetch_client = network.fetch_client().await?; - - // fetch the header at `self.to` - let retries = self.retries.max(1); - let backoff = ConstantBuilder::default().with_max_times(retries); - let client = fetch_client.clone(); - let to_header = (move || { - get_single_header(client.clone(), BlockHashOrNumber::Number(self.to)) - }) - .retry(backoff) - .notify(|err, _| warn!(target: "reth::cli", "Error requesting header: {err}. Retrying...")) - .await?; - info!(target: "reth::cli", target_block_number=self.to, "Finished downloading tip of block range"); - - // build the full block client - let consensus: Arc, Error = ConsensusError>> = - Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec())); - let block_range_client = FullBlockClient::new(fetch_client, consensus.clone()); - - // get best block number - let best_block_number = provider_rw.best_block_number()?; - assert!(best_block_number < self.to, "Nothing to run"); - - // get the block range from the network - let block_range = best_block_number + 1..=self.to; - info!(target: "reth::cli", ?block_range, "Downloading range of blocks"); - let blocks = block_range_client - .get_full_block_range(to_header.hash_slow(), self.to - best_block_number) - .await; - - let mut account_hashing_stage = AccountHashingStage::default(); - let mut storage_hashing_stage = StorageHashingStage::default(); - let mut merkle_stage = - MerkleStage::::default_execution_with_consensus(consensus); - - for block in blocks.into_iter().rev() { - let block_number = block.number; - let sealed_block = - block.try_recover().map_err(|_| eyre::eyre!("Error sealing block with senders"))?; - trace!(target: "reth::cli", block_number, "Executing block"); - - provider_rw.insert_block(sealed_block.clone(), StorageLocation::Database)?; - - let executor = executor_provider.batch_executor(StateProviderDatabase::new( - LatestStateProviderRef::new(&provider_rw), - )); - let output = executor.execute(&sealed_block)?; - - provider_rw.write_state( - &ExecutionOutcome::single(block_number, output), - OriginalValuesKnown::Yes, - StorageLocation::Database, - )?; - - let checkpoint = Some(StageCheckpoint::new( - block_number - .checked_sub(1) - .ok_or_else(|| eyre::eyre!("GenesisBlockHasNoParent"))?, - )); - - let mut account_hashing_done = false; - while !account_hashing_done { - let output = account_hashing_stage - .execute(&provider_rw, ExecInput { target: Some(block_number), checkpoint })?; - account_hashing_done = output.done; - } - - let mut storage_hashing_done = false; - while !storage_hashing_done { - let output = storage_hashing_stage - .execute(&provider_rw, ExecInput { target: Some(block_number), checkpoint })?; - storage_hashing_done = output.done; - } - - let incremental_result = merkle_stage - .execute(&provider_rw, ExecInput { target: Some(block_number), checkpoint }); - - if incremental_result.is_ok() { - debug!(target: "reth::cli", block_number, "Successfully computed incremental root"); - continue - } - - warn!(target: "reth::cli", block_number, "Incremental calculation failed, retrying from scratch"); - let incremental_account_trie = provider_rw - .tx_ref() - .cursor_read::()? - .walk_range(..)? - .collect::, _>>()?; - let incremental_storage_trie = provider_rw - .tx_ref() - .cursor_dup_read::()? - .walk_range(..)? 
- .collect::, _>>()?; - - let clean_input = ExecInput { target: Some(sealed_block.number), checkpoint: None }; - loop { - let clean_result = merkle_stage - .execute(&provider_rw, clean_input) - .map_err(|e| eyre::eyre!("Clean state root calculation failed: {}", e))?; - if clean_result.done { - break; - } - } - - let clean_account_trie = provider_rw - .tx_ref() - .cursor_read::()? - .walk_range(..)? - .collect::, _>>()?; - let clean_storage_trie = provider_rw - .tx_ref() - .cursor_dup_read::()? - .walk_range(..)? - .collect::, _>>()?; - - info!(target: "reth::cli", block_number, "Comparing incremental trie vs clean trie"); - - // Account trie - let mut incremental_account_mismatched = Vec::new(); - let mut clean_account_mismatched = Vec::new(); - let mut incremental_account_trie_iter = incremental_account_trie.into_iter().peekable(); - let mut clean_account_trie_iter = clean_account_trie.into_iter().peekable(); - while incremental_account_trie_iter.peek().is_some() || - clean_account_trie_iter.peek().is_some() - { - match (incremental_account_trie_iter.next(), clean_account_trie_iter.next()) { - (Some(incremental), Some(clean)) => { - similar_asserts::assert_eq!(incremental.0, clean.0, "Nibbles don't match"); - if incremental.1 != clean.1 && - clean.0 .0.len() > self.skip_node_depth.unwrap_or_default() - { - incremental_account_mismatched.push(incremental); - clean_account_mismatched.push(clean); - } - } - (Some(incremental), None) => { - warn!(target: "reth::cli", next = ?incremental, "Incremental account trie has more entries"); - } - (None, Some(clean)) => { - warn!(target: "reth::cli", next = ?clean, "Clean account trie has more entries"); - } - (None, None) => { - info!(target: "reth::cli", "Exhausted all account trie entries"); - } - } - } - - // Storage trie - let mut first_mismatched_storage = None; - let mut incremental_storage_trie_iter = incremental_storage_trie.into_iter().peekable(); - let mut clean_storage_trie_iter = clean_storage_trie.into_iter().peekable(); - while incremental_storage_trie_iter.peek().is_some() || - clean_storage_trie_iter.peek().is_some() - { - match (incremental_storage_trie_iter.next(), clean_storage_trie_iter.next()) { - (Some(incremental), Some(clean)) => { - if incremental != clean && - clean.1.nibbles.len() > self.skip_node_depth.unwrap_or_default() - { - first_mismatched_storage = Some((incremental, clean)); - break - } - } - (Some(incremental), None) => { - warn!(target: "reth::cli", next = ?incremental, "Incremental storage trie has more entries"); - } - (None, Some(clean)) => { - warn!(target: "reth::cli", next = ?clean, "Clean storage trie has more entries") - } - (None, None) => { - info!(target: "reth::cli", "Exhausted all storage trie entries.") - } - } - } - - similar_asserts::assert_eq!( - ( - incremental_account_mismatched, - first_mismatched_storage.as_ref().map(|(incremental, _)| incremental) - ), - ( - clean_account_mismatched, - first_mismatched_storage.as_ref().map(|(_, clean)| clean) - ), - "Mismatched trie nodes" - ); - } - - info!(target: "reth::cli", ?block_range, "Successfully validated incremental roots"); - - Ok(()) - } -} - -impl Command { - /// Returns the underlying chain being used to run this command - pub const fn chain_spec(&self) -> Option<&Arc> { - Some(&self.env.chain) - } -} diff --git a/crates/ethereum/cli/src/debug_cmd/mod.rs b/crates/ethereum/cli/src/debug_cmd/mod.rs deleted file mode 100644 index 1a7bd5ed0cc..00000000000 --- a/crates/ethereum/cli/src/debug_cmd/mod.rs +++ /dev/null @@ -1,68 +0,0 @@ -//! 
`reth debug` command. Collection of various debugging routines. - -use clap::{Parser, Subcommand}; -use reth_chainspec::ChainSpec; -use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::CliNodeTypes; -use reth_cli_runner::CliContext; -use reth_ethereum_primitives::EthPrimitives; -use reth_node_ethereum::EthEngineTypes; -use std::sync::Arc; - -mod build_block; -mod execution; -mod in_memory_merkle; -mod merkle; - -/// `reth debug` command -#[derive(Debug, Parser)] -pub struct Command { - #[command(subcommand)] - command: Subcommands, -} - -/// `reth debug` subcommands -#[derive(Subcommand, Debug)] -pub enum Subcommands { - /// Debug the roundtrip execution of blocks as well as the generated data. - Execution(execution::Command), - /// Debug the clean & incremental state root calculations. - Merkle(merkle::Command), - /// Debug in-memory state root calculation. - InMemoryMerkle(in_memory_merkle::Command), - /// Debug block building. - BuildBlock(build_block::Command), -} - -impl> Command { - /// Execute `debug` command - pub async fn execute< - N: CliNodeTypes< - Payload = EthEngineTypes, - Primitives = EthPrimitives, - ChainSpec = C::ChainSpec, - >, - >( - self, - ctx: CliContext, - ) -> eyre::Result<()> { - match self.command { - Subcommands::Execution(command) => command.execute::(ctx).await, - Subcommands::Merkle(command) => command.execute::(ctx).await, - Subcommands::InMemoryMerkle(command) => command.execute::(ctx).await, - Subcommands::BuildBlock(command) => command.execute::(ctx).await, - } - } -} - -impl Command { - /// Returns the underlying chain being used to run this command - pub const fn chain_spec(&self) -> Option<&Arc> { - match &self.command { - Subcommands::Execution(command) => command.chain_spec(), - Subcommands::Merkle(command) => command.chain_spec(), - Subcommands::InMemoryMerkle(command) => command.chain_spec(), - Subcommands::BuildBlock(command) => command.chain_spec(), - } - } -} diff --git a/crates/ethereum/cli/src/interface.rs b/crates/ethereum/cli/src/interface.rs index 91164489bdb..ee6fe5698ea 100644 --- a/crates/ethereum/cli/src/interface.rs +++ b/crates/ethereum/cli/src/interface.rs @@ -1,17 +1,20 @@ //! CLI definition and entrypoint to executable -use crate::{chainspec::EthereumChainSpecParser, debug_cmd}; +use crate::chainspec::EthereumChainSpecParser; +use alloy_consensus::Header; use clap::{Parser, Subcommand}; -use reth_chainspec::ChainSpec; +use reth_chainspec::{ChainSpec, EthChainSpec, Hardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::{ - config_cmd, db, download, dump_genesis, import, import_era, init_cmd, init_state, + common::{CliComponentsBuilder, CliNodeTypes}, + config_cmd, db, download, dump_genesis, export_era, import, import_era, init_cmd, init_state, launcher::FnLauncher, node::{self, NoArgs}, - p2p, prune, recover, stage, + p2p, prune, re_execute, recover, stage, }; use reth_cli_runner::CliRunner; use reth_db::DatabaseEnv; +use reth_node_api::NodePrimitives; use reth_node_builder::{NodeBuilder, WithLaunchContext}; use reth_node_core::{ args::LogArgs, @@ -55,7 +58,7 @@ impl Cli { } } -impl, Ext: clap::Args + fmt::Debug> Cli { +impl Cli { /// Execute the configured cli command. 
/// /// This accepts a closure that is used to launch the node via the @@ -102,10 +105,35 @@ impl, Ext: clap::Args + fmt::Debug> Cl where L: FnOnce(WithLaunchContext, C::ChainSpec>>, Ext) -> Fut, Fut: Future>, + C: ChainSpecParser, { self.with_runner(CliRunner::try_default_runtime()?, launcher) } + /// Execute the configured cli command with the provided [`CliComponentsBuilder`]. + /// + /// This accepts a closure that is used to launch the node via the + /// [`NodeCommand`](node::NodeCommand). + /// + /// This command will be run on the [default tokio runtime](reth_cli_runner::tokio_runtime). + pub fn run_with_components( + self, + components: impl CliComponentsBuilder, + launcher: impl AsyncFnOnce( + WithLaunchContext, C::ChainSpec>>, + Ext, + ) -> eyre::Result<()>, + ) -> eyre::Result<()> + where + N: CliNodeTypes< + Primitives: NodePrimitives>, + ChainSpec: Hardforks, + >, + C: ChainSpecParser, + { + self.with_runner_and_components(CliRunner::try_default_runtime()?, components, launcher) + } + /// Execute the configured cli command with the provided [`CliRunner`]. /// /// @@ -116,13 +144,7 @@ impl, Ext: clap::Args + fmt::Debug> Cl /// use reth_ethereum_cli::interface::Cli; /// use reth_node_ethereum::EthereumNode; /// - /// let runtime = tokio::runtime::Builder::new_multi_thread() - /// .worker_threads(4) - /// .max_blocking_threads(256) - /// .enable_all() - /// .build() - /// .unwrap(); - /// let runner = CliRunner::from_runtime(runtime); + /// let runner = CliRunner::try_default_runtime().unwrap(); /// /// Cli::parse_args() /// .with_runner(runner, |builder, _| async move { @@ -131,15 +153,45 @@ impl, Ext: clap::Args + fmt::Debug> Cl /// }) /// .unwrap(); /// ``` - pub fn with_runner(mut self, runner: CliRunner, launcher: L) -> eyre::Result<()> + pub fn with_runner(self, runner: CliRunner, launcher: L) -> eyre::Result<()> where L: FnOnce(WithLaunchContext, C::ChainSpec>>, Ext) -> Fut, Fut: Future>, + C: ChainSpecParser, + { + let components = |spec: Arc| { + (EthEvmConfig::ethereum(spec.clone()), EthBeaconConsensus::new(spec)) + }; + + self.with_runner_and_components::( + runner, + components, + async move |builder, ext| launcher(builder, ext).await, + ) + } + + /// Execute the configured cli command with the provided [`CliRunner`] and + /// [`CliComponentsBuilder`]. 
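After this refactor, `with_runner` is a thin wrapper: it builds the default Ethereum components tuple and forwards to the new `with_runner_and_components`. A reduced sketch of that delegation shape, with stand-in types rather than the real signatures:

```rust
// Reduced shape of the delegation: the general entry point takes a
// components-builder closure, and the convenience method supplies a default.
// `EvmCfg`/`Consensus` stand in for EthEvmConfig / EthBeaconConsensus.
struct EvmCfg;
struct Consensus;

fn run_with_components<F>(components: F)
where
    F: FnOnce(u64) -> (EvmCfg, Consensus), // u64 stands in for Arc<ChainSpec>
{
    let (_evm, _consensus) = components(1);
    // ... launch the node with these components ...
}

fn run_default() {
    // Mirrors the default closure in the diff:
    // |spec| (EthEvmConfig::ethereum(spec.clone()), EthBeaconConsensus::new(spec))
    run_with_components(|_spec| (EvmCfg, Consensus));
}

fn main() {
    run_default();
}
```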
+ pub fn with_runner_and_components( + mut self, + runner: CliRunner, + components: impl CliComponentsBuilder, + launcher: impl AsyncFnOnce( + WithLaunchContext, C::ChainSpec>>, + Ext, + ) -> eyre::Result<()>, + ) -> eyre::Result<()> + where + N: CliNodeTypes< + Primitives: NodePrimitives>, + ChainSpec: Hardforks, + >, + C: ChainSpecParser, { // Add network name if available to the logs dir if let Some(chain_spec) = self.command.chain_spec() { self.logs.log_file_directory = - self.logs.log_file_directory.join(chain_spec.chain.to_string()); + self.logs.log_file_directory.join(chain_spec.chain().to_string()); } let _guard = self.init_tracing()?; info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.logs.log_file_directory); @@ -147,45 +199,40 @@ impl, Ext: clap::Args + fmt::Debug> Cl // Install the prometheus recorder to be sure to record all metrics let _ = install_prometheus_recorder(); - let components = |spec: Arc| { - (EthEvmConfig::ethereum(spec.clone()), EthBeaconConsensus::new(spec)) - }; match self.command { Commands::Node(command) => runner.run_command_until_exit(|ctx| { command.execute(ctx, FnLauncher::new::(launcher)) }), - Commands::Init(command) => { - runner.run_blocking_until_ctrl_c(command.execute::()) - } + Commands::Init(command) => runner.run_blocking_until_ctrl_c(command.execute::()), Commands::InitState(command) => { - runner.run_blocking_until_ctrl_c(command.execute::()) + runner.run_blocking_until_ctrl_c(command.execute::()) } Commands::Import(command) => { - runner.run_blocking_until_ctrl_c(command.execute::(components)) + runner.run_blocking_until_ctrl_c(command.execute::(components)) } Commands::ImportEra(command) => { - runner.run_blocking_until_ctrl_c(command.execute::()) + runner.run_blocking_until_ctrl_c(command.execute::()) } - Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), - Commands::Db(command) => { - runner.run_blocking_until_ctrl_c(command.execute::()) + Commands::ExportEra(command) => { + runner.run_blocking_until_ctrl_c(command.execute::()) } - Commands::Download(command) => { - runner.run_blocking_until_ctrl_c(command.execute::()) + Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), + Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute::()), + Commands::Download(command) => runner.run_blocking_until_ctrl_c(command.execute::()), + Commands::Stage(command) => { + runner.run_command_until_exit(|ctx| command.execute::(ctx, components)) } - Commands::Stage(command) => runner - .run_command_until_exit(|ctx| command.execute::(ctx, components)), - Commands::P2P(command) => runner.run_until_ctrl_c(command.execute::()), + Commands::P2P(command) => runner.run_until_ctrl_c(command.execute::()), #[cfg(feature = "dev")] Commands::TestVectors(command) => runner.run_until_ctrl_c(command.execute()), Commands::Config(command) => runner.run_until_ctrl_c(command.execute()), - Commands::Debug(command) => { - runner.run_command_until_exit(|ctx| command.execute::(ctx)) - } Commands::Recover(command) => { - runner.run_command_until_exit(|ctx| command.execute::(ctx)) + runner.run_command_until_exit(|ctx| command.execute::(ctx)) + } + Commands::Prune(command) => runner.run_until_ctrl_c(command.execute::()), + Commands::ReExecute(command) => { + runner.run_until_ctrl_c(command.execute::(components)) } - Commands::Prune(command) => runner.run_until_ctrl_c(command.execute::()), } } @@ -218,6 +265,9 @@ pub enum Commands { /// This syncs ERA encoded blocks from 
a directory. #[command(name = "import-era")] ImportEra(import_era::ImportEraCommand), + /// Exports block to era1 files in a specified directory. + #[command(name = "export-era")] + ExportEra(export_era::ExportEraCommand), /// Dumps genesis block JSON configuration to stdout. DumpGenesis(dump_genesis::DumpGenesisCommand), /// Database debugging utilities @@ -239,15 +289,15 @@ pub enum Commands { /// Write config to stdout #[command(name = "config")] Config(config_cmd::Command), - /// Various debug routines - #[command(name = "debug")] - Debug(Box>), /// Scripts for node recovery #[command(name = "recover")] Recover(recover::Command), /// Prune according to the configuration without any limits #[command(name = "prune")] Prune(prune::PruneCommand), + /// Re-execute blocks in parallel to verify historical sync correctness. + #[command(name = "re-execute")] + ReExecute(re_execute::Command), } impl Commands { @@ -258,6 +308,7 @@ impl Commands { Self::Init(cmd) => cmd.chain_spec(), Self::InitState(cmd) => cmd.chain_spec(), Self::Import(cmd) => cmd.chain_spec(), + Self::ExportEra(cmd) => cmd.chain_spec(), Self::ImportEra(cmd) => cmd.chain_spec(), Self::DumpGenesis(cmd) => cmd.chain_spec(), Self::Db(cmd) => cmd.chain_spec(), @@ -267,9 +318,9 @@ impl Commands { #[cfg(feature = "dev")] Self::TestVectors(_) => None, Self::Config(_) => None, - Self::Debug(cmd) => cmd.chain_spec(), Self::Recover(cmd) => cmd.chain_spec(), Self::Prune(cmd) => cmd.chain_spec(), + Self::ReExecute(cmd) => cmd.chain_spec(), } } } diff --git a/crates/ethereum/cli/src/lib.rs b/crates/ethereum/cli/src/lib.rs index a9d0e355bac..067d49d1682 100644 --- a/crates/ethereum/cli/src/lib.rs +++ b/crates/ethereum/cli/src/lib.rs @@ -10,7 +10,6 @@ /// Chain specification parser. pub mod chainspec; -pub mod debug_cmd; pub mod interface; pub use interface::Cli; diff --git a/crates/ethereum/consensus/src/validation.rs b/crates/ethereum/consensus/src/validation.rs index 186403d865b..01b0068db10 100644 --- a/crates/ethereum/consensus/src/validation.rs +++ b/crates/ethereum/consensus/src/validation.rs @@ -1,7 +1,7 @@ use alloc::vec::Vec; use alloy_consensus::{proofs::calculate_receipt_root, BlockHeader, TxReceipt}; -use alloy_eips::eip7685::Requests; -use alloy_primitives::{Bloom, B256}; +use alloy_eips::{eip7685::Requests, Encodable2718}; +use alloy_primitives::{Bloom, Bytes, B256}; use reth_chainspec::EthereumHardforks; use reth_consensus::ConsensusError; use reth_primitives_traits::{ @@ -41,6 +41,10 @@ where if let Err(error) = verify_receipts(block.header().receipts_root(), block.header().logs_bloom(), receipts) { + let receipts = receipts + .iter() + .map(|r| Bytes::from(r.with_bloom_ref().encoded_2718())) + .collect::>(); tracing::debug!(%error, ?receipts, "receipts verification failed"); return Err(error) } @@ -118,7 +122,7 @@ mod tests { #[test] fn test_verify_receipts_success() { // Create a vector of 5 default Receipt instances - let receipts = vec![Receipt::default(); 5]; + let receipts: Vec = vec![Receipt::default(); 5]; // Compare against expected values assert!(verify_receipts( @@ -136,7 +140,7 @@ mod tests { let expected_logs_bloom = Bloom::random(); // Create a vector of 5 random Receipt instances - let receipts = vec![Receipt::default(); 5]; + let receipts: Vec = vec![Receipt::default(); 5]; assert!(verify_receipts(expected_receipts_root, expected_logs_bloom, &receipts).is_err()); } diff --git a/crates/ethereum/evm/Cargo.toml b/crates/ethereum/evm/Cargo.toml index b0f75388ec2..744bcdc5368 100644 --- 
a/crates/ethereum/evm/Cargo.toml +++ b/crates/ethereum/evm/Cargo.toml @@ -33,7 +33,6 @@ derive_more = { workspace = true, optional = true } [dev-dependencies] reth-testing-utils.workspace = true reth-evm = { workspace = true, features = ["test-utils"] } -reth-execution-types.workspace = true secp256k1.workspace = true alloy-genesis.workspace = true diff --git a/crates/ethereum/evm/src/config.rs b/crates/ethereum/evm/src/config.rs index 676b790edb7..08f42540d08 100644 --- a/crates/ethereum/evm/src/config.rs +++ b/crates/ethereum/evm/src/config.rs @@ -1,14 +1,15 @@ -use alloy_consensus::Header; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_ethereum_forks::{EthereumHardfork, Hardforks}; +use reth_primitives_traits::BlockHeader; use revm::primitives::hardfork::SpecId; /// Map the latest active hardfork at the given header to a revm [`SpecId`]. -pub fn revm_spec(chain_spec: &C, header: &Header) -> SpecId +pub fn revm_spec(chain_spec: &C, header: &H) -> SpecId where C: EthereumHardforks + EthChainSpec + Hardforks, + H: BlockHeader, { - revm_spec_by_timestamp_and_block_number(chain_spec, header.timestamp, header.number) + revm_spec_by_timestamp_and_block_number(chain_spec, header.timestamp(), header.number()) } /// Map the latest active hardfork at the given timestamp or block number to a revm [`SpecId`]. @@ -99,6 +100,7 @@ where mod tests { use super::*; use crate::U256; + use alloy_consensus::Header; use reth_chainspec::{ChainSpecBuilder, MAINNET}; #[test] @@ -129,74 +131,74 @@ mod tests { #[test] fn test_to_revm_spec() { assert_eq!( - revm_spec(&ChainSpecBuilder::mainnet().cancun_activated().build(), &Default::default()), + revm_spec(&ChainSpecBuilder::mainnet().cancun_activated().build(), &Header::default()), SpecId::CANCUN ); assert_eq!( revm_spec( &ChainSpecBuilder::mainnet().shanghai_activated().build(), - &Default::default() + &Header::default() ), SpecId::SHANGHAI ); assert_eq!( - revm_spec(&ChainSpecBuilder::mainnet().paris_activated().build(), &Default::default()), + revm_spec(&ChainSpecBuilder::mainnet().paris_activated().build(), &Header::default()), SpecId::MERGE ); assert_eq!( - revm_spec(&ChainSpecBuilder::mainnet().london_activated().build(), &Default::default()), + revm_spec(&ChainSpecBuilder::mainnet().london_activated().build(), &Header::default()), SpecId::LONDON ); assert_eq!( - revm_spec(&ChainSpecBuilder::mainnet().berlin_activated().build(), &Default::default()), + revm_spec(&ChainSpecBuilder::mainnet().berlin_activated().build(), &Header::default()), SpecId::BERLIN ); assert_eq!( revm_spec( &ChainSpecBuilder::mainnet().istanbul_activated().build(), - &Default::default() + &Header::default() ), SpecId::ISTANBUL ); assert_eq!( revm_spec( &ChainSpecBuilder::mainnet().petersburg_activated().build(), - &Default::default() + &Header::default() ), SpecId::PETERSBURG ); assert_eq!( revm_spec( &ChainSpecBuilder::mainnet().byzantium_activated().build(), - &Default::default() + &Header::default() ), SpecId::BYZANTIUM ); assert_eq!( revm_spec( &ChainSpecBuilder::mainnet().spurious_dragon_activated().build(), - &Default::default() + &Header::default() ), SpecId::SPURIOUS_DRAGON ); assert_eq!( revm_spec( &ChainSpecBuilder::mainnet().tangerine_whistle_activated().build(), - &Default::default() + &Header::default() ), SpecId::TANGERINE ); assert_eq!( revm_spec( &ChainSpecBuilder::mainnet().homestead_activated().build(), - &Default::default() + &Header::default() ), SpecId::HOMESTEAD ); assert_eq!( revm_spec( &ChainSpecBuilder::mainnet().frontier_activated().build(), - 
&Default::default() + &Header::default() ), SpecId::FRONTIER ); diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index d3266bbb21b..f709fd62837 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -16,7 +16,8 @@ reth-ethereum-engine-primitives.workspace = true reth-ethereum-payload-builder.workspace = true reth-ethereum-consensus.workspace = true reth-ethereum-primitives.workspace = true -reth-primitives-traits.workspace = true +## ensure secp256k1 recovery with rayon support is activated +reth-primitives-traits = { workspace = true, features = ["secp256k1", "rayon"] } reth-node-builder.workspace = true reth-tracing.workspace = true reth-provider.workspace = true @@ -24,9 +25,9 @@ reth-transaction-pool.workspace = true reth-network.workspace = true reth-evm.workspace = true reth-evm-ethereum.workspace = true -reth-consensus.workspace = true reth-rpc.workspace = true reth-rpc-api.workspace = true +reth-rpc-eth-api.workspace = true reth-rpc-builder.workspace = true reth-rpc-server-types.workspace = true reth-node-api.workspace = true @@ -34,39 +35,37 @@ reth-chainspec.workspace = true reth-revm = { workspace = true, features = ["std"] } reth-trie-db.workspace = true reth-rpc-eth-types.workspace = true +reth-engine-local.workspace = true reth-engine-primitives.workspace = true reth-payload-primitives.workspace = true # ethereum alloy-eips.workspace = true +alloy-network.workspace = true alloy-rpc-types-eth.workspace = true alloy-rpc-types-engine.workspace = true + # revm with required ethereum features +# Note: this must be kept to ensure all features are properly enabled/forwarded revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } # misc eyre.workspace = true [dev-dependencies] -reth-chainspec.workspace = true reth-db.workspace = true reth-exex.workspace = true reth-node-core.workspace = true -reth-payload-primitives.workspace = true reth-e2e-test-utils.workspace = true -reth-rpc-eth-api.workspace = true reth-tasks.workspace = true alloy-primitives.workspace = true alloy-provider.workspace = true alloy-genesis.workspace = true alloy-signer.workspace = true -alloy-eips.workspace = true alloy-sol-types.workspace = true alloy-contract.workspace = true alloy-rpc-types-beacon = { workspace = true, features = ["ssz"] } -alloy-rpc-types-engine.workspace = true -alloy-rpc-types-eth.workspace = true alloy-consensus.workspace = true futures.workspace = true @@ -76,11 +75,15 @@ rand.workspace = true [features] default = [] +asm-keccak = [ + "alloy-primitives/asm-keccak", + "reth-node-core/asm-keccak", + "revm/asm-keccak", +] js-tracer = ["reth-node-builder/js-tracer"] test-utils = [ "reth-node-builder/test-utils", "reth-chainspec/test-utils", - "reth-consensus/test-utils", "reth-network/test-utils", "reth-ethereum-primitives/test-utils", "reth-revm/test-utils", diff --git a/crates/ethereum/node/src/engine.rs b/crates/ethereum/node/src/engine.rs index 14e1f4eff2a..34cda0e9d60 100644 --- a/crates/ethereum/node/src/engine.rs +++ b/crates/ethereum/node/src/engine.rs @@ -36,12 +36,12 @@ impl EthereumEngineValidator { } } -impl PayloadValidator for EthereumEngineValidator +impl PayloadValidator for EthereumEngineValidator where ChainSpec: EthChainSpec + EthereumHardforks + 'static, + Types: PayloadTypes, { type Block = Block; - type ExecutionData = ExecutionData; fn ensure_well_formed_payload( &self, @@ -60,7 +60,7 @@ where fn validate_version_specific_fields( &self, version: EngineApiMessageVersion, - payload_or_attrs: 
PayloadOrAttributes<'_, Self::ExecutionData, EthPayloadAttributes>, + payload_or_attrs: PayloadOrAttributes<'_, Types::ExecutionData, EthPayloadAttributes>, ) -> Result<(), EngineObjectValidationError> { payload_or_attrs .execution_requests() @@ -78,7 +78,7 @@ where validate_version_specific_fields( self.chain_spec(), version, - PayloadOrAttributes::::PayloadAttributes( + PayloadOrAttributes::::PayloadAttributes( attributes, ), ) diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 02ebacdb7d7..36dec1a2192 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -3,9 +3,10 @@ pub use crate::{payload::EthereumPayloadBuilder, EthereumEngineValidator}; use crate::{EthEngineTypes, EthEvmConfig}; use alloy_eips::{eip7840::BlobParams, merge::EPOCH_SLOTS}; +use alloy_network::Ethereum; use alloy_rpc_types_engine::ExecutionData; use reth_chainspec::{ChainSpec, EthChainSpec, EthereumHardforks, Hardforks}; -use reth_consensus::{ConsensusError, FullConsensus}; +use reth_engine_local::LocalPayloadAttributesBuilder; use reth_engine_primitives::EngineTypes; use reth_ethereum_consensus::EthBeaconConsensus; use reth_ethereum_engine_primitives::{ @@ -14,10 +15,12 @@ use reth_ethereum_engine_primitives::{ use reth_ethereum_primitives::{EthPrimitives, TransactionSigned}; use reth_evm::{ eth::spec::EthExecutorSpec, ConfigureEvm, EvmFactory, EvmFactoryFor, NextBlockEnvAttributes, + TxEnvFor, }; use reth_network::{primitives::BasicNetworkPrimitives, NetworkHandle, PeersInfo}; use reth_node_api::{ - AddOnsContext, FullNodeComponents, NodeAddOns, NodePrimitives, PrimitivesTy, TxTy, + AddOnsContext, FullNodeComponents, HeaderTy, NodeAddOns, NodePrimitives, + PayloadAttributesBuilder, PrimitivesTy, TxTy, }; use reth_node_builder::{ components::{ @@ -27,15 +30,20 @@ use reth_node_builder::{ node::{FullNodeTypes, NodeTypes}, rpc::{ BasicEngineApiBuilder, EngineApiBuilder, EngineValidatorAddOn, EngineValidatorBuilder, - EthApiBuilder, EthApiCtx, RethRpcAddOns, RpcAddOns, RpcHandle, + EthApiBuilder, EthApiCtx, Identity, RethRpcAddOns, RpcAddOns, RpcHandle, }, - BuilderContext, DebugNode, Node, NodeAdapter, NodeComponentsBuilder, PayloadBuilderConfig, - PayloadTypes, + BuilderContext, DebugNode, Node, NodeAdapter, PayloadBuilderConfig, PayloadTypes, }; use reth_provider::{providers::ProviderFactoryBuilder, EthStorage}; -use reth_rpc::{eth::core::EthApiFor, ValidationApi}; -use reth_rpc_api::{eth::FullEthApiServer, servers::BlockSubmissionValidationApiServer}; -use reth_rpc_builder::config::RethRpcServerConfig; +use reth_rpc::{ + eth::core::{EthApiFor, EthRpcConverterFor}, + ValidationApi, +}; +use reth_rpc_api::servers::BlockSubmissionValidationApiServer; +use reth_rpc_builder::{config::RethRpcServerConfig, middleware::RethRpcMiddleware}; +use reth_rpc_eth_api::{ + helpers::pending_block::BuildPendingEnv, RpcConvert, RpcTypes, SignableTxRequest, +}; use reth_rpc_eth_types::{error::FromEvmError, EthApiError}; use reth_rpc_server_types::RethRpcModule; use reth_tracing::tracing::{debug, info}; @@ -45,7 +53,7 @@ use reth_transaction_pool::{ }; use reth_trie_db::MerklePatriciaTrie; use revm::context::TxEnv; -use std::{default::Default, sync::Arc, time::SystemTime}; +use std::{default::Default, marker::PhantomData, sync::Arc, time::SystemTime}; /// Type configuration for a regular Ethereum node. #[derive(Debug, Default, Clone, Copy)] @@ -129,33 +137,34 @@ impl NodeTypes for EthereumNode { } /// Builds [`EthApi`](reth_rpc::EthApi) for Ethereum. 
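In the `engine.rs` hunk above, `ExecutionData` stops being an associated type of the validator itself and is taken from the `Types: PayloadTypes` parameter instead, so one validator can serve any payload type family. A minimal sketch of that bound-shifting pattern; the trait and type names here are illustrative only:

```rust
// Before: the validator owned `type ExecutionData`. After: it is generic
// over a `Types` carrier trait and reuses `Types::ExecutionData`.
trait PayloadTypes {
    type ExecutionData;
}

trait PayloadValidator<Types: PayloadTypes> {
    type Block;
    fn ensure_well_formed(&self, payload: Types::ExecutionData) -> Self::Block;
}

struct EthTypes;
impl PayloadTypes for EthTypes {
    type ExecutionData = Vec<u8>; // stand-in for the real ExecutionData
}

struct Validator;
impl PayloadValidator<EthTypes> for Validator {
    type Block = usize;
    fn ensure_well_formed(&self, payload: Vec<u8>) -> usize {
        payload.len() // stand-in for parsing + hash checks
    }
}

fn main() {
    assert_eq!(Validator.ensure_well_formed(vec![1, 2, 3]), 3);
}
```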
-#[derive(Debug, Default)] -pub struct EthereumEthApiBuilder; +#[derive(Debug)] +pub struct EthereumEthApiBuilder(PhantomData); + +impl Default for EthereumEthApiBuilder { + fn default() -> Self { + Self(Default::default()) + } +} -impl EthApiBuilder for EthereumEthApiBuilder +impl EthApiBuilder for EthereumEthApiBuilder where - N: FullNodeComponents, - EthApiFor: FullEthApiServer, + N: FullNodeComponents< + Types: NodeTypes, + Evm: ConfigureEvm>>, + >, + NetworkT: RpcTypes>>, + EthRpcConverterFor: RpcConvert< + Primitives = PrimitivesTy, + TxEnv = TxEnvFor, + Error = EthApiError, + Network = NetworkT, + >, + EthApiError: FromEvmError, { - type EthApi = EthApiFor; + type EthApi = EthApiFor; async fn build_eth_api(self, ctx: EthApiCtx<'_, N>) -> eyre::Result { - let api = reth_rpc::EthApiBuilder::new( - ctx.components.provider().clone(), - ctx.components.pool().clone(), - ctx.components.network().clone(), - ctx.components.evm_config().clone(), - ) - .eth_cache(ctx.cache) - .task_spawner(ctx.components.task_executor().clone()) - .gas_cap(ctx.config.rpc_gas_cap.into()) - .max_simulate_blocks(ctx.config.rpc_max_simulate_blocks) - .eth_proof_window(ctx.config.eth_proof_window) - .fee_history_cache_config(ctx.config.fee_history_cache) - .proof_permits(ctx.config.proof_permits) - .gas_oracle_config(ctx.config.gas_oracle) - .build(); - Ok(api) + Ok(ctx.eth_api_builder().map_converter(|r| r.with_network()).build()) } } @@ -166,8 +175,9 @@ pub struct EthereumAddOns< EthB: EthApiBuilder, EV, EB = BasicEngineApiBuilder, + RpcMiddleware = Identity, > { - inner: RpcAddOns, + inner: RpcAddOns, } impl Default for EthereumAddOns @@ -178,7 +188,7 @@ where fn default() -> Self { Self { inner: RpcAddOns::new( - EthereumEthApiBuilder, + EthereumEthApiBuilder::default(), EthereumEngineValidatorBuilder::default(), BasicEngineApiBuilder::default(), Default::default(), @@ -187,13 +197,16 @@ where } } -impl EthereumAddOns +impl EthereumAddOns where N: FullNodeComponents, EthB: EthApiBuilder, { /// Replace the engine API builder. 
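`EthereumEthApiBuilder` gains a `NetworkT` parameter in the hunk above, and its `Default` is written by hand because `#[derive(Default)]` would add an unwanted `NetworkT: Default` bound even though only `PhantomData` is stored. The pattern in isolation:

```rust
use std::marker::PhantomData;

// A derive(Default) here would require `NetworkT: Default`; the manual impl
// avoids that bound, since PhantomData<NetworkT> is always constructible.
struct EthApiBuilder<NetworkT>(PhantomData<NetworkT>);

impl<NetworkT> Default for EthApiBuilder<NetworkT> {
    fn default() -> Self {
        Self(PhantomData)
    }
}

struct NoDefaultNetwork; // deliberately has no Default impl

fn main() {
    let _builder = EthApiBuilder::<NoDefaultNetwork>::default(); // still works
}
```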
- pub fn with_engine_api(self, engine_api_builder: T) -> EthereumAddOns + pub fn with_engine_api( + self, + engine_api_builder: T, + ) -> EthereumAddOns where T: Send, { @@ -205,16 +218,26 @@ where pub fn with_engine_validator( self, engine_validator_builder: T, - ) -> EthereumAddOns + ) -> EthereumAddOns where T: Send, { let Self { inner } = self; EthereumAddOns { inner: inner.with_engine_validator(engine_validator_builder) } } + + /// Sets rpc middleware + pub fn with_rpc_middleware(self, rpc_middleware: T) -> EthereumAddOns + where + T: Send, + { + let Self { inner } = self; + EthereumAddOns { inner: inner.with_rpc_middleware(rpc_middleware) } + } } -impl NodeAddOns for EthereumAddOns +impl NodeAddOns + for EthereumAddOns where N: FullNodeComponents< Types: NodeTypes< @@ -229,6 +252,7 @@ where EB: EngineApiBuilder, EthApiError: FromEvmError, EvmFactoryFor: EvmFactory, + RpcMiddleware: RethRpcMiddleware, { type Handle = RpcHandle; @@ -236,7 +260,7 @@ where self, ctx: reth_node_api::AddOnsContext<'_, N>, ) -> eyre::Result { - let validation_api = ValidationApi::new( + let validation_api = ValidationApi::<_, _, ::Payload>::new( ctx.node.provider().clone(), Arc::new(ctx.node.consensus().clone()), ctx.node.evm_config().clone(), @@ -317,11 +341,8 @@ where EthereumConsensusBuilder, >; - type AddOns = EthereumAddOns< - NodeAdapter>::Components>, - EthereumEthApiBuilder, - EthereumEngineValidatorBuilder, - >; + type AddOns = + EthereumAddOns, EthereumEthApiBuilder, EthereumEngineValidatorBuilder>; fn components_builder(&self) -> Self::ComponentsBuilder { Self::components() @@ -338,6 +359,12 @@ impl> DebugNode for EthereumNode { fn rpc_to_primitive_block(rpc_block: Self::RpcBlock) -> reth_ethereum_primitives::Block { rpc_block.into_consensus().convert_transactions() } + + fn local_payload_attributes_builder( + chain_spec: &Self::ChainSpec, + ) -> impl PayloadAttributesBuilder<::PayloadAttributes> { + LocalPayloadAttributesBuilder::new(Arc::new(chain_spec.clone())) + } } /// A regular ethereum evm and executor builder. @@ -411,9 +438,22 @@ where .kzg_settings(ctx.kzg_settings()?) .with_local_transactions_config(pool_config.local_transactions_config.clone()) .set_tx_fee_cap(ctx.config().rpc.rpc_tx_fee_cap) + .with_max_tx_gas_limit(ctx.config().txpool.max_tx_gas_limit) + .with_minimum_priority_fee(ctx.config().txpool.minimum_priority_fee) .with_additional_tasks(ctx.config().txpool.additional_validation_tasks) .build_with_tasks(ctx.task_executor().clone(), blob_store.clone()); + if validator.validator().eip4844() { + // initializing the KZG settings can be expensive, this should be done upfront so that + // it doesn't impact the first block or the first gossiped blob transaction, so we + // initialize this in the background + let kzg_settings = validator.validator().kzg_settings().clone(); + ctx.task_executor().spawn_blocking(async move { + let _ = kzg_settings.get(); + debug!(target: "reth::cli", "Initialized KZG settings"); + }); + } + let transaction_pool = TxPoolBuilder::new(ctx) .with_validator(validator) .build_and_spawn_maintenance_task(blob_store, pool_config)?; @@ -465,7 +505,7 @@ where Types: NodeTypes, >, { - type Consensus = Arc>; + type Consensus = Arc::ChainSpec>>; async fn build_consensus(self, ctx: &BuilderContext) -> eyre::Result { Ok(Arc::new(EthBeaconConsensus::new(ctx.chain_spec()))) @@ -475,23 +515,19 @@ where /// Builder for [`EthereumEngineValidator`]. 
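The pool-builder hunk above warms the KZG trusted setup in a background task so that neither the first block nor the first gossiped blob transaction pays the one-time parsing cost. The same warm-up idea with a standard-library lazy value; the loader here is a stand-in, not reth's API:

```rust
use std::sync::LazyLock;

// Stand-in for the expensive trusted-setup parsing done by the real
// KZG settings loader.
static KZG_SETTINGS: LazyLock<Vec<u8>> = LazyLock::new(|| vec![0u8; 1024]);

fn warm_up_in_background() {
    std::thread::spawn(|| {
        // First touch triggers initialization exactly once; later callers
        // on the hot path then see an already-initialized value.
        let _ = LazyLock::force(&KZG_SETTINGS);
    });
}

fn main() {
    warm_up_in_background();
    // Whether this races the warm-up thread or not, initialization runs once.
    assert_eq!(KZG_SETTINGS.len(), 1024);
}
```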
#[derive(Debug, Default, Clone)] #[non_exhaustive] -pub struct EthereumEngineValidatorBuilder { - _phantom: std::marker::PhantomData, -} +pub struct EthereumEngineValidatorBuilder; -impl EngineValidatorBuilder - for EthereumEngineValidatorBuilder +impl EngineValidatorBuilder for EthereumEngineValidatorBuilder where Types: NodeTypes< - ChainSpec = ChainSpec, + ChainSpec: EthereumHardforks + Clone + 'static, Payload: EngineTypes + PayloadTypes, Primitives = EthPrimitives, >, Node: FullNodeComponents, - ChainSpec: EthChainSpec + EthereumHardforks + Clone + 'static, { - type Validator = EthereumEngineValidator; + type Validator = EthereumEngineValidator; async fn build(self, ctx: &AddOnsContext<'_, Node>) -> eyre::Result { Ok(EthereumEngineValidator::new(ctx.config.chain.clone())) diff --git a/crates/ethereum/node/tests/e2e/dev.rs b/crates/ethereum/node/tests/e2e/dev.rs index e54e84f6ac6..3b14c240102 100644 --- a/crates/ethereum/node/tests/e2e/dev.rs +++ b/crates/ethereum/node/tests/e2e/dev.rs @@ -6,7 +6,7 @@ use reth_chainspec::ChainSpec; use reth_node_api::{BlockBody, FullNodeComponents, FullNodePrimitives, NodeAddOns, NodeTypes}; use reth_node_builder::{ rpc::{RethRpcAddOns, RpcHandleProvider}, - EngineNodeLauncher, FullNode, NodeBuilder, NodeConfig, NodeHandle, + DebugNodeLauncher, EngineNodeLauncher, FullNode, NodeBuilder, NodeConfig, NodeHandle, }; use reth_node_core::args::DevArgs; use reth_node_ethereum::{node::EthereumAddOns, EthereumNode}; @@ -30,11 +30,12 @@ async fn can_run_dev_node() -> eyre::Result<()> { .with_components(EthereumNode::components()) .with_add_ons(EthereumAddOns::default()) .launch_with_fn(|builder| { - let launcher = EngineNodeLauncher::new( + let engine_launcher = EngineNodeLauncher::new( builder.task_executor().clone(), builder.config().datadir(), Default::default(), ); + let launcher = DebugNodeLauncher::new(engine_launcher); builder.launch_with(launcher) }) .await?; diff --git a/crates/ethereum/node/tests/it/builder.rs b/crates/ethereum/node/tests/it/builder.rs index 91dfd683efe..4e619f5f3d0 100644 --- a/crates/ethereum/node/tests/it/builder.rs +++ b/crates/ethereum/node/tests/it/builder.rs @@ -10,6 +10,7 @@ use reth_node_api::NodeTypesWithDBAdapter; use reth_node_builder::{EngineNodeLauncher, FullNodeComponents, NodeBuilder, NodeConfig}; use reth_node_ethereum::node::{EthereumAddOns, EthereumNode}; use reth_provider::providers::BlockchainProvider; +use reth_rpc_builder::Identity; use reth_tasks::TaskManager; #[test] @@ -33,6 +34,7 @@ fn test_basic_setup() { let _client = handles.rpc.http_client(); Ok(()) }) + .map_add_ons(|addons| addons.with_rpc_middleware(Identity::default())) .extend_rpc_modules(|ctx| { let _ = ctx.config(); let _ = ctx.node().provider(); diff --git a/crates/ethereum/payload/src/validator.rs b/crates/ethereum/payload/src/validator.rs index 75f4b1f474c..ccace26ef80 100644 --- a/crates/ethereum/payload/src/validator.rs +++ b/crates/ethereum/payload/src/validator.rs @@ -28,83 +28,80 @@ impl EthereumExecutionPayloadValidator { } impl EthereumExecutionPayloadValidator { - /// Returns true if the Cancun hardfork is active at the given timestamp. - #[inline] - fn is_cancun_active_at_timestamp(&self, timestamp: u64) -> bool { - self.chain_spec().is_cancun_active_at_timestamp(timestamp) - } - - /// Returns true if the Shanghai hardfork is active at the given timestamp. 
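The dev e2e test above now wraps `EngineNodeLauncher` in a `DebugNodeLauncher`, a decorator that layers debug behavior over the engine launcher without changing how it is invoked. The wrapping shape, reduced to a toy with illustrative names:

```rust
// Decorator shape: DebugLauncher wraps any inner launcher and adds behavior
// around the same `launch` entry point.
trait Launch {
    fn launch(&self) -> &'static str;
}

struct EngineLauncher;
impl Launch for EngineLauncher {
    fn launch(&self) -> &'static str {
        "engine node up"
    }
}

struct DebugLauncher<L>(L);
impl<L: Launch> Launch for DebugLauncher<L> {
    fn launch(&self) -> &'static str {
        // extra debug hooks would be installed here before delegating
        self.0.launch()
    }
}

fn main() {
    let launcher = DebugLauncher(EngineLauncher);
    assert_eq!(launcher.launch(), "engine node up");
}
```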
- #[inline] - fn is_shanghai_active_at_timestamp(&self, timestamp: u64) -> bool { - self.chain_spec().is_shanghai_active_at_timestamp(timestamp) - } - - /// Returns true if the Prague hardfork is active at the given timestamp. - #[inline] - fn is_prague_active_at_timestamp(&self, timestamp: u64) -> bool { - self.chain_spec().is_prague_active_at_timestamp(timestamp) - } - /// Ensures that the given payload does not violate any consensus rules that concern the block's - /// layout, like: - /// - missing or invalid base fee - /// - invalid extra data - /// - invalid transactions - /// - incorrect hash - /// - the versioned hashes passed with the payload do not exactly match transaction versioned - /// hashes - /// - the block does not contain blob transactions if it is pre-cancun + /// layout, /// - /// The checks are done in the order that conforms with the engine-API specification. - /// - /// This is intended to be invoked after receiving the payload from the CLI. - /// The additional [`MaybeCancunPayloadFields`](alloy_rpc_types_engine::MaybeCancunPayloadFields) are not part of the payload, but are additional fields in the `engine_newPayloadV3` RPC call, See also - /// - /// If the cancun fields are provided this also validates that the versioned hashes in the block - /// match the versioned hashes passed in the - /// [`CancunPayloadFields`](alloy_rpc_types_engine::CancunPayloadFields), if the cancun payload - /// fields are provided. If the payload fields are not provided, but versioned hashes exist - /// in the block, this is considered an error: [`PayloadError::InvalidVersionedHashes`]. - /// - /// This validates versioned hashes according to the Engine API Cancun spec: - /// + /// See also [`ensure_well_formed_payload`] pub fn ensure_well_formed_payload( &self, payload: ExecutionData, ) -> Result>, PayloadError> { - let ExecutionData { payload, sidecar } = payload; + ensure_well_formed_payload(&self.chain_spec, payload) + } +} + +/// Ensures that the given payload does not violate any consensus rules that concern the block's +/// layout, like: +/// - missing or invalid base fee +/// - invalid extra data +/// - invalid transactions +/// - incorrect hash +/// - the versioned hashes passed with the payload do not exactly match transaction versioned +/// hashes +/// - the block does not contain blob transactions if it is pre-cancun +/// +/// The checks are done in the order that conforms with the engine-API specification. +/// +/// This is intended to be invoked after receiving the payload from the CLI. +/// The additional [`MaybeCancunPayloadFields`](alloy_rpc_types_engine::MaybeCancunPayloadFields) are not part of the payload, but are additional fields in the `engine_newPayloadV3` RPC call, See also +/// +/// If the cancun fields are provided this also validates that the versioned hashes in the block +/// match the versioned hashes passed in the +/// [`CancunPayloadFields`](alloy_rpc_types_engine::CancunPayloadFields), if the cancun payload +/// fields are provided. If the payload fields are not provided, but versioned hashes exist +/// in the block, this is considered an error: [`PayloadError::InvalidVersionedHashes`]. 
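The doc comment above enumerates the checks in engine-API order: parse the block together with its sidecar, compare the computed hash against the hash claimed in the payload, then apply the Shanghai/Cancun/Prague field rules. A compressed, self-contained outline of that control flow; all types here are stand-ins:

```rust
// Compressed control flow of the payload validation above. The stand-in
// `Payload` carries a claimed hash; "parsing" and fork checks are mocked.
struct Payload {
    claimed_hash: u64,
    bytes: Vec<u8>,
}

#[derive(Debug)]
enum PayloadError {
    BlockHash { execution: u64, consensus: u64 },
}

fn ensure_well_formed(payload: Payload) -> Result<Vec<u8>, PayloadError> {
    // 1. "Parse" the block and compute its hash (here: a toy checksum).
    let computed = payload.bytes.iter().map(|b| *b as u64).sum::<u64>();
    // 2. The hash included in the payload must match the computed one.
    if computed != payload.claimed_hash {
        return Err(PayloadError::BlockHash {
            execution: computed,
            consensus: payload.claimed_hash,
        });
    }
    // 3. Fork-specific field checks (shanghai/cancun/prague) would follow here.
    Ok(payload.bytes)
}

fn main() {
    assert!(ensure_well_formed(Payload { claimed_hash: 6, bytes: vec![1, 2, 3] }).is_ok());
    assert!(ensure_well_formed(Payload { claimed_hash: 0, bytes: vec![1] }).is_err());
}
```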
+/// +/// This validates versioned hashes according to the Engine API Cancun spec: +/// +pub fn ensure_well_formed_payload( + chain_spec: ChainSpec, + payload: ExecutionData, +) -> Result>, PayloadError> +where + ChainSpec: EthereumHardforks, + T: SignedTransaction, +{ + let ExecutionData { payload, sidecar } = payload; - let expected_hash = payload.block_hash(); + let expected_hash = payload.block_hash(); - // First parse the block - let sealed_block = payload.try_into_block_with_sidecar(&sidecar)?.seal_slow(); + // First parse the block + let sealed_block = payload.try_into_block_with_sidecar(&sidecar)?.seal_slow(); - // Ensure the hash included in the payload matches the block hash - if expected_hash != sealed_block.hash() { - return Err(PayloadError::BlockHash { - execution: sealed_block.hash(), - consensus: expected_hash, - }) - } + // Ensure the hash included in the payload matches the block hash + if expected_hash != sealed_block.hash() { + return Err(PayloadError::BlockHash { + execution: sealed_block.hash(), + consensus: expected_hash, + }) + } - shanghai::ensure_well_formed_fields( - sealed_block.body(), - self.is_shanghai_active_at_timestamp(sealed_block.timestamp), - )?; + shanghai::ensure_well_formed_fields( + sealed_block.body(), + chain_spec.is_shanghai_active_at_timestamp(sealed_block.timestamp), + )?; - cancun::ensure_well_formed_fields( - &sealed_block, - sidecar.cancun(), - self.is_cancun_active_at_timestamp(sealed_block.timestamp), - )?; + cancun::ensure_well_formed_fields( + &sealed_block, + sidecar.cancun(), + chain_spec.is_cancun_active_at_timestamp(sealed_block.timestamp), + )?; - prague::ensure_well_formed_fields( - sealed_block.body(), - sidecar.prague(), - self.is_prague_active_at_timestamp(sealed_block.timestamp), - )?; + prague::ensure_well_formed_fields( + sealed_block.body(), + sidecar.prague(), + chain_spec.is_prague_active_at_timestamp(sealed_block.timestamp), + )?; - Ok(sealed_block) - } + Ok(sealed_block) } diff --git a/crates/ethereum/primitives/Cargo.toml b/crates/ethereum/primitives/Cargo.toml index 5a5f0ee101c..b99f2d34e58 100644 --- a/crates/ethereum/primitives/Cargo.toml +++ b/crates/ethereum/primitives/Cargo.toml @@ -40,7 +40,6 @@ rand.workspace = true reth-codecs = { workspace = true, features = ["test-utils"] } reth-zstd-compressors.workspace = true secp256k1 = { workspace = true, features = ["rand"] } -test-fuzz.workspace = true alloy-consensus = { workspace = true, features = ["serde", "arbitrary"] } [features] diff --git a/crates/ethereum/primitives/src/receipt.rs b/crates/ethereum/primitives/src/receipt.rs index ffc06c7fc82..6c81f8bd69d 100644 --- a/crates/ethereum/primitives/src/receipt.rs +++ b/crates/ethereum/primitives/src/receipt.rs @@ -1,30 +1,55 @@ +use core::fmt::Debug; + use alloc::vec::Vec; use alloy_consensus::{ Eip2718EncodableReceipt, Eip658Value, ReceiptWithBloom, RlpDecodableReceipt, RlpEncodableReceipt, TxReceipt, TxType, Typed2718, }; use alloy_eips::{ - eip2718::{Eip2718Result, Encodable2718, IsTyped2718}, + eip2718::{Eip2718Error, Eip2718Result, Encodable2718, IsTyped2718}, Decodable2718, }; use alloy_primitives::{Bloom, Log, B256}; use alloy_rlp::{BufMut, Decodable, Encodable, Header}; use reth_primitives_traits::{proofs::ordered_trie_root_with_encoder, InMemorySize}; +/// Helper trait alias with requirements for transaction type generic to be used within [`Receipt`]. 
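`TxTy`, defined just after this note, is the usual trait-alias idiom: an empty trait whose blanket impl grants it to every type satisfying the bound list, so `Receipt<T>` can state one short bound instead of repeating a dozen. The idiom in isolation:

```rust
use core::fmt::Debug;

// Trait-alias idiom: no methods, one blanket impl. Any type meeting the
// bounds gets the alias for free, exactly like `TxTy` below.
trait SmallKey: Debug + Copy + Eq + Send + Sync + 'static {}
impl<T> SmallKey for T where T: Debug + Copy + Eq + Send + Sync + 'static {}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct MyTxType(u8);

fn use_key<K: SmallKey>(key: K) {
    println!("{key:?}");
}

fn main() {
    use_key(MyTxType(2)); // satisfies every bound, no explicit impl needed
}
```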
+pub trait TxTy: + Debug + + Copy + + Eq + + Send + + Sync + + InMemorySize + + Typed2718 + + TryFrom + + Decodable + + 'static +{ +} +impl TxTy for T where + T: Debug + + Copy + + Eq + + Send + + Sync + + InMemorySize + + Typed2718 + + TryFrom + + Decodable + + 'static +{ +} + /// Typed ethereum transaction receipt. /// Receipt containing result of transaction execution. #[derive(Clone, Debug, PartialEq, Eq, Default)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -#[cfg_attr(feature = "reth-codec", derive(reth_codecs::CompactZstd))] #[cfg_attr(feature = "reth-codec", reth_codecs::add_arbitrary_tests(compact, rlp))] -#[cfg_attr(feature = "reth-codec", reth_zstd( - compressor = reth_zstd_compressors::RECEIPT_COMPRESSOR, - decompressor = reth_zstd_compressors::RECEIPT_DECOMPRESSOR -))] -pub struct Receipt { +pub struct Receipt { /// Receipt type. - pub tx_type: TxType, + pub tx_type: T, /// If transaction is executed successfully. /// /// This is the `statusCode` @@ -35,7 +60,7 @@ pub struct Receipt { pub logs: Vec, } -impl Receipt { +impl Receipt { /// Returns length of RLP-encoded receipt fields with the given [`Bloom`] without an RLP header. pub fn rlp_encoded_fields_length(&self, bloom: &Bloom) -> usize { self.success.length() + @@ -61,7 +86,7 @@ impl Receipt { /// network header. pub fn rlp_decode_inner( buf: &mut &[u8], - tx_type: TxType, + tx_type: T, ) -> alloy_rlp::Result> { let header = Header::decode(buf)?; if !header.list { @@ -112,10 +137,7 @@ impl Receipt { /// RLP-decodes the receipt from the provided buffer. This does not expect a type byte or /// network header. - pub fn rlp_decode_inner_without_bloom( - buf: &mut &[u8], - tx_type: TxType, - ) -> alloy_rlp::Result { + pub fn rlp_decode_inner_without_bloom(buf: &mut &[u8], tx_type: T) -> alloy_rlp::Result { let header = Header::decode(buf)?; if !header.list { return Err(alloy_rlp::Error::UnexpectedString); @@ -134,21 +156,21 @@ impl Receipt { } } -impl Eip2718EncodableReceipt for Receipt { +impl Eip2718EncodableReceipt for Receipt { fn eip2718_encoded_length_with_bloom(&self, bloom: &Bloom) -> usize { !self.tx_type.is_legacy() as usize + self.rlp_header_inner(bloom).length_with_payload() } fn eip2718_encode_with_bloom(&self, bloom: &Bloom, out: &mut dyn BufMut) { if !self.tx_type.is_legacy() { - out.put_u8(self.tx_type as u8); + out.put_u8(self.tx_type.ty()); } self.rlp_header_inner(bloom).encode(out); self.rlp_encode_fields(bloom, out); } } -impl RlpEncodableReceipt for Receipt { +impl RlpEncodableReceipt for Receipt { fn rlp_encoded_length_with_bloom(&self, bloom: &Bloom) -> usize { let mut len = self.eip2718_encoded_length_with_bloom(bloom); if !self.tx_type.is_legacy() { @@ -171,21 +193,21 @@ impl RlpEncodableReceipt for Receipt { } } -impl RlpDecodableReceipt for Receipt { +impl RlpDecodableReceipt for Receipt { fn rlp_decode_with_bloom(buf: &mut &[u8]) -> alloy_rlp::Result> { let header_buf = &mut &**buf; let header = Header::decode(header_buf)?; // Legacy receipt, reuse initial buffer without advancing if header.list { - return Self::rlp_decode_inner(buf, TxType::Legacy) + return Self::rlp_decode_inner(buf, T::try_from(0)?) 
} // Otherwise, advance the buffer and try decoding type flag followed by receipt *buf = *header_buf; let remaining = buf.len(); - let tx_type = TxType::decode(buf)?; + let tx_type = T::decode(buf)?; let this = Self::rlp_decode_inner(buf, tx_type)?; if buf.len() + header.payload_length != remaining { @@ -196,7 +218,7 @@ impl RlpDecodableReceipt for Receipt { } } -impl Encodable2718 for Receipt { +impl Encodable2718 for Receipt { fn encode_2718_len(&self) -> usize { (!self.tx_type.is_legacy() as usize) + self.rlp_header_inner_without_bloom().length_with_payload() @@ -205,24 +227,24 @@ impl Encodable2718 for Receipt { // encode the header fn encode_2718(&self, out: &mut dyn BufMut) { if !self.tx_type.is_legacy() { - out.put_u8(self.tx_type as u8); + out.put_u8(self.tx_type.ty()); } self.rlp_header_inner_without_bloom().encode(out); self.rlp_encode_fields_without_bloom(out); } } -impl Decodable2718 for Receipt { +impl Decodable2718 for Receipt { fn typed_decode(ty: u8, buf: &mut &[u8]) -> Eip2718Result { - Ok(Self::rlp_decode_inner_without_bloom(buf, TxType::try_from(ty)?)?) + Ok(Self::rlp_decode_inner_without_bloom(buf, T::try_from(ty)?)?) } fn fallback_decode(buf: &mut &[u8]) -> Eip2718Result { - Ok(Self::rlp_decode_inner_without_bloom(buf, TxType::Legacy)?) + Ok(Self::rlp_decode_inner_without_bloom(buf, T::try_from(0)?)?) } } -impl Encodable for Receipt { +impl Encodable for Receipt { fn encode(&self, out: &mut dyn BufMut) { self.network_encode(out); } @@ -232,13 +254,13 @@ impl Encodable for Receipt { } } -impl Decodable for Receipt { +impl Decodable for Receipt { fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { Ok(Self::network_decode(buf)?) } } -impl TxReceipt for Receipt { +impl TxReceipt for Receipt { type Log = Log; fn status_or_post_state(&self) -> Eip658Value { @@ -260,21 +282,25 @@ impl TxReceipt for Receipt { fn logs(&self) -> &[Log] { &self.logs } + + fn into_logs(self) -> Vec { + self.logs + } } -impl Typed2718 for Receipt { +impl Typed2718 for Receipt { fn ty(&self) -> u8 { - self.tx_type as u8 + self.tx_type.ty() } } -impl IsTyped2718 for Receipt { +impl IsTyped2718 for Receipt { fn is_type(type_id: u8) -> bool { ::is_type(type_id) } } -impl InMemorySize for Receipt { +impl InMemorySize for Receipt { fn size(&self) -> usize { self.tx_type.size() + core::mem::size_of::() + @@ -283,7 +309,7 @@ impl InMemorySize for Receipt { } } -impl From> for Receipt +impl From> for Receipt where T: Into, { @@ -299,8 +325,8 @@ where } } -impl From for alloy_consensus::Receipt { - fn from(value: Receipt) -> Self { +impl From> for alloy_consensus::Receipt { + fn from(value: Receipt) -> Self { Self { status: value.success.into(), cumulative_gas_used: value.cumulative_gas_used, @@ -309,8 +335,8 @@ impl From for alloy_consensus::Receipt { } } -impl From for alloy_consensus::ReceiptEnvelope { - fn from(value: Receipt) -> Self { +impl From> for alloy_consensus::ReceiptEnvelope { + fn from(value: Receipt) -> Self { let tx_type = value.tx_type; let receipt = value.into_with_bloom().map_receipt(Into::into); match tx_type { @@ -327,7 +353,9 @@ impl From for alloy_consensus::ReceiptEnvelope { pub(super) mod serde_bincode_compat { use alloc::{borrow::Cow, vec::Vec}; use alloy_consensus::TxType; + use alloy_eips::eip2718::Eip2718Error; use alloy_primitives::{Log, U8}; + use core::fmt::Debug; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; @@ -335,6 +363,7 @@ pub(super) mod serde_bincode_compat { /// /// Intended to use with the 
[`serde_with::serde_as`] macro in the following way: /// ```rust + /// use alloy_consensus::TxType; /// use reth_ethereum_primitives::{serde_bincode_compat, Receipt}; /// use serde::{de::DeserializeOwned, Deserialize, Serialize}; /// use serde_with::serde_as; @@ -343,14 +372,15 @@ pub(super) mod serde_bincode_compat { /// #[derive(Serialize, Deserialize)] /// struct Data { /// #[serde_as(as = "serde_bincode_compat::Receipt<'_>")] - /// receipt: Receipt, + /// receipt: Receipt, /// } /// ``` #[derive(Debug, Serialize, Deserialize)] - pub struct Receipt<'a> { + #[serde(bound(deserialize = "T: TryFrom"))] + pub struct Receipt<'a, T = TxType> { /// Receipt type. #[serde(deserialize_with = "deserde_txtype")] - pub tx_type: TxType, + pub tx_type: T, /// If transaction is executed successfully. /// /// This is the `statusCode` @@ -362,16 +392,16 @@ pub(super) mod serde_bincode_compat { } /// Ensures that txtype is deserialized symmetrically as U8 - fn deserde_txtype<'de, D>(deserializer: D) -> Result + fn deserde_txtype<'de, D, T>(deserializer: D) -> Result where D: Deserializer<'de>, + T: TryFrom, { - let value = U8::deserialize(deserializer)?; - value.to::().try_into().map_err(serde::de::Error::custom) + U8::deserialize(deserializer)?.to::().try_into().map_err(serde::de::Error::custom) } - impl<'a> From<&'a super::Receipt> for Receipt<'a> { - fn from(value: &'a super::Receipt) -> Self { + impl<'a, T: Copy> From<&'a super::Receipt> for Receipt<'a, T> { + fn from(value: &'a super::Receipt) -> Self { Self { tx_type: value.tx_type, success: value.success, @@ -381,8 +411,8 @@ pub(super) mod serde_bincode_compat { } } - impl<'a> From> for super::Receipt { - fn from(value: Receipt<'a>) -> Self { + impl<'a, T> From> for super::Receipt { + fn from(value: Receipt<'a, T>) -> Self { Self { tx_type: value.tx_type, success: value.success, @@ -392,8 +422,8 @@ pub(super) mod serde_bincode_compat { } } - impl SerializeAs for Receipt<'_> { - fn serialize_as(source: &super::Receipt, serializer: S) -> Result + impl SerializeAs> for Receipt<'_, T> { + fn serialize_as(source: &super::Receipt, serializer: S) -> Result where S: Serializer, { @@ -401,17 +431,22 @@ pub(super) mod serde_bincode_compat { } } - impl<'de> DeserializeAs<'de, super::Receipt> for Receipt<'de> { - fn deserialize_as(deserializer: D) -> Result + impl<'de, T: TryFrom> DeserializeAs<'de, super::Receipt> + for Receipt<'de, T> + { + fn deserialize_as(deserializer: D) -> Result, D::Error> where D: Deserializer<'de>, { - Receipt::<'_>::deserialize(deserializer).map(Into::into) + Receipt::<'_, T>::deserialize(deserializer).map(Into::into) } } - impl reth_primitives_traits::serde_bincode_compat::SerdeBincodeCompat for super::Receipt { - type BincodeRepr<'a> = Receipt<'a>; + impl reth_primitives_traits::serde_bincode_compat::SerdeBincodeCompat for super::Receipt + where + T: Copy + Serialize + TryFrom + Debug + 'static, + { + type BincodeRepr<'a> = Receipt<'a, T>; fn as_repr(&self) -> Self::BincodeRepr<'_> { self.into() @@ -425,6 +460,7 @@ pub(super) mod serde_bincode_compat { #[cfg(test)] mod tests { use crate::{receipt::serde_bincode_compat, Receipt}; + use alloy_consensus::TxType; use arbitrary::Arbitrary; use rand::Rng; use serde_with::serde_as; @@ -435,8 +471,8 @@ pub(super) mod serde_bincode_compat { #[derive(Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] struct Data { - #[serde_as(as = "serde_bincode_compat::Receipt<'_>")] - receipt: Receipt, + #[serde_as(as = "serde_bincode_compat::Receipt<'_, 
TxType>")] + receipt: Receipt, } let mut bytes = [0u8; 1024]; @@ -451,6 +487,124 @@ pub(super) mod serde_bincode_compat { } } +#[cfg(feature = "reth-codec")] +mod compact { + use super::*; + use reth_codecs::{ + Compact, + __private::{modular_bitfield::prelude::*, Buf}, + }; + + impl Receipt { + #[doc = "Used bytes by [`ReceiptFlags`]"] + pub const fn bitflag_encoded_bytes() -> usize { + 1u8 as usize + } + #[doc = "Unused bits for new fields by [`ReceiptFlags`]"] + pub const fn bitflag_unused_bits() -> usize { + 0u8 as usize + } + } + + #[allow(non_snake_case, unused_parens)] + mod flags { + use super::*; + + #[doc = "Fieldset that facilitates compacting the parent type. Used bytes: 1 | Unused bits: 0"] + #[bitfield] + #[derive(Clone, Copy, Debug, Default)] + pub struct ReceiptFlags { + pub tx_type_len: B2, + pub success_len: B1, + pub cumulative_gas_used_len: B4, + pub __zstd: B1, + } + + impl ReceiptFlags { + #[doc = r" Deserializes this fieldset and returns it, alongside the original slice in an advanced position."] + pub fn from(mut buf: &[u8]) -> (Self, &[u8]) { + (Self::from_bytes([buf.get_u8()]), buf) + } + } + } + + pub use flags::ReceiptFlags; + + impl Compact for Receipt { + fn to_compact(&self, buf: &mut B) -> usize + where + B: reth_codecs::__private::bytes::BufMut + AsMut<[u8]>, + { + let mut flags = ReceiptFlags::default(); + let mut total_length = 0; + let mut buffer = reth_codecs::__private::bytes::BytesMut::new(); + + let tx_type_len = self.tx_type.to_compact(&mut buffer); + flags.set_tx_type_len(tx_type_len as u8); + let success_len = self.success.to_compact(&mut buffer); + flags.set_success_len(success_len as u8); + let cumulative_gas_used_len = self.cumulative_gas_used.to_compact(&mut buffer); + flags.set_cumulative_gas_used_len(cumulative_gas_used_len as u8); + self.logs.to_compact(&mut buffer); + + let zstd = buffer.len() > 7; + if zstd { + flags.set___zstd(1); + } + + let flags = flags.into_bytes(); + total_length += flags.len() + buffer.len(); + buf.put_slice(&flags); + if zstd { + reth_zstd_compressors::RECEIPT_COMPRESSOR.with(|compressor| { + let compressed = + compressor.borrow_mut().compress(&buffer).expect("Failed to compress."); + buf.put(compressed.as_slice()); + }); + } else { + buf.put(buffer); + } + total_length + } + + fn from_compact(buf: &[u8], _len: usize) -> (Self, &[u8]) { + let (flags, mut buf) = ReceiptFlags::from(buf); + if flags.__zstd() != 0 { + reth_zstd_compressors::RECEIPT_DECOMPRESSOR.with(|decompressor| { + let decompressor = &mut decompressor.borrow_mut(); + let decompressed = decompressor.decompress(buf); + let original_buf = buf; + let mut buf: &[u8] = decompressed; + let (tx_type, new_buf) = T::from_compact(buf, flags.tx_type_len() as usize); + buf = new_buf; + let (success, new_buf) = bool::from_compact(buf, flags.success_len() as usize); + buf = new_buf; + let (cumulative_gas_used, new_buf) = + u64::from_compact(buf, flags.cumulative_gas_used_len() as usize); + buf = new_buf; + let (logs, _) = Vec::from_compact(buf, buf.len()); + (Self { tx_type, success, cumulative_gas_used, logs }, original_buf) + }) + } else { + let (tx_type, new_buf) = T::from_compact(buf, flags.tx_type_len() as usize); + buf = new_buf; + let (success, new_buf) = bool::from_compact(buf, flags.success_len() as usize); + buf = new_buf; + let (cumulative_gas_used, new_buf) = + u64::from_compact(buf, flags.cumulative_gas_used_len() as usize); + buf = new_buf; + let (logs, new_buf) = Vec::from_compact(buf, buf.len()); + buf = new_buf; + let obj = Self { tx_type, 
success, cumulative_gas_used, logs }; + (obj, buf) + } + } + } +} + +#[cfg(feature = "reth-codec")] +pub use compact::*; + #[cfg(test)] mod tests { use super::*; @@ -472,7 +626,7 @@ mod tests { #[test] fn test_decode_receipt() { - reth_codecs::test_utils::test_decode::(&hex!( + reth_codecs::test_utils::test_decode::>(&hex!( "c428b52ffd23fc42696156b10200f034792b6a94c3850215c2fef7aea361a0c31b79d9a32652eefc0d4e2e730036061cff7344b6fc6132b50cda0ed810a991ae58ef013150c12b2522533cb3b3a8b19b7786a8b5ff1d3cdc84225e22b02def168c8858df" )); } @@ -564,7 +718,7 @@ mod tests { let mut data = vec![]; receipt.to_compact(&mut data); - let (decoded, _) = Receipt::from_compact(&data[..], data.len()); + let (decoded, _) = Receipt::::from_compact(&data[..], data.len()); assert_eq!(decoded, receipt); } diff --git a/crates/ethereum/primitives/src/transaction.rs b/crates/ethereum/primitives/src/transaction.rs index 07191142e71..c6de2521a03 100644 --- a/crates/ethereum/primitives/src/transaction.rs +++ b/crates/ethereum/primitives/src/transaction.rs @@ -650,21 +650,18 @@ impl SignerRecoverable for TransactionSigned { let signature_hash = self.signature_hash(); recover_signer_unchecked(&self.signature, signature_hash) } + + fn recover_unchecked_with_buf(&self, buf: &mut Vec) -> Result { + self.encode_for_signing(buf); + let signature_hash = keccak256(buf); + recover_signer_unchecked(&self.signature, signature_hash) + } } impl SignedTransaction for TransactionSigned { fn tx_hash(&self) -> &TxHash { self.hash.get_or_init(|| self.recalculate_hash()) } - - fn recover_signer_unchecked_with_buf( - &self, - buf: &mut Vec, - ) -> Result { - self.encode_for_signing(buf); - let signature_hash = keccak256(buf); - recover_signer_unchecked(&self.signature, signature_hash) - } } #[cfg(test)] diff --git a/crates/ethereum/reth/Cargo.toml b/crates/ethereum/reth/Cargo.toml index 0522e6f84dc..fef17491b77 100644 --- a/crates/ethereum/reth/Cargo.toml +++ b/crates/ethereum/reth/Cargo.toml @@ -38,6 +38,7 @@ reth-trie-db = { workspace = true, optional = true } reth-node-builder = { workspace = true, optional = true } reth-tasks = { workspace = true, optional = true } reth-cli-util = { workspace = true, optional = true } +reth-engine-local = { workspace = true, optional = true } # reth-ethereum reth-ethereum-primitives.workspace = true @@ -126,6 +127,7 @@ node = [ "node-api", "dep:reth-node-ethereum", "dep:reth-node-builder", + "dep:reth-engine-local", "rpc", "trie-db", ] diff --git a/crates/ethereum/reth/src/lib.rs b/crates/ethereum/reth/src/lib.rs index 2a3a6135495..7c0141dc9a0 100644 --- a/crates/ethereum/reth/src/lib.rs +++ b/crates/ethereum/reth/src/lib.rs @@ -115,6 +115,15 @@ pub mod node { pub use reth_node_ethereum::*; } +/// Re-exported ethereum engine types +#[cfg(feature = "node")] +pub mod engine { + #[doc(inline)] + pub use reth_engine_local as local; + #[doc(inline)] + pub use reth_node_ethereum::engine::*; +} + /// Re-exported reth trie types #[cfg(feature = "trie")] pub mod trie { diff --git a/crates/evm/evm/Cargo.toml b/crates/evm/evm/Cargo.toml index c1c830168a1..9733da4f360 100644 --- a/crates/evm/evm/Cargo.toml +++ b/crates/evm/evm/Cargo.toml @@ -39,7 +39,6 @@ metrics = { workspace = true, optional = true } [dev-dependencies] reth-ethereum-primitives.workspace = true reth-ethereum-forks.workspace = true -alloy-consensus.workspace = true metrics-util = { workspace = true, features = ["debugging"] } [features] diff --git a/crates/evm/evm/src/execute.rs b/crates/evm/evm/src/execute.rs index 148cadf0cfc..40c680d3868 100644 --- 
a/crates/evm/evm/src/execute.rs +++ b/crates/evm/evm/src/execute.rs @@ -344,15 +344,22 @@ pub trait BlockBuilder { fn into_executor(self) -> Self::Executor; } -pub(crate) struct BasicBlockBuilder<'a, F, Executor, Builder, N: NodePrimitives> +/// A type that constructs a block from transactions and execution results. +#[derive(Debug)] +pub struct BasicBlockBuilder<'a, F, Executor, Builder, N: NodePrimitives> where F: BlockExecutorFactory, { - pub(crate) executor: Executor, - pub(crate) transactions: Vec>>, - pub(crate) ctx: F::ExecutionCtx<'a>, - pub(crate) parent: &'a SealedHeader>, - pub(crate) assembler: Builder, + /// The block executor used to execute transactions. + pub executor: Executor, + /// The transactions executed in this block. + pub transactions: Vec>>, + /// The parent block execution context. + pub ctx: F::ExecutionCtx<'a>, + /// The sealed parent block header. + pub parent: &'a SealedHeader>, + /// The assembler used to build the block. + pub assembler: Builder, } /// Conversions for executable transactions. diff --git a/crates/exex/exex/Cargo.toml b/crates/exex/exex/Cargo.toml index 8d380199002..0d09f0a8c68 100644 --- a/crates/exex/exex/Cargo.toml +++ b/crates/exex/exex/Cargo.toml @@ -54,13 +54,11 @@ tracing.workspace = true [dev-dependencies] reth-db-common.workspace = true reth-evm-ethereum.workspace = true -reth-node-api.workspace = true reth-primitives-traits = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-testing-utils.workspace = true alloy-genesis.workspace = true -alloy-consensus.workspace = true rand.workspace = true secp256k1.workspace = true diff --git a/crates/exex/exex/src/backfill/job.rs b/crates/exex/exex/src/backfill/job.rs index bbfd6c2a894..1a294e50659 100644 --- a/crates/exex/exex/src/backfill/job.rs +++ b/crates/exex/exex/src/backfill/job.rs @@ -124,7 +124,7 @@ where blocks.push(block); // Check if we should commit now if self.thresholds.is_end_of_batch( - block_number - *self.range.start(), + block_number - *self.range.start() + 1, executor.size_hint() as u64, cumulative_gas, batch_start.elapsed(), @@ -243,7 +243,10 @@ impl From> for SingleBlockBackfillJob { #[cfg(test)] mod tests { use crate::{ - backfill::test_utils::{blocks_and_execution_outputs, chain_spec, to_execution_outcome}, + backfill::{ + job::ExecutionStageThresholds, + test_utils::{blocks_and_execution_outputs, chain_spec, to_execution_outcome}, + }, BackfillJobFactory, }; use reth_db_common::init::init_genesis; @@ -333,4 +336,47 @@ mod tests { Ok(()) } + + #[test] + fn test_backfill_with_batch_threshold() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + // Create a key pair for the sender + let key_pair = generators::generate_key(&mut generators::rng()); + let address = public_key_to_address(key_pair.public_key()); + + let chain_spec = chain_spec(address); + + let executor = EthEvmConfig::ethereum(chain_spec.clone()); + let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); + init_genesis(&provider_factory)?; + let blockchain_db = BlockchainProvider::new(provider_factory.clone())?; + + let blocks_and_execution_outputs = + blocks_and_execution_outputs(provider_factory, chain_spec, key_pair)?; + let (block1, output1) = blocks_and_execution_outputs[0].clone(); + let (block2, output2) = blocks_and_execution_outputs[1].clone(); + + // Backfill with max_blocks=1, expect two separate chains + let factory = BackfillJobFactory::new(executor, blockchain_db).with_thresholds( + 
ExecutionStageThresholds { max_blocks: Some(1), ..Default::default() }, + ); + let job = factory.backfill(1..=2); + let chains = job.collect::, _>>()?; + + // Assert two chains, each with one block + assert_eq!(chains.len(), 2); + + let mut chain1 = chains[0].clone(); + chain1.execution_outcome_mut().bundle.reverts.sort(); + assert_eq!(chain1.blocks(), &[(1, block1)].into()); + assert_eq!(chain1.execution_outcome(), &to_execution_outcome(1, &output1)); + + let mut chain2 = chains[1].clone(); + chain2.execution_outcome_mut().bundle.reverts.sort(); + assert_eq!(chain2.blocks(), &[(2, block2)].into()); + assert_eq!(chain2.execution_outcome(), &to_execution_outcome(2, &output2)); + + Ok(()) + } } diff --git a/crates/exex/exex/src/notifications.rs b/crates/exex/exex/src/notifications.rs index 651bd7d5b29..c624fd4ff4e 100644 --- a/crates/exex/exex/src/notifications.rs +++ b/crates/exex/exex/src/notifications.rs @@ -350,7 +350,7 @@ where /// Compares the node head against the ExEx head, and backfills if needed. /// - /// CAUTON: This method assumes that the ExEx head is <= the node head, and that it's on the + /// CAUTION: This method assumes that the ExEx head is <= the node head, and that it's on the /// canonical chain. /// /// Possible situations are: diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index 00bcdcbbf70..6463740dba2 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -35,7 +35,7 @@ use reth_node_api::{ use reth_node_builder::{ components::{ BasicPayloadServiceBuilder, Components, ComponentsBuilder, ConsensusBuilder, - ExecutorBuilder, NodeComponentsBuilder, PoolBuilder, + ExecutorBuilder, PoolBuilder, }, BuilderContext, Node, NodeAdapter, RethFullAdapter, }; @@ -133,11 +133,8 @@ where TestExecutorBuilder, TestConsensusBuilder, >; - type AddOns = EthereumAddOns< - NodeAdapter>::Components>, - EthereumEthApiBuilder, - EthereumEngineValidatorBuilder, - >; + type AddOns = + EthereumAddOns, EthereumEthApiBuilder, EthereumEngineValidatorBuilder>; fn components_builder(&self) -> Self::ComponentsBuilder { ComponentsBuilder::default() @@ -158,16 +155,7 @@ where pub type TmpDB = Arc>; /// The [`NodeAdapter`] for the [`TestExExContext`]. Contains type necessary to /// boot the testing environment -pub type Adapter = NodeAdapter< - RethFullAdapter, - <>, - >, - >>::ComponentsBuilder as NodeComponentsBuilder>>::Components, ->; +pub type Adapter = NodeAdapter>; /// An [`ExExContext`] using the [`Adapter`] type. 
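The `job.rs` fix above changes the batch counter from `block_number - range.start()` to `block_number - range.start() + 1`, i.e. the number of blocks executed so far rather than the zero-based index of the current one; the new test pins the behavior for range `1..=2` with `max_blocks = 1`. The arithmetic in miniature:

```rust
// After executing `block_number` from a range starting at `range_start`,
// the number of blocks processed so far is inclusive on both ends.
fn blocks_executed(block_number: u64, range_start: u64) -> u64 {
    block_number - range_start + 1
}

fn main() {
    // With max_blocks = 1 and range 1..=2, the first batch must close after
    // block 1. The old formula returned 0 here and let the batch run long.
    assert_eq!(blocks_executed(1, 1), 1);
    assert_eq!(blocks_executed(2, 1), 2);
}
```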
pub type TestExExContext = ExExContext; diff --git a/crates/fs-util/src/lib.rs b/crates/fs-util/src/lib.rs index 922bec6bf67..d3195ad27fe 100644 --- a/crates/fs-util/src/lib.rs +++ b/crates/fs-util/src/lib.rs @@ -323,10 +323,21 @@ where let mut file = File::create(&tmp_path).map_err(|err| FsPathError::create_file(err, &tmp_path))?; - write_fn(&mut file).map_err(|err| FsPathError::Write { - source: Error::other(err.into()), - path: tmp_path.clone(), - })?; + // Execute the write function and handle errors properly + // If write_fn fails, we need to clean up the temporary file before returning + match write_fn(&mut file) { + Ok(()) => { + // Success - continue with the atomic operation + } + Err(err) => { + // Clean up the temporary file before returning the error + let _ = fs::remove_file(&tmp_path); + return Err(FsPathError::Write { + source: Error::other(err.into()), + path: tmp_path.clone(), + }); + } + } // fsync() file file.sync_all().map_err(|err| FsPathError::fsync(err, &tmp_path))?; diff --git a/crates/net/discv5/src/filter.rs b/crates/net/discv5/src/filter.rs index a83345a9a5e..def00f54dc3 100644 --- a/crates/net/discv5/src/filter.rs +++ b/crates/net/discv5/src/filter.rs @@ -1,4 +1,4 @@ -//! Predicates to constraint peer lookups. +//! Predicates to constrain peer lookups. use std::collections::HashSet; diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index 9c833e17047..128da4ff084 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -66,10 +66,7 @@ reth-tracing.workspace = true assert_matches.workspace = true tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } -alloy-rlp.workspace = true -itertools.workspace = true rand.workspace = true - tempfile.workspace = true [features] diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index a6e454b0414..e4ef306b018 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -230,7 +230,7 @@ where self.metrics.buffered_responses.set(self.buffered_responses.len() as f64); } - /// Returns a response if it's first block number matches the next expected. + /// Returns a response if its first block number matches the next expected. 
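// A minimal sketch, using only std, of the write-to-temp-then-rename pattern that the
// fs-util change above hardens: if the caller-supplied write step fails, the temporary
// file is removed before the error is surfaced, so no orphaned temp files are left behind.
// (`atomic_write` is an illustrative stand-in, not the crate's actual API.)
use std::{fs, io::Write, path::Path};

fn atomic_write(path: &Path, contents: &[u8]) -> std::io::Result<()> {
    let tmp_path = path.with_extension("tmp");
    let mut file = fs::File::create(&tmp_path)?;
    if let Err(err) = file.write_all(contents) {
        // Clean up the temporary file before returning the error.
        let _ = fs::remove_file(&tmp_path);
        return Err(err);
    }
    // fsync before the rename so the contents are durable.
    file.sync_all()?;
    // Renaming within the same directory makes the replacement atomic.
    fs::rename(&tmp_path, path)
}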
fn try_next_buffered(&mut self) -> Option>> { if let Some(next) = self.buffered_responses.peek() { let expected = self.next_expected_block_number(); diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml index 93a3ed4aa77..47c372dba24 100644 --- a/crates/net/eth-wire/Cargo.toml +++ b/crates/net/eth-wire/Cargo.toml @@ -51,7 +51,6 @@ reth-tracing.workspace = true alloy-consensus.workspace = true test-fuzz.workspace = true tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } -tokio-util = { workspace = true, features = ["io", "codec"] } rand.workspace = true secp256k1 = { workspace = true, features = ["global-context", "std", "recovery"] } rand_08.workspace = true diff --git a/crates/net/eth-wire/src/capability.rs b/crates/net/eth-wire/src/capability.rs index 97e15dbe1f9..613ec87a4be 100644 --- a/crates/net/eth-wire/src/capability.rs +++ b/crates/net/eth-wire/src/capability.rs @@ -534,7 +534,7 @@ mod tests { let mut encoded = Vec::new(); msg.encode(&mut encoded); - // Decode the bytes back into RawCapbailitMessage + // Decode the bytes back into RawCapabilityMessage let decoded = RawCapabilityMessage::decode(&mut &encoded[..]).unwrap(); // Verify that the decoded message matches the original diff --git a/crates/net/eth-wire/src/eth_snap_stream.rs b/crates/net/eth-wire/src/eth_snap_stream.rs index 000e1615103..82260186593 100644 --- a/crates/net/eth-wire/src/eth_snap_stream.rs +++ b/crates/net/eth-wire/src/eth_snap_stream.rs @@ -44,7 +44,7 @@ pub enum EthSnapStreamError { StatusNotInHandshake, } -/// Combined message type that include either eth or snao protocol messages +/// Combined message type that includes either eth or snap protocol messages #[derive(Debug)] pub enum EthSnapMessage { /// An Ethereum protocol message diff --git a/crates/net/network-api/Cargo.toml b/crates/net/network-api/Cargo.toml index 7257ec6b99d..2bb54aa0d6b 100644 --- a/crates/net/network-api/Cargo.toml +++ b/crates/net/network-api/Cargo.toml @@ -22,12 +22,13 @@ reth-ethereum-forks.workspace = true reth-primitives-traits.workspace = true # ethereum +alloy-consensus.workspace = true +alloy-rpc-types-eth.workspace = true alloy-primitives = { workspace = true, features = ["getrandom"] } alloy-rpc-types-admin.workspace = true enr = { workspace = true, default-features = false, features = ["rust-secp256k1"] } # async -async-trait.workspace = true futures.workspace = true tokio-stream.workspace = true @@ -46,5 +47,7 @@ serde = [ "alloy-primitives/serde", "enr/serde", "reth-ethereum-forks/serde", + "alloy-consensus/serde", + "alloy-rpc-types-eth/serde", "reth-primitives-traits/serde", ] diff --git a/crates/net/network-api/src/lib.rs b/crates/net/network-api/src/lib.rs index e08416968c7..843611dd800 100644 --- a/crates/net/network-api/src/lib.rs +++ b/crates/net/network-api/src/lib.rs @@ -39,7 +39,8 @@ pub use events::{ }; use reth_eth_wire_types::{ - capability::Capabilities, DisconnectReason, EthVersion, NetworkPrimitives, UnifiedStatus, + capability::Capabilities, Capability, DisconnectReason, EthVersion, NetworkPrimitives, + UnifiedStatus, }; use reth_network_p2p::sync::NetworkSyncUpdater; use reth_network_peers::NodeRecord; @@ -291,4 +292,6 @@ pub struct NetworkStatus { pub protocol_version: u64, /// Information about the Ethereum Wire Protocol. pub eth_protocol_info: EthProtocolInfo, + /// The list of supported capabilities and their versions.
+ pub capabilities: Vec, } diff --git a/crates/net/network-api/src/noop.rs b/crates/net/network-api/src/noop.rs index 4b5a49c91c4..3f38559524a 100644 --- a/crates/net/network-api/src/noop.rs +++ b/crates/net/network-api/src/noop.rs @@ -6,6 +6,14 @@ use core::{fmt, marker::PhantomData}; use std::net::{IpAddr, SocketAddr}; +use crate::{ + block::{EthWireBlockListenerProvider, NewBlockWithPeer}, + events::{NetworkPeersEvents, PeerEventStream}, + test_utils::{PeersHandle, PeersHandleProvider}, + BlockDownloaderProvider, DiscoveryEvent, NetworkError, NetworkEvent, + NetworkEventListenerProvider, NetworkInfo, NetworkStatus, PeerId, PeerInfo, PeerRequest, Peers, + PeersInfo, +}; use alloy_rpc_types_admin::EthProtocolInfo; use enr::{secp256k1::SecretKey, Enr}; use reth_eth_wire_types::{ @@ -15,17 +23,9 @@ use reth_network_p2p::{sync::NetworkSyncUpdater, NoopFullBlockClient}; use reth_network_peers::NodeRecord; use reth_network_types::{PeerKind, Reputation, ReputationChangeKind}; use reth_tokio_util::{EventSender, EventStream}; -use tokio::sync::{mpsc, oneshot}; +use tokio::sync::{mpsc, oneshot, oneshot::error::RecvError}; use tokio_stream::wrappers::UnboundedReceiverStream; -use crate::{ - events::{NetworkPeersEvents, PeerEventStream}, - test_utils::{PeersHandle, PeersHandleProvider}, - BlockDownloaderProvider, DiscoveryEvent, NetworkError, NetworkEvent, - NetworkEventListenerProvider, NetworkInfo, NetworkStatus, PeerId, PeerInfo, PeerRequest, Peers, - PeersInfo, -}; - /// A type that implements all network trait that does nothing. /// /// Intended for testing purposes where network is not used. @@ -73,6 +73,7 @@ where config: Default::default(), head: Default::default(), }, + capabilities: vec![], }) } @@ -163,7 +164,7 @@ where impl BlockDownloaderProvider for NoopNetwork where - Net: NetworkPrimitives + Default, + Net: NetworkPrimitives, { type Client = NoopFullBlockClient; @@ -200,6 +201,16 @@ where } } +impl EthWireBlockListenerProvider for NoopNetwork { + type Block = N::Block; + + async fn eth_wire_block_listener( + &self, + ) -> Result>, RecvError> { + unreachable!() + } +} + impl NetworkPeersEvents for NoopNetwork where Net: NetworkPrimitives, diff --git a/crates/net/network-types/src/peers/mod.rs b/crates/net/network-types/src/peers/mod.rs index f3529875018..5e998c87904 100644 --- a/crates/net/network-types/src/peers/mod.rs +++ b/crates/net/network-types/src/peers/mod.rs @@ -8,7 +8,7 @@ pub use config::{ConnectionsConfig, PeersConfig}; pub use reputation::{Reputation, ReputationChange, ReputationChangeKind, ReputationChangeWeights}; use alloy_eip2124::ForkId; -use tracing::trace; +use tracing::debug; use crate::{ is_banned_reputation, PeerAddr, PeerConnectionState, PeerKind, ReputationChangeOutcome, @@ -92,7 +92,7 @@ impl Peer { // we add reputation since negative reputation change decrease total reputation self.reputation = previous.saturating_add(reputation); - trace!(target: "net::peers", reputation=%self.reputation, banned=%self.is_banned(), ?kind, "applied reputation change"); + debug!(target: "net::peers", reputation=%self.reputation, banned=%self.is_banned(), ?kind, "applied reputation change"); if self.state.is_connected() && self.is_banned() { self.state.disconnect(); diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index 167fe4f26da..84fa656234d 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -89,7 +89,6 @@ reth-tracing.workspace = true reth-transaction-pool = { workspace = true, features = ["test-utils"] } # alloy deps 
for testing against nodes -alloy-consensus.workspace = true alloy-genesis.workspace = true # misc diff --git a/crates/net/network/docs/mermaid/network-manager.mmd b/crates/net/network/docs/mermaid/network-manager.mmd index e34dbb17777..aa2514a54d5 100644 --- a/crates/net/network/docs/mermaid/network-manager.mmd +++ b/crates/net/network/docs/mermaid/network-manager.mmd @@ -9,7 +9,7 @@ graph TB subgraph Swarm direction TB B1[(Session Manager)] - B2[(Connection Lister)] + B2[(Connection Listener)] B3[(Network State)] end end diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index aee8218382f..bcbdb2472ab 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -91,7 +91,7 @@ use tracing::{debug, error, trace, warn}; /// subgraph Swarm /// direction TB /// B1[(Session Manager)] -/// B2[(Connection Lister)] +/// B2[(Connection Listener)] /// B3[(Network State)] /// end /// end @@ -461,6 +461,11 @@ impl NetworkManager { genesis: status.genesis, config: Default::default(), }, + capabilities: hello_message + .protocols + .into_iter() + .map(|protocol| protocol.cap) + .collect(), } } diff --git a/crates/net/network/src/peers.rs b/crates/net/network/src/peers.rs index c0694023ceb..d851a461ccc 100644 --- a/crates/net/network/src/peers.rs +++ b/crates/net/network/src/peers.rs @@ -956,7 +956,7 @@ impl PeersManager { if peer.addr != new_addr { peer.addr = new_addr; - trace!(target: "net::peers", ?peer_id, addre=?peer.addr, "Updated resolved trusted peer address"); + trace!(target: "net::peers", ?peer_id, addr=?peer.addr, "Updated resolved trusted peer address"); } } } diff --git a/crates/net/network/src/session/conn.rs b/crates/net/network/src/session/conn.rs index 1b262430f14..ea13cef4f01 100644 --- a/crates/net/network/src/session/conn.rs +++ b/crates/net/network/src/session/conn.rs @@ -65,7 +65,7 @@ impl EthRlpxConnection { } } - /// Returns access to the underlying stream. + /// Returns access to the underlying stream. #[inline] pub(crate) const fn inner(&self) -> &P2PStream> { match self { diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs index 5aad90cbb6f..e94376948c6 100644 --- a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -174,6 +174,11 @@ impl SessionManager { } } + /// Returns the currently tracked [`ForkId`]. + pub(crate) const fn fork_id(&self) -> ForkId { + self.fork_filter.current() + } + /// Check whether the provided [`ForkId`] is compatible based on the validation rules in /// `EIP-2124`. pub fn is_valid_fork_id(&self, fork_id: ForkId) -> bool { diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index 89ad4874cc2..8168a88cbf4 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -500,7 +500,7 @@ impl NetworkState { self.on_peer_action(action); } - // We need to poll again tn case we have received any responses because they may have + // We need to poll again in case we have received any responses because they may have // triggered follow-up requests. 
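// A reduced sketch (simplified local types; the real ones live in reth-eth-wire-types) of
// how the new `capabilities` field on `NetworkStatus` is filled in `NetworkManager` above:
// every protocol configured in the RLPx hello message carries a capability (name plus
// version), and those are collected into the status served to admin-style RPC consumers.
struct Capability {
    name: String,
    version: usize,
}

struct Protocol {
    cap: Capability,
}

fn capabilities_from_hello(protocols: Vec<Protocol>) -> Vec<Capability> {
    protocols.into_iter().map(|protocol| protocol.cap).collect()
}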
if self.queued_messages.is_empty() { return Poll::Pending diff --git a/crates/net/network/src/swarm.rs b/crates/net/network/src/swarm.rs index fbb7b0bf941..229d149a2f9 100644 --- a/crates/net/network/src/swarm.rs +++ b/crates/net/network/src/swarm.rs @@ -20,7 +20,7 @@ use std::{ sync::Arc, task::{Context, Poll}, }; -use tracing::trace; +use tracing::{debug, trace}; #[cfg_attr(doc, aquamarine::aquamarine)] /// Contains the connectivity related state of the network. @@ -259,6 +259,7 @@ impl Swarm { if self.sessions.is_valid_fork_id(fork_id) { self.state_mut().peers_mut().set_discovered_fork_id(peer_id, fork_id); } else { + debug!(target: "net", ?peer_id, remote_fork_id=?fork_id, our_fork_id=?self.sessions.fork_id(), "fork id mismatch, removing peer"); self.state_mut().peers_mut().remove_peer(peer_id); } } diff --git a/crates/net/network/src/test_utils/init.rs b/crates/net/network/src/test_utils/init.rs index 51537f37d87..db61931dd47 100644 --- a/crates/net/network/src/test_utils/init.rs +++ b/crates/net/network/src/test_utils/init.rs @@ -13,7 +13,7 @@ pub fn enr_to_peer_id(enr: Enr) -> PeerId { // copied from ethers-rs /// A bit of hack to find an unused TCP port. /// -/// Does not guarantee that the given port is unused after the function exists, just that it was +/// Does not guarantee that the given port is unused after the function exits, just that it was /// unused before the function started (i.e., it does not reserve a port). pub fn unused_port() -> u16 { unused_tcp_addr().port() } diff --git a/crates/net/network/src/test_utils/transactions.rs b/crates/net/network/src/test_utils/transactions.rs index c3c38e3f1c7..467f146b059 100644 --- a/crates/net/network/src/test_utils/transactions.rs +++ b/crates/net/network/src/test_utils/transactions.rs @@ -51,7 +51,7 @@ pub async fn new_tx_manager( (transactions, network) } -/// Directly buffer hahs into tx fetcher for testing. +/// Directly buffer a hash into the tx fetcher for testing. pub fn buffer_hash_to_tx_fetcher( tx_fetcher: &mut TransactionFetcher, hash: TxHash, diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 18233700e25..05ab9ecbf71 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -61,8 +61,8 @@ use reth_primitives_traits::SignedTransaction; use reth_tokio_util::EventStream; use reth_transaction_pool::{ error::{PoolError, PoolResult}, - GetPooledTransactionLimit, PoolTransaction, PropagateKind, PropagatedTransactions, - TransactionPool, ValidPoolTransaction, + AddedTransactionOutcome, GetPooledTransactionLimit, PoolTransaction, PropagateKind, + PropagatedTransactions, TransactionPool, ValidPoolTransaction, }; use std::{ collections::{hash_map::Entry, HashMap, HashSet}, @@ -81,7 +81,8 @@ use tracing::{debug, trace}; /// The future for importing transactions into the pool. /// /// Resolves with the result of each transaction import. -pub type PoolImportFuture = Pin>> + Send + 'static>>; +pub type PoolImportFuture = + Pin>> + Send + 'static>>; /// Api to interact with [`TransactionsManager`] task. /// @@ -561,10 +562,10 @@ impl TransactionsManager { /// Processes a batch import results. - fn on_batch_import_result(&mut self, batch_results: Vec>) { + fn on_batch_import_result(&mut self, batch_results: Vec>) { for res in batch_results { match res { - Ok(hash) => { + Ok(AddedTransactionOutcome { hash, ..
}) => { self.on_good_import(hash); } Err(err) => { diff --git a/crates/net/network/tests/it/txgossip.rs b/crates/net/network/tests/it/txgossip.rs index 4014f41bfcb..ed1c2f925dd 100644 --- a/crates/net/network/tests/it/txgossip.rs +++ b/crates/net/network/tests/it/txgossip.rs @@ -10,7 +10,9 @@ use reth_network::{ }; use reth_network_api::{events::PeerEvent, PeerKind, PeersInfo}; use reth_provider::test_utils::{ExtendedAccount, MockEthProvider}; -use reth_transaction_pool::{test_utils::TransactionGenerator, PoolTransaction, TransactionPool}; +use reth_transaction_pool::{ + test_utils::TransactionGenerator, AddedTransactionOutcome, PoolTransaction, TransactionPool, +}; use std::sync::Arc; use tokio::join; @@ -42,7 +44,8 @@ async fn test_tx_gossip() { provider.add_account(sender, ExtendedAccount::new(0, U256::from(100_000_000))); // insert pending tx in peer0's pool - let hash = peer0_pool.add_external_transaction(tx).await.unwrap(); + let AddedTransactionOutcome { hash, .. } = + peer0_pool.add_external_transaction(tx).await.unwrap(); let inserted = peer0_tx_listener.recv().await.unwrap(); assert_eq!(inserted, hash); @@ -81,10 +84,10 @@ async fn test_tx_propagation_policy_trusted_only() { provider.add_account(sender, ExtendedAccount::new(0, U256::from(100_000_000))); // insert the tx in peer0's pool - let hash_0 = peer_0_handle.pool().unwrap().add_external_transaction(tx).await.unwrap(); + let outcome_0 = peer_0_handle.pool().unwrap().add_external_transaction(tx).await.unwrap(); let inserted = peer0_tx_listener.recv().await.unwrap(); - assert_eq!(inserted, hash_0); + assert_eq!(inserted, outcome_0.hash); // ensure tx is not gossiped to peer1 peer1_tx_listener.try_recv().expect_err("Empty"); @@ -108,16 +111,16 @@ async fn test_tx_propagation_policy_trusted_only() { provider.add_account(sender, ExtendedAccount::new(0, U256::from(100_000_000))); // insert pending tx in peer0's pool - let hash_1 = peer_0_handle.pool().unwrap().add_external_transaction(tx).await.unwrap(); + let outcome_1 = peer_0_handle.pool().unwrap().add_external_transaction(tx).await.unwrap(); let inserted = peer0_tx_listener.recv().await.unwrap(); - assert_eq!(inserted, hash_1); + assert_eq!(inserted, outcome_1.hash); // ensure peer1 now receives the pending txs from peer0 let mut buff = Vec::with_capacity(2); buff.push(peer1_tx_listener.recv().await.unwrap()); buff.push(peer1_tx_listener.recv().await.unwrap()); - assert!(buff.contains(&hash_1)); + assert!(buff.contains(&outcome_1.hash)); } #[tokio::test(flavor = "multi_thread")] diff --git a/crates/net/p2p/src/full_block.rs b/crates/net/p2p/src/full_block.rs index aa48f6c610e..30a55885333 100644 --- a/crates/net/p2p/src/full_block.rs +++ b/crates/net/p2p/src/full_block.rs @@ -647,7 +647,7 @@ enum RangeResponseResult { } /// A headers+bodies client implementation that does nothing. -#[derive(Debug, Default, Clone)] +#[derive(Debug, Clone)] #[non_exhaustive] pub struct NoopFullBlockClient(PhantomData); @@ -743,6 +743,12 @@ where type Block = Net::Block; } +impl Default for NoopFullBlockClient { + fn default() -> Self { + Self(PhantomData::) + } +} + #[cfg(test)] mod tests { use reth_ethereum_primitives::BlockBody; diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index 88b0ae5cf5c..5622554cd46 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -121,11 +121,84 @@ pub struct AddOnsContext<'a, N: FullNodeComponents> { } /// Customizable node add-on types. 
+/// +/// This trait defines the interface for extending a node with additional functionality beyond +/// the core [`FullNodeComponents`]. It provides a way to launch supplementary services such as +/// RPC servers, monitoring, external integrations, or any custom functionality that builds on +/// top of the core node components. +/// +/// ## Purpose +/// +/// The `NodeAddOns` trait serves as an extension point in the node builder architecture, +/// allowing developers to: +/// - Define custom services that run alongside the main node +/// - Access all node components and configuration during initialization +/// - Return a handle for managing the launched services (e.g. handle to rpc server) +/// +/// ## How it fits into `NodeBuilder` +/// +/// In the node builder pattern, add-ons are the final layer that gets applied after all core +/// components are configured and started. The builder flow typically follows: +/// +/// 1. Configure [`NodeTypes`] (chain spec, database types, etc.) +/// 2. Build [`FullNodeComponents`] (consensus, networking, transaction pool, etc.) +/// 3. Launch [`NodeAddOns`] with access to all components via [`AddOnsContext`] +/// +/// ## Primary Use Case +/// +/// The primary use of this trait is to launch RPC servers that provide external API access to +/// the node. For Ethereum nodes, this typically includes two main servers: the regular RPC +/// server (HTTP/WS/IPC) that handles user requests and the authenticated Engine API server +/// that communicates with the consensus layer. The returned handle contains the necessary +/// endpoints and control mechanisms for these servers, allowing the node to serve JSON-RPC +/// requests and participate in consensus. While RPC is the main use case, the trait is +/// intentionally flexible to support other kinds of add-ons such as monitoring, indexing, or +/// custom protocol extensions. +/// +/// ## Context Access +/// +/// The [`AddOnsContext`] provides access to: +/// - All node components via the `node` field +/// - Node configuration +/// - Engine API handles for consensus layer communication +/// - JWT secrets for authenticated endpoints +/// +/// This ensures add-ons can integrate deeply with the node while maintaining clean separation +/// of concerns. pub trait NodeAddOns: Send { /// Handle to add-ons. + /// + /// This type is returned by [`launch_add_ons`](Self::launch_add_ons) and represents a + /// handle to the launched services. It must be `Clone` to allow multiple components to + /// hold references and should provide methods to interact with the running services. + /// + /// For RPC add-ons, this typically includes: + /// - Server handles to access local addresses and shutdown methods + /// - RPC module registry for runtime inspection of available methods + /// - Configured middleware and transport-specific settings + /// - For Engine API implementations, this also includes handles for consensus layer + /// communication type Handle: Send + Sync + Clone; /// Configures and launches the add-ons. + /// + /// This method is called once during node startup after all core components are initialized. + /// It receives an [`AddOnsContext`] that provides access to: + /// + /// - The fully configured node with all its components + /// - Node configuration for reading settings + /// - Engine API handles for consensus layer communication + /// - JWT secrets for setting up authenticated endpoints (if any). + /// + /// The implementation should: + /// 1. Use the context to configure the add-on services + /// 2. 
Launch any background tasks using the node's task executor + /// 3. Return a handle that allows interaction with the launched services + /// + /// # Errors + /// + /// This method may fail if the add-ons cannot be properly configured or launched, + /// for example due to port binding issues or invalid configuration. fn launch_add_ons( self, ctx: AddOnsContext<'_, N>, diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index d08c62d38ce..ba02aba2649 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -54,6 +54,7 @@ reth-tokio-util.workspace = true reth-tracing.workspace = true reth-transaction-pool.workspace = true reth-basic-payload-builder.workspace = true +reth-node-ethstats.workspace = true ## ethereum alloy-consensus.workspace = true @@ -84,6 +85,11 @@ tracing.workspace = true [dev-dependencies] tempfile.workspace = true +reth-ethereum-engine-primitives.workspace = true +reth-payload-builder = { workspace = true, features = ["test-utils"] } +reth-node-ethereum.workspace = true +reth-provider = { workspace = true, features = ["test-utils"] } +reth-evm-ethereum = { workspace = true, features = ["test-utils"] } [features] default = [] @@ -104,6 +110,8 @@ test-utils = [ "reth-db-api/test-utils", "reth-provider/test-utils", "reth-transaction-pool/test-utils", + "reth-evm-ethereum/test-utils", + "reth-node-ethereum/test-utils", ] op = [ "reth-db?/op", diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 540905960b5..97033320e3c 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -248,14 +248,25 @@ impl NodeBuilder { /// Creates an _ephemeral_ preconfigured node for testing purposes. #[cfg(feature = "test-utils")] pub fn testing_node( + self, + task_executor: TaskExecutor, + ) -> WithLaunchContext< + NodeBuilder>, ChainSpec>, + > { + let path = reth_db::test_utils::tempdir_path(); + self.testing_node_with_datadir(task_executor, path) + } + + /// Creates a preconfigured node for testing purposes with a specific datadir. + #[cfg(feature = "test-utils")] + pub fn testing_node_with_datadir( mut self, task_executor: TaskExecutor, + datadir: impl Into, ) -> WithLaunchContext< NodeBuilder>, ChainSpec>, > { - let path = reth_node_core::dirs::MaybePlatformPath::::from( - reth_db::test_utils::tempdir_path(), - ); + let path = reth_node_core::dirs::MaybePlatformPath::::from(datadir.into()); self.config = self.config.with_datadir_args(reth_node_core::args::DatadirArgs { datadir: path.clone(), ..Default::default() @@ -308,7 +319,7 @@ where } } -/// A [`NodeBuilder`] with it's launch context already configured. +/// A [`NodeBuilder`] with its launch context already configured. /// /// This exposes the same methods as [`NodeBuilder`] but with the launch context already configured, /// See [`WithLaunchContext::launch`] @@ -549,6 +560,39 @@ where } /// Sets the hook that is run to configure the rpc modules. + /// + /// This hook can obtain the node's components (txpool, provider, etc.) and can modify the + /// modules that the RPC server installs. 
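// A compact sketch, with hypothetical names, of a custom `NodeAddOns` implementation in the
// spirit of the trait documentation above: it launches one background service from the
// add-ons context and returns a cloneable handle. `MetricsAddOn` and `MetricsHandle` are
// illustrative only, and the trait method signature is simplified here.
#[derive(Clone)]
struct MetricsHandle;

struct MetricsAddOn;

impl<N: FullNodeComponents> NodeAddOns<N> for MetricsAddOn {
    type Handle = MetricsHandle;

    async fn launch_add_ons(self, ctx: AddOnsContext<'_, N>) -> eyre::Result<Self::Handle> {
        // The context exposes all core components; spawn a supplementary task next to them.
        ctx.node.task_executor().spawn(async { /* export metrics here */ });
        Ok(MetricsHandle)
    }
}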
+ /// + /// # Examples + /// + /// ```rust,ignore + /// use jsonrpsee::{core::RpcResult, proc_macros::rpc}; + /// + /// #[derive(Clone)] + /// struct CustomApi { pool: Pool } + /// + /// #[rpc(server, namespace = "custom")] + /// impl CustomApi { + /// #[method(name = "hello")] + /// async fn hello(&self) -> RpcResult { + /// Ok("World".to_string()) + /// } + /// } + /// + /// let node = NodeBuilder::new(config) + /// .node(EthereumNode::default()) + /// .extend_rpc_modules(|ctx| { + /// // Access node components, so they can be used by the CustomApi + /// let pool = ctx.pool().clone(); + /// + /// // Add custom RPC namespace + /// ctx.modules.merge_configured(CustomApi { pool }.into_rpc())?; + /// + /// Ok(()) + /// }) + /// .build()?; + /// ``` pub fn extend_rpc_modules(self, hook: F) -> Self where F: FnOnce(RpcContext<'_, NodeAdapter, AO::EthApi>) -> eyre::Result<()> diff --git a/crates/node/builder/src/builder/states.rs b/crates/node/builder/src/builder/states.rs index 8e016c264a1..140802aafb2 100644 --- a/crates/node/builder/src/builder/states.rs +++ b/crates/node/builder/src/builder/states.rs @@ -10,7 +10,7 @@ use crate::{ hooks::NodeHooks, launch::LaunchNode, rpc::{RethRpcAddOns, RethRpcServerHandles, RpcContext, RpcHandleProvider}, - AddOns, FullNode, + AddOns, ComponentsFor, FullNode, }; use reth_exex::ExExContext; @@ -74,7 +74,7 @@ impl fmt::Debug for NodeTypesAdapter { /// Container for the node's types and the components and other internals that can be used by /// addons of the node. #[derive(Debug)] -pub struct NodeAdapter> { +pub struct NodeAdapter = ComponentsFor> { /// The components of the node. pub components: C, /// The task executor for the node. @@ -249,7 +249,7 @@ where T: FullNodeTypes, CB: NodeComponentsBuilder, AO: RethRpcAddOns>, - >::Components>, >>::Handle: RpcHandleProvider< NodeAdapter>::Components>, @@ -293,3 +293,51 @@ where }) } } + +#[cfg(test)] +mod test { + use super::*; + use crate::components::Components; + use reth_consensus::noop::NoopConsensus; + use reth_db_api::mock::DatabaseMock; + use reth_ethereum_engine_primitives::EthEngineTypes; + use reth_evm::noop::NoopEvmConfig; + use reth_evm_ethereum::MockEvmConfig; + use reth_network::EthNetworkPrimitives; + use reth_network_api::{self, noop::NoopNetwork}; + use reth_node_api::FullNodeTypesAdapter; + use reth_node_ethereum::EthereumNode; + use reth_payload_builder::PayloadBuilderHandle; + use reth_provider::noop::NoopProvider; + use reth_tasks::TaskManager; + use reth_transaction_pool::noop::NoopTransactionPool; + + #[test] + fn test_noop_components() { + let components = Components::< + FullNodeTypesAdapter, + NoopNetwork, + _, + NoopEvmConfig, + _, + > { + transaction_pool: NoopTransactionPool::default(), + evm_config: NoopEvmConfig::default(), + consensus: NoopConsensus::default(), + network: NoopNetwork::default(), + payload_builder_handle: PayloadBuilderHandle::::noop(), + }; + + let task_executor = { + let runtime = tokio::runtime::Runtime::new().unwrap(); + let handle = runtime.handle().clone(); + let manager = TaskManager::new(handle); + manager.executor() + }; + + let node = NodeAdapter { components, task_executor, provider: NoopProvider::default() }; + + // test that node implements `FullNodeComponents` + as FullNodeComponents>::pool(&node); + } } diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index f7696799e97..49381462fa9 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -1,4 +1,33 @@ //!
Helper types that can be used by launchers. +//! +//! ## Launch Context Type System +//! +//! The node launch process uses a type-state pattern to ensure correct initialization +//! order at compile time. Methods are only available when their prerequisites are met. +//! +//! ### Core Types +//! +//! - [`LaunchContext`]: Base context with executor and data directory +//! - [`LaunchContextWith`]: Context with an attached value of type `T` +//! - [`Attached`]: Pairs values, preserving both previous (L) and new (R) state +//! +//! ### Helper Attachments +//! +//! - [`WithConfigs`]: Node config + TOML config +//! - [`WithMeteredProvider`]: Provider factory with metrics +//! - [`WithMeteredProviders`]: Provider factory + blockchain provider +//! - [`WithComponents`]: Final form with all components +//! +//! ### Method Availability +//! +//! Methods are implemented on specific type combinations: +//! - `impl LaunchContextWith`: Generic methods available for any attachment +//! - `impl LaunchContextWith`: Config-specific methods +//! - `impl LaunchContextWith>`: Database operations +//! - `impl LaunchContextWith>`: Provider operations +//! - etc. +//! +//! This ensures correct initialization order without runtime checks. use crate::{ components::{NodeComponents, NodeComponentsBuilder}, @@ -66,11 +95,27 @@ use tokio::sync::{ }; use futures::{future::Either, stream, Stream, StreamExt}; +use reth_node_ethstats::EthStatsService; use reth_node_events::{cl::ConsensusLayerHealthEvents, node::NodeEvent}; /// Reusable setup for launching a node. /// -/// This provides commonly used boilerplate for launching a node. +/// This is the entry point for the node launch process. It implements a builder +/// pattern using type-state programming to enforce correct initialization order. +/// +/// ## Type Evolution +/// +/// Starting from `LaunchContext`, each method transforms the type to reflect +/// accumulated state: +/// +/// ```text +/// LaunchContext +/// └─> LaunchContextWith +/// └─> LaunchContextWith> +/// └─> LaunchContextWith> +/// └─> LaunchContextWith> +/// └─> LaunchContextWith> +/// ``` #[derive(Debug, Clone)] pub struct LaunchContext { /// The task executor for the node. @@ -185,16 +230,21 @@ impl LaunchContext { .thread_name(|i| format!("reth-rayon-{i}")) .build_global() { - error!(%err, "Failed to build global thread pool") + warn!(%err, "Failed to build global thread pool") } } } /// A [`LaunchContext`] along with an additional value. /// -/// This can be used to sequentially attach additional values to the type during the launch process. +/// The type parameter `T` represents the current state of the launch process. +/// Methods are conditionally implemented based on `T`, ensuring operations +/// are only available when their prerequisites are met. /// -/// The type provides common boilerplate for launching a node depending on the additional value. +/// For example: +/// - Config methods when `T = WithConfigs` +/// - Database operations when `T = Attached, DB>` +/// - Provider operations when `T = Attached, ProviderFactory>` #[derive(Debug, Clone)] pub struct LaunchContextWith { /// The wrapped launch context. @@ -998,6 +1048,22 @@ where Either::Right(stream::empty()) } } + + /// Spawns the [`EthStatsService`] service if configured. 
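// A tiny self-contained illustration (toy types, not the real launch code) of the
// type-state pattern the module docs above describe: each step consumes the context and
// returns a differently-typed one, so a later step cannot be invoked before its
// prerequisites have been attached.
struct LaunchContext;
struct LaunchContextWith<T>(T);
struct Attached<L, R> {
    left: L,
    right: R,
}

struct Configs;
struct Database;

impl LaunchContext {
    fn with_configs(self, configs: Configs) -> LaunchContextWith<Configs> {
        LaunchContextWith(configs)
    }
}

impl LaunchContextWith<Configs> {
    // Attaching a database is only possible once configs are present.
    fn attach_database(self, db: Database) -> LaunchContextWith<Attached<Configs, Database>> {
        LaunchContextWith(Attached { left: self.0, right: db })
    }
}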
+ pub async fn spawn_ethstats(&self) -> eyre::Result<()> { + let Some(url) = self.node_config().debug.ethstats.as_ref() else { return Ok(()) }; + + let network = self.components().network().clone(); + let pool = self.components().pool().clone(); + let provider = self.node_adapter().provider.clone(); + + info!(target: "reth::cli", "Starting EthStats service at {}", url); + + let ethstats = EthStatsService::new(url, network, provider, pool).await?; + tokio::spawn(async move { ethstats.run().await }); + + Ok(()) + } } impl @@ -1074,7 +1140,11 @@ where } } -/// Joins two attachments together. +/// Joins two attachments together, preserving access to both values. +/// +/// This type enables the launch process to accumulate state while maintaining +/// access to all previously attached components. The `left` field holds the +/// previous state, while `right` holds the newly attached component. #[derive(Clone, Copy, Debug)] pub struct Attached { left: L, @@ -1201,6 +1271,7 @@ mod tests { transaction_lookup_distance: None, transaction_lookup_before: None, receipts_full: false, + receipts_pre_merge: false, receipts_distance: None, receipts_before: None, account_history_full: false, diff --git a/crates/node/builder/src/launch/debug.rs b/crates/node/builder/src/launch/debug.rs index fd85ba45206..687717d2705 100644 --- a/crates/node/builder/src/launch/debug.rs +++ b/crates/node/builder/src/launch/debug.rs @@ -7,7 +7,8 @@ use alloy_provider::network::AnyNetwork; use jsonrpsee::core::{DeserializeOwned, Serialize}; use reth_chainspec::EthChainSpec; use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider, RpcBlockProvider}; -use reth_node_api::{BlockTy, FullNodeComponents}; +use reth_engine_local::LocalMiner; +use reth_node_api::{BlockTy, FullNodeComponents, PayloadAttributesBuilder, PayloadTypes}; use std::sync::Arc; use tracing::info; @@ -59,6 +60,18 @@ pub trait DebugNode: Node { /// For Ethereum nodes, this typically converts from `alloy_rpc_types_eth::Block` /// to the node's internal block representation. fn rpc_to_primitive_block(rpc_block: Self::RpcBlock) -> BlockTy; + + /// Creates a payload attributes builder for local mining in dev mode. + /// + /// It will be used by the `LocalMiner` when dev mode is enabled. + /// + /// The builder is responsible for creating the payload attributes that define how blocks should + /// be constructed during local mining. + fn local_payload_attributes_builder( + chain_spec: &Self::ChainSpec, + ) -> impl PayloadAttributesBuilder< + <::Payload as PayloadTypes>::PayloadAttributes, + >; } /// Node launcher with support for launching various debugging utilities. 
@@ -154,6 +167,7 @@ where "etherscan api key not found for rpc consensus client for chain: {chain}" ) })?, + chain.id(), N::Types::rpc_to_primitive_block, ); let rpc_consensus_client = DebugConsensusClient::new( @@ -165,6 +179,29 @@ where }); } + if config.dev.dev { + info!(target: "reth::cli", "Using local payload attributes builder for dev mode"); + + let blockchain_db = handle.node.provider.clone(); + let chain_spec = config.chain.clone(); + let beacon_engine_handle = handle.node.rpc_handle().beacon_engine_handle.clone(); + let pool = handle.node.pool.clone(); + let payload_builder_handle = handle.node.payload_builder_handle.clone(); + + let dev_mining_mode = handle.node.config.dev_mining_mode(pool); + handle.node.task_executor.spawn_critical("local engine", async move { + LocalMiner::new( + blockchain_db, + N::Types::local_payload_attributes_builder(&chain_spec), + beacon_engine_handle, + dev_mining_mode, + payload_builder_handle, + ) + .run() + .await + }); + } + Ok(handle) } } diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index a7d31623cd2..f0cb7a4c085 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -12,11 +12,10 @@ use alloy_consensus::BlockHeader; use futures::{stream_select, StreamExt}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_db_api::{database_metrics::DatabaseMetrics, Database}; -use reth_engine_local::{LocalMiner, LocalPayloadAttributesBuilder}; use reth_engine_service::service::{ChainEvent, EngineService}; use reth_engine_tree::{ engine::{EngineApiRequest, EngineRequestHandler}, - tree::TreeConfig, + tree::{BasicEngineValidator, TreeConfig}, }; use reth_engine_util::EngineMessageStreamExt; use reth_exex::ExExManagerHandle; @@ -24,7 +23,6 @@ use reth_network::{types::BlockRangeUpdate, NetworkSyncUpdater, SyncState}; use reth_network_api::BlockDownloaderProvider; use reth_node_api::{ BeaconConsensusEngineHandle, BuiltPayload, FullNodeTypes, NodeTypes, NodeTypesWithDBAdapter, - PayloadAttributesBuilder, PayloadTypes, }; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, @@ -77,9 +75,6 @@ where CB: NodeComponentsBuilder, AO: RethRpcAddOns> + EngineValidatorAddOn>, - LocalPayloadAttributesBuilder: PayloadAttributesBuilder< - <::Payload as PayloadTypes>::PayloadAttributes, - >, >::Components>, >>::Handle: RpcHandleProvider< @@ -218,6 +213,15 @@ where // during this run. 
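// A minimal sketch (hypothetical enum and function, mirroring `dev_mining_mode` added to
// `NodeConfig` further below) of how the dev-mode local miner spawned above picks its
// cadence: a configured --dev.block-time yields interval mining, otherwise blocks are
// mined instantly as soon as pending transactions are available.
use std::time::Duration;

enum MiningMode {
    // Mine as soon as pending transactions arrive.
    Instant,
    // Mine on a fixed interval.
    Interval(Duration),
}

fn dev_mining_mode(block_time: Option<Duration>) -> MiningMode {
    match block_time {
        Some(interval) => MiningMode::Interval(interval),
        None => MiningMode::Instant,
    }
}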
.maybe_store_messages(node_config.debug.engine_api_store.clone()); + let engine_validator = BasicEngineValidator::new( + ctx.blockchain_db().clone(), + consensus.clone(), + ctx.components().evm_config().clone(), + engine_payload_validator, + engine_tree_config.clone(), + ctx.invalid_block_hook().await?, + ); + let mut engine_service = EngineService::new( consensus.clone(), ctx.chain_spec(), @@ -229,27 +233,12 @@ where ctx.blockchain_db().clone(), pruner, ctx.components().payload_builder_handle().clone(), - engine_payload_validator, + engine_validator, engine_tree_config, - ctx.invalid_block_hook().await?, ctx.sync_metrics_tx(), ctx.components().evm_config().clone(), ); - if ctx.is_dev() { - ctx.task_executor().spawn_critical( - "local engine", - LocalMiner::new( - ctx.blockchain_db().clone(), - LocalPayloadAttributesBuilder::new(ctx.chain_spec()), - beacon_engine_handle.clone(), - ctx.dev_mining_mode(ctx.components().pool()), - ctx.components().payload_builder_handle().clone(), - ) - .run(), - ); - } - info!(target: "reth::cli", "Consensus engine initialized"); let events = stream_select!( @@ -367,6 +356,8 @@ where // Notify on node started on_node_started.on_event(FullNode::clone(&full_node))?; + ctx.spawn_ethstats().await?; + let handle = NodeHandle { node_exit_future: NodeExitFuture::new( async { rx.await? }, diff --git a/crates/node/builder/src/node.rs b/crates/node/builder/src/node.rs index b2c8c4894d2..a4d603c7247 100644 --- a/crates/node/builder/src/node.rs +++ b/crates/node/builder/src/node.rs @@ -20,6 +20,10 @@ use std::{ sync::Arc, }; +/// A helper type to obtain components for a given node when [`FullNodeTypes::Types`] is a [`Node`] +/// implementation. +pub type ComponentsFor = <<::Types as Node>::ComponentsBuilder as NodeComponentsBuilder>::Components; + /// A [`crate::Node`] is a [`NodeTypes`] that comes with preconfigured components. /// /// This can be used to configure the builder with a preset of components. diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index 70fbe4818b7..2d95247a3fc 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -11,14 +11,14 @@ use reth_chain_state::CanonStateSubscriptions; use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; use reth_node_api::{ AddOnsContext, BlockTy, EngineTypes, EngineValidator, FullNodeComponents, FullNodeTypes, - NodeAddOns, NodeTypes, PayloadTypes, ReceiptTy, + NodeAddOns, NodeTypes, PayloadTypes, PrimitivesTy, }; use reth_node_core::{ node_config::NodeConfig, version::{CARGO_PKG_VERSION, CLIENT_CODE, NAME_CLIENT, VERGEN_GIT_SHA}, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadStore}; -use reth_rpc::eth::{EthApiTypes, FullEthApiServer}; +use reth_rpc::eth::{core::EthRpcConverterFor, EthApiTypes, FullEthApiServer}; use reth_rpc_api::{eth::helpers::AddDevSigners, IntoEngineApiRpcModule}; use reth_rpc_builder::{ auth::{AuthRpcModule, AuthServerHandle}, @@ -283,6 +283,8 @@ where } /// Returns a reference to the configured node. + /// + /// This gives access to the node's components. pub const fn node(&self) -> &Node { &self.node } @@ -695,12 +697,31 @@ where } /// Launches the RPC servers with the given context and an additional hook for extending - /// modules. + /// modules. Whether the auth server is launched depends on the CLI configuration. 
pub async fn launch_add_ons_with( self, ctx: AddOnsContext<'_, N>, ext: F, ) -> eyre::Result> + where + F: FnOnce(RpcModuleContainer<'_, N, EthB::EthApi>) -> eyre::Result<()>, + { + // Check CLI config to determine if auth server should be disabled + let disable_auth = ctx.config.rpc.disable_auth_server; + self.launch_add_ons_with_opt_engine(ctx, ext, disable_auth).await + } + + /// Launches the RPC servers with the given context and an additional hook for extending + /// modules. Optionally disables the auth server based on the `disable_auth` parameter. + /// + /// When `disable_auth` is true, the auth server will not be started and a noop handle + /// will be used instead. + pub async fn launch_add_ons_with_opt_engine( + self, + ctx: AddOnsContext<'_, N>, + ext: F, + disable_auth: bool, + ) -> eyre::Result> where F: FnOnce(RpcModuleContainer<'_, N, EthB::EthApi>) -> eyre::Result<()>, { @@ -719,14 +740,21 @@ where } = setup_ctx; let server_config = config.rpc.rpc_server_config().set_rpc_middleware(rpc_middleware); - let auth_module_clone = auth_module.clone(); - // launch servers concurrently - let (rpc, auth) = futures::future::try_join( - Self::launch_rpc_server_internal(server_config, &modules), - Self::launch_auth_server_internal(auth_module_clone, auth_config), - ) - .await?; + let (rpc, auth) = if disable_auth { + // Only launch the RPC server, use a noop auth handle + let rpc = Self::launch_rpc_server_internal(server_config, &modules).await?; + (rpc, AuthServerHandle::noop()) + } else { + let auth_module_clone = auth_module.clone(); + // launch servers concurrently + let (rpc, auth) = futures::future::try_join( + Self::launch_rpc_server_internal(server_config, &modules), + Self::launch_auth_server_internal(auth_module_clone, auth_config), + ) + .await?; + (rpc, auth) + }; let handles = RethRpcServerHandles { rpc, auth }; @@ -940,7 +968,22 @@ pub struct EthApiCtx<'a, N: FullNodeTypes> { /// Eth API configuration pub config: EthConfig, /// Cache for eth state - pub cache: EthStateCache, ReceiptTy>, + pub cache: EthStateCache>, +} + +impl<'a, N: FullNodeComponents>> EthApiCtx<'a, N> { + /// Provides a [`EthApiBuilder`] with preconfigured config and components. + pub fn eth_api_builder(self) -> reth_rpc::EthApiBuilder> { + reth_rpc::EthApiBuilder::new_with_components(self.components.clone()) + .eth_cache(self.cache) + .task_spawner(self.components.task_executor().clone()) + .gas_cap(self.config.rpc_gas_cap.into()) + .max_simulate_blocks(self.config.rpc_max_simulate_blocks) + .eth_proof_window(self.config.eth_proof_window) + .fee_history_cache_config(self.config.fee_history_cache) + .proof_permits(self.config.proof_permits) + .gas_oracle_config(self.config.gas_oracle) + } } /// A `EthApi` that knows how to build `eth` namespace API from [`FullNodeComponents`]. 
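// A reduced, self-contained sketch (stub handle types; not the builder's real signatures)
// of the optional auth-server launch added above: with --disable-auth-server only the
// regular RPC server starts, and a noop auth handle keeps the return shape uniform for
// downstream code.
struct RpcHandle;
struct AuthHandle;

impl AuthHandle {
    fn noop() -> Self {
        Self
    }
}

async fn launch_rpc() -> eyre::Result<RpcHandle> {
    Ok(RpcHandle)
}

async fn launch_auth() -> eyre::Result<AuthHandle> {
    Ok(AuthHandle)
}

async fn launch_servers(disable_auth: bool) -> eyre::Result<(RpcHandle, AuthHandle)> {
    if disable_auth {
        // Only launch the RPC server; engine/auth requests will not be served.
        Ok((launch_rpc().await?, AuthHandle::noop()))
    } else {
        // Launch both servers concurrently, as in the non-disabled path.
        let (rpc, auth) = tokio::try_join!(launch_rpc(), launch_auth())?;
        Ok((rpc, auth))
    }
}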
diff --git a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index 1a36c9af5ef..2240fa98837 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -34,6 +34,7 @@ reth-network-peers.workspace = true reth-prune-types.workspace = true reth-stages-types.workspace = true reth-ethereum-forks.workspace = true +reth-engine-local.workspace = true reth-engine-primitives.workspace = true # ethereum diff --git a/crates/node/core/src/args/debug.rs b/crates/node/core/src/args/debug.rs index d8b6d570384..fdd08243a77 100644 --- a/crates/node/core/src/args/debug.rs +++ b/crates/node/core/src/args/debug.rs @@ -92,6 +92,11 @@ pub struct DebugArgs { verbatim_doc_comment )] pub healthy_node_rpc_url: Option, + + /// The URL of the ethstats server to connect to. + /// Example: `nodename:secret@host:port` + #[arg(long = "ethstats", help_heading = "Debug")] + pub ethstats: Option, } impl Default for DebugArgs { @@ -109,6 +114,7 @@ impl Default for DebugArgs { engine_api_store: None, invalid_block_hook: Some(InvalidBlockSelection::default()), healthy_node_rpc_url: None, + ethstats: None, } } } diff --git a/crates/node/core/src/args/engine.rs b/crates/node/core/src/args/engine.rs index 8c03e42d9f2..64829c4c064 100644 --- a/crates/node/core/src/args/engine.rs +++ b/crates/node/core/src/args/engine.rs @@ -34,6 +34,10 @@ pub struct EngineArgs { #[arg(long = "engine.disable-caching-and-prewarming")] pub caching_and_prewarming_disabled: bool, + /// Enable the parallel sparse trie in the engine. + #[arg(long = "engine.parallel-sparse-trie", default_value = "false")] + pub parallel_sparse_trie_enabled: bool, + /// Enable state provider latency metrics. This allows the engine to collect and report stats /// about how long state provider calls took during execution, but this does introduce slight /// overhead to state provider calls. @@ -97,6 +101,7 @@ impl Default for EngineArgs { state_root_task_compare_updates: false, caching_and_prewarming_enabled: true, caching_and_prewarming_disabled: false, + parallel_sparse_trie_enabled: false, state_provider_metrics: false, cross_block_cache_size: DEFAULT_CROSS_BLOCK_CACHE_SIZE_MB, accept_execution_requests_hash: false, @@ -118,6 +123,7 @@ impl EngineArgs { .with_memory_block_buffer_target(self.memory_block_buffer_target) .with_legacy_state_root(self.legacy_state_root_task_enabled) .without_caching_and_prewarming(self.caching_and_prewarming_disabled) + .with_enable_parallel_sparse_trie(self.parallel_sparse_trie_enabled) .with_state_provider_metrics(self.state_provider_metrics) .with_always_compare_trie_updates(self.state_root_task_compare_updates) .with_cross_block_cache_size(self.cross_block_cache_size * 1024 * 1024) diff --git a/crates/node/core/src/args/pruning.rs b/crates/node/core/src/args/pruning.rs index 3f493a900a9..d6b8170440a 100644 --- a/crates/node/core/src/args/pruning.rs +++ b/crates/node/core/src/args/pruning.rs @@ -48,19 +48,22 @@ pub struct PruningArgs { // Receipts /// Prunes all receipt data. - #[arg(long = "prune.receipts.full", conflicts_with_all = &["receipts_distance", "receipts_before"])] + #[arg(long = "prune.receipts.full", conflicts_with_all = &["receipts_pre_merge", "receipts_distance", "receipts_before"])] pub receipts_full: bool, + /// Prune receipts before the merge block. + #[arg(long = "prune.receipts.pre-merge", conflicts_with_all = &["receipts_full", "receipts_distance", "receipts_before"])] + pub receipts_pre_merge: bool, /// Prune receipts before the `head-N` block number. In other words, keep last N + 1 blocks. 
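// A simplified sketch (local enum; the real logic is in `receipts_prune_mode` below) of how
// the mutually exclusive receipt pruning flags resolve, including the new
// --prune.receipts.pre-merge, which maps the chain's Paris (merge) activation block to a
// `Before` prune mode.
enum PruneMode {
    Full,
    Distance(u64),
    Before(u64),
}

fn resolve_receipts_prune_mode(
    pre_merge: bool,
    paris_activation_block: Option<u64>,
    full: bool,
    distance: Option<u64>,
    before: Option<u64>,
) -> Option<PruneMode> {
    if pre_merge {
        // Chains where Paris never activated at a known block yield no prune mode.
        paris_activation_block.map(PruneMode::Before)
    } else if full {
        Some(PruneMode::Full)
    } else if let Some(distance) = distance {
        Some(PruneMode::Distance(distance))
    } else {
        before.map(PruneMode::Before)
    }
}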
- #[arg(long = "prune.receipts.distance", value_name = "BLOCKS", conflicts_with_all = &["receipts_full", "receipts_before"])] + #[arg(long = "prune.receipts.distance", value_name = "BLOCKS", conflicts_with_all = &["receipts_full", "receipts_pre_merge", "receipts_before"])] pub receipts_distance: Option, /// Prune receipts before the specified block number. The specified block number is not pruned. - #[arg(long = "prune.receipts.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["receipts_full", "receipts_distance"])] + #[arg(long = "prune.receipts.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["receipts_full", "receipts_pre_merge", "receipts_distance"])] pub receipts_before: Option, // Receipts Log Filter /// Configure receipts log filter. Format: /// <`address`>:<`prune_mode`>[,<`address`>:<`prune_mode`>...] Where <`prune_mode`> can be /// 'full', 'distance:<`blocks`>', or 'before:<`block_number`>' - #[arg(long = "prune.receiptslogfilter", value_name = "FILTER_CONFIG", conflicts_with_all = &["receipts_full", "receipts_distance", "receipts_before"], value_parser = parse_receipts_log_filter)] + #[arg(long = "prune.receiptslogfilter", value_name = "FILTER_CONFIG", conflicts_with_all = &["receipts_full", "receipts_pre_merge", "receipts_distance", "receipts_before"], value_parser = parse_receipts_log_filter)] pub receipts_log_filter: Option, // Account History @@ -138,7 +141,7 @@ impl PruningArgs { if let Some(mode) = self.transaction_lookup_prune_mode() { config.segments.transaction_lookup = Some(mode); } - if let Some(mode) = self.receipts_prune_mode() { + if let Some(mode) = self.receipts_prune_mode(chain_spec) { config.segments.receipts = Some(mode); } if let Some(mode) = self.account_history_prune_mode() { @@ -202,15 +205,21 @@ impl PruningArgs { } } - const fn receipts_prune_mode(&self) -> Option { - if self.receipts_full { + fn receipts_prune_mode(&self, chain_spec: &ChainSpec) -> Option + where + ChainSpec: EthereumHardforks, + { + if self.receipts_pre_merge { + chain_spec + .ethereum_fork_activation(EthereumHardfork::Paris) + .block_number() + .map(PruneMode::Before) + } else if self.receipts_full { Some(PruneMode::Full) } else if let Some(distance) = self.receipts_distance { Some(PruneMode::Distance(distance)) - } else if let Some(block_number) = self.receipts_before { - Some(PruneMode::Before(block_number)) } else { - None + self.receipts_before.map(PruneMode::Before) } } diff --git a/crates/node/core/src/args/rpc_server.rs b/crates/node/core/src/args/rpc_server.rs index 120a3335936..07a0eb93303 100644 --- a/crates/node/core/src/args/rpc_server.rs +++ b/crates/node/core/src/args/rpc_server.rs @@ -94,6 +94,12 @@ pub struct RpcServerArgs { #[arg(long, default_value_t = constants::DEFAULT_IPC_ENDPOINT.to_string())] pub ipcpath: String, + /// Set the permissions for the IPC socket file, in octal format. + /// + /// If not specified, the permissions will be set by the system's umask. + #[arg(long = "ipc.permissions")] + pub ipc_socket_permissions: Option, + /// Auth server address to listen on #[arg(long = "authrpc.addr", default_value_t = IpAddr::V4(Ipv4Addr::LOCALHOST))] pub auth_addr: IpAddr, @@ -119,6 +125,13 @@ pub struct RpcServerArgs { #[arg(long = "auth-ipc.path", default_value_t = constants::DEFAULT_ENGINE_API_IPC_ENDPOINT.to_string())] pub auth_ipc_path: String, + /// Disable the auth/engine API server. + /// + /// This will prevent the authenticated engine-API server from starting. 
Use this if you're + /// running a node that doesn't need to serve engine API requests. + #[arg(long = "disable-auth-server", alias = "disable-engine-api")] + pub disable_auth_server: bool, + /// Hex encoded JWT secret to authenticate the regular RPC server(s), see `--http.api` and /// `--ws.api`. /// @@ -330,11 +343,13 @@ impl Default for RpcServerArgs { ws_api: None, ipcdisable: false, ipcpath: constants::DEFAULT_IPC_ENDPOINT.to_string(), + ipc_socket_permissions: None, auth_addr: Ipv4Addr::LOCALHOST.into(), auth_port: constants::DEFAULT_AUTH_PORT, auth_jwtsecret: None, auth_ipc: false, auth_ipc_path: constants::DEFAULT_ENGINE_API_IPC_ENDPOINT.to_string(), + disable_auth_server: false, rpc_jwtsecret: None, rpc_max_request_size: RPC_DEFAULT_MAX_REQUEST_SIZE_MB.into(), rpc_max_response_size: RPC_DEFAULT_MAX_RESPONSE_SIZE_MB.into(), diff --git a/crates/node/core/src/args/txpool.rs b/crates/node/core/src/args/txpool.rs index 59b920cc604..bcb033301fc 100644 --- a/crates/node/core/src/args/txpool.rs +++ b/crates/node/core/src/args/txpool.rs @@ -65,10 +65,20 @@ pub struct TxPoolArgs { #[arg(long = "txpool.minimal-protocol-fee", default_value_t = MIN_PROTOCOL_BASE_FEE)] pub minimal_protocol_basefee: u64, + /// Minimum priority fee required for transaction acceptance into the pool. + /// Transactions with priority fee below this value will be rejected. + #[arg(long = "txpool.minimum-priority-fee")] + pub minimum_priority_fee: Option, + /// The default enforced gas limit for transactions entering the pool #[arg(long = "txpool.gas-limit", default_value_t = ETHEREUM_BLOCK_GAS_LIMIT_30M)] pub enforced_gas_limit: u64, + /// Maximum gas limit for individual transactions. Transactions exceeding this limit will be + /// rejected by the transaction pool + #[arg(long = "txpool.max-tx-gas")] + pub max_tx_gas_limit: Option, + /// Price bump percentage to replace an already existing blob transaction #[arg(long = "blobpool.pricebump", default_value_t = REPLACE_BLOB_PRICE_BUMP)] pub blob_transaction_price_bump: u128, @@ -139,7 +149,9 @@ impl Default for TxPoolArgs { max_account_slots: TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, price_bump: DEFAULT_PRICE_BUMP, minimal_protocol_basefee: MIN_PROTOCOL_BASE_FEE, + minimum_priority_fee: None, enforced_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT_30M, + max_tx_gas_limit: None, blob_transaction_price_bump: REPLACE_BLOB_PRICE_BUMP, max_tx_input_bytes: DEFAULT_MAX_TX_INPUT_BYTES, max_cached_entries: DEFAULT_MAX_CACHED_BLOBS, @@ -189,6 +201,7 @@ impl RethTransactionPoolConfig for TxPoolArgs { replace_blob_tx_price_bump: self.blob_transaction_price_bump, }, minimal_protocol_basefee: self.minimal_protocol_basefee, + minimum_priority_fee: self.minimum_priority_fee, gas_limit: self.enforced_gas_limit, pending_tx_listener_buffer_size: self.pending_tx_listener_buffer_size, new_tx_listener_buffer_size: self.new_tx_listener_buffer_size, diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index b1998110a33..f2962e0f236 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -14,6 +14,7 @@ use alloy_primitives::{BlockNumber, B256}; use eyre::eyre; use reth_chainspec::{ChainSpec, EthChainSpec, MAINNET}; use reth_config::config::PruneConfig; +use reth_engine_local::MiningMode; use reth_ethereum_forks::{EthereumHardforks, Head}; use reth_network_p2p::headers::client::HeadersClient; use reth_primitives_traits::SealedHeader; @@ -22,6 +23,7 @@ use reth_storage_api::{ BlockHashReader, DatabaseProviderFactory, HeaderProvider, 
StageCheckpointReader, }; use reth_storage_errors::provider::ProviderResult; +use reth_transaction_pool::TransactionPool; use serde::{de::DeserializeOwned, Serialize}; use std::{ fs, @@ -490,6 +492,15 @@ impl NodeConfig { era: self.era, } } + + /// Returns the [`MiningMode`] intended for --dev mode. + pub fn dev_mining_mode(&self, pool: impl TransactionPool) -> MiningMode { + if let Some(interval) = self.dev.block_time { + MiningMode::interval(interval) + } else { + MiningMode::instant(pool) + } + } } impl Default for NodeConfig { diff --git a/crates/node/ethstats/Cargo.toml b/crates/node/ethstats/Cargo.toml new file mode 100644 index 00000000000..6ffad317702 --- /dev/null +++ b/crates/node/ethstats/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "reth-node-ethstats" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +reth-network-api.workspace = true +reth-transaction-pool.workspace = true +reth-primitives-traits.workspace = true +reth-storage-api.workspace = true +reth-chain-state.workspace = true + +alloy-primitives.workspace = true +alloy-consensus.workspace = true + +tokio.workspace = true +tokio-tungstenite = { workspace = true, features = ["rustls-tls-native-roots"] } +futures-util.workspace = true +tokio-stream.workspace = true + +serde.workspace = true +serde_json.workspace = true + +tracing.workspace = true +url.workspace = true +chrono.workspace = true +thiserror = { workspace = true, features = ["std"] } diff --git a/crates/node/ethstats/src/connection.rs b/crates/node/ethstats/src/connection.rs new file mode 100644 index 00000000000..049788dccc3 --- /dev/null +++ b/crates/node/ethstats/src/connection.rs @@ -0,0 +1,67 @@ +/// Abstractions for managing `WebSocket` connections in the ethstats service. +use crate::error::ConnectionError; +use futures_util::{ + stream::{SplitSink, SplitStream}, + SinkExt, StreamExt, +}; +use serde_json::Value; +use std::sync::Arc; +use tokio::{net::TcpStream, sync::Mutex}; +use tokio_tungstenite::{ + tungstenite::protocol::{frame::Utf8Bytes, Message}, + MaybeTlsStream, WebSocketStream, +}; + +/// Type alias for a `WebSocket` stream that may be TLS or plain TCP +pub(crate) type WsStream = WebSocketStream>; + +/// Wrapper for a thread-safe, asynchronously accessible `WebSocket` connection +#[derive(Debug, Clone)] +pub(crate) struct ConnWrapper { + /// Write-only part of the `WebSocket` stream + writer: Arc>>, + /// Read-only part of the `WebSocket` stream + reader: Arc>>, +} + +impl ConnWrapper { + /// Create a new connection wrapper from a `WebSocket` stream + pub(crate) fn new(stream: WsStream) -> Self { + let (writer, reader) = stream.split(); + + Self { writer: Arc::new(Mutex::new(writer)), reader: Arc::new(Mutex::new(reader)) } + } + + /// Write a JSON string as a text message to the `WebSocket` + pub(crate) async fn write_json(&self, value: &str) -> Result<(), ConnectionError> { + let mut writer = self.writer.lock().await; + writer.send(Message::Text(Utf8Bytes::from(value))).await?; + + Ok(()) + } + + /// Read the next JSON text message from the `WebSocket` + /// + /// Waits for the next text message, parses it as JSON, and returns the value. + /// Ignores non-text messages. Returns an error if the connection is closed or if parsing fails. 
+ pub(crate) async fn read_json(&self) -> Result<Value, ConnectionError> { + let mut reader = self.reader.lock().await; + while let Some(msg) = reader.next().await { + match msg? { + Message::Text(text) => return Ok(serde_json::from_str(&text)?), + Message::Close(_) => return Err(ConnectionError::ConnectionClosed), + _ => {} // Ignore non-text messages + } + } + + Err(ConnectionError::ConnectionClosed) + } + + /// Close the `WebSocket` connection gracefully + pub(crate) async fn close(&self) -> Result<(), ConnectionError> { + let mut writer = self.writer.lock().await; + writer.close().await?; + + Ok(()) + } +} diff --git a/crates/node/ethstats/src/credentials.rs b/crates/node/ethstats/src/credentials.rs new file mode 100644 index 00000000000..cf2adb785e8 --- /dev/null +++ b/crates/node/ethstats/src/credentials.rs @@ -0,0 +1,47 @@ +use crate::error::EthStatsError; +use std::str::FromStr; + +/// Credentials for connecting to an `EthStats` server +/// +/// Contains the node identifier, authentication secret, and server host +/// information needed to establish a connection with the `EthStats` service. +#[derive(Debug, Clone)] +pub(crate) struct EthstatsCredentials { + /// Unique identifier for this node in the `EthStats` network + pub node_id: String, + /// Authentication secret for the `EthStats` server + pub secret: String, + /// Host address of the `EthStats` server + pub host: String, +} + +impl FromStr for EthstatsCredentials { + type Err = EthStatsError; + + /// Parse credentials from a string in the format "`node_id:secret@host`" + /// + /// # Arguments + /// * `s` - String containing credentials in the format "`node_id:secret@host`" + /// + /// # Returns + /// * `Ok(EthstatsCredentials)` - Successfully parsed credentials + /// * `Err(EthStatsError::InvalidUrl)` - Invalid format or missing separators + fn from_str(s: &str) -> Result<Self, Self::Err> { + let parts: Vec<&str> = s.split('@').collect(); + if parts.len() != 2 { + return Err(EthStatsError::InvalidUrl("Missing '@' separator".to_string())); + } + let creds = parts[0]; + let host = parts[1].to_string(); + let creds_parts: Vec<&str> = creds.split(':').collect(); + if creds_parts.len() != 2 { + return Err(EthStatsError::InvalidUrl( + "Missing ':' separator in credentials".to_string(), + )); + } + let node_id = creds_parts[0].to_string(); + let secret = creds_parts[1].to_string(); + + Ok(Self { node_id, secret, host }) + } +} diff --git a/crates/node/ethstats/src/error.rs b/crates/node/ethstats/src/error.rs new file mode 100644 index 00000000000..fff9bf5306a --- /dev/null +++ b/crates/node/ethstats/src/error.rs @@ -0,0 +1,69 @@ +use thiserror::Error; + +/// Errors that can occur during `WebSocket` connection handling +#[derive(Debug, Error)] +pub enum ConnectionError { + /// The `WebSocket` connection was closed unexpectedly + #[error("Connection closed")] + ConnectionClosed, + + /// Error occurred during JSON serialization/deserialization + #[error("Serialization error: {0}")] + Serialization(#[from] serde_json::Error), + + /// Error occurred during `WebSocket` communication + #[error("WebSocket error: {0}")] + WebSocket(#[from] tokio_tungstenite::tungstenite::Error), +} + +/// Main error type for the `EthStats` client +/// +/// This enum covers all possible errors that can occur when interacting +/// with an `EthStats` server, including connection issues, authentication +/// problems, data fetching errors, and various I/O operations.
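+/// +/// A caller might treat transport-level failures as retryable and everything else as fatal (a sketch, not part of this change): +/// ```ignore +/// match result { +///     Err(EthStatsError::Timeout | EthStatsError::NotConnected) => { /* reconnect */ } +///     Err(other) => tracing::error!("ethstats error: {other}"), +///     Ok(()) => {} +/// } +/// ```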
+#[derive(Debug, Error)] +pub enum EthStatsError { + /// The provided URL is invalid or malformed + #[error("Invalid URL: {0}")] + InvalidUrl(String), + + /// Error occurred during connection establishment or maintenance + #[error("Connection error: {0}")] + ConnectionError(#[from] ConnectionError), + + /// Authentication failed with the `EthStats` server + #[error("Authentication error: {0}")] + AuthError(String), + + /// Attempted to perform an operation while not connected to the server + #[error("Not connected to server")] + NotConnected, + + /// Error occurred during JSON serialization or deserialization + #[error("Serialization error: {0}")] + Serialization(#[from] serde_json::Error), + + /// Error occurred during `WebSocket` communication + #[error("WebSocket error: {0}")] + WebSocket(#[from] tokio_tungstenite::tungstenite::Error), + + /// Operation timed out + #[error("Timeout error")] + Timeout, + + /// Error occurred while parsing a URL + #[error("URL parsing error: {0}")] + Url(#[from] url::ParseError), + + /// Requested block was not found in the blockchain + #[error("Block not found: {0}")] + BlockNotFound(u64), + + /// Error occurred while fetching data from the blockchain or server + #[error("Data fetch error: {0}")] + DataFetchError(String), + + /// The request sent to the server was invalid or malformed + #[error("Invalid request")] + InvalidRequest, +} diff --git a/crates/node/ethstats/src/ethstats.rs b/crates/node/ethstats/src/ethstats.rs new file mode 100644 index 00000000000..aea8a160fc0 --- /dev/null +++ b/crates/node/ethstats/src/ethstats.rs @@ -0,0 +1,823 @@ +use crate::{ + connection::ConnWrapper, + credentials::EthstatsCredentials, + error::EthStatsError, + events::{ + AuthMsg, BlockMsg, BlockStats, HistoryMsg, LatencyMsg, NodeInfo, NodeStats, PendingMsg, + PendingStats, PingMsg, StatsMsg, TxStats, UncleStats, + }, +}; +use alloy_consensus::{BlockHeader, Sealable}; +use alloy_primitives::U256; +use reth_chain_state::{CanonStateNotification, CanonStateSubscriptions}; +use reth_network_api::{NetworkInfo, Peers}; +use reth_primitives_traits::{Block, BlockBody}; +use reth_storage_api::{BlockReader, BlockReaderIdExt, NodePrimitivesProvider}; +use reth_transaction_pool::TransactionPool; + +use chrono::Local; +use serde_json::Value; +use std::{ + str::FromStr, + sync::Arc, + time::{Duration, Instant}, +}; +use tokio::{ + sync::{mpsc, Mutex, RwLock}, + time::{interval, sleep, timeout}, +}; +use tokio_stream::StreamExt; +use tokio_tungstenite::connect_async; +use tracing::{debug, info}; +use url::Url; + +/// Number of historical blocks to include in a history update sent to the `EthStats` server +const HISTORY_UPDATE_RANGE: u64 = 50; +/// Duration to wait before attempting to reconnect to the `EthStats` server +const RECONNECT_INTERVAL: Duration = Duration::from_secs(5); +/// Maximum time to wait for a ping response from the server +const PING_TIMEOUT: Duration = Duration::from_secs(5); +/// Interval between regular stats reports to the server +const REPORT_INTERVAL: Duration = Duration::from_secs(15); +/// Maximum time to wait for initial connection establishment +const CONNECT_TIMEOUT: Duration = Duration::from_secs(10); +/// Maximum time to wait for reading messages from the server +const READ_TIMEOUT: Duration = Duration::from_secs(30); + +/// Main service for interacting with an `EthStats` server +/// +/// This service handles all communication with the `EthStats` server including +/// authentication, stats reporting, block notifications, and connection management.
+/// It maintains a persistent `WebSocket` connection and automatically reconnects +/// when the connection is lost. +#[derive(Debug)] +pub struct EthStatsService<Network, Provider, Pool> { + /// Authentication credentials for the `EthStats` server + credentials: EthstatsCredentials, + /// `WebSocket` connection wrapper, wrapped in `Arc` for shared access + conn: Arc<RwLock<Option<ConnWrapper>>>, + /// Timestamp of the last ping sent to the server + last_ping: Arc<Mutex<Option<Instant>>>, + /// Network interface for getting peer and sync information + network: Network, + /// Blockchain provider for reading block data and state + provider: Provider, + /// Transaction pool for getting pending transaction statistics + pool: Pool, +} + +impl<Network, Provider, Pool> EthStatsService<Network, Provider, Pool> +where + Network: NetworkInfo + Peers, + Provider: BlockReaderIdExt + CanonStateSubscriptions, + Pool: TransactionPool, +{ + /// Create a new `EthStats` service and establish initial connection + /// + /// # Arguments + /// * `url` - Connection string in format "`node_id:secret@host`" + /// * `network` - Network interface implementation + /// * `provider` - Blockchain provider implementation + /// * `pool` - Transaction pool implementation + pub async fn new( + url: &str, + network: Network, + provider: Provider, + pool: Pool, + ) -> Result<Self, EthStatsError> { + let credentials = EthstatsCredentials::from_str(url)?; + let service = Self { + credentials, + conn: Arc::new(RwLock::new(None)), + last_ping: Arc::new(Mutex::new(None)), + network, + provider, + pool, + }; + service.connect().await?; + + Ok(service) + } + + /// Establish `WebSocket` connection to the `EthStats` server + /// + /// Attempts to connect to the server using the credentials and handles + /// connection timeouts and errors. + async fn connect(&self) -> Result<(), EthStatsError> { + debug!( + target: "ethstats", + "Attempting to connect to EthStats server at {}", self.credentials.host + ); + let full_url = format!("ws://{}/api", self.credentials.host); + let url = Url::parse(&full_url) + .map_err(|e| EthStatsError::InvalidUrl(format!("Invalid URL: {full_url} - {e}")))?; + + match timeout(CONNECT_TIMEOUT, connect_async(url.to_string())).await { + Ok(Ok((ws_stream, _))) => { + debug!( + target: "ethstats", + "Successfully connected to EthStats server at {}", self.credentials.host + ); + let conn: ConnWrapper = ConnWrapper::new(ws_stream); + *self.conn.write().await = Some(conn.clone()); + self.login().await?; + Ok(()) + } + Ok(Err(e)) => Err(EthStatsError::InvalidUrl(e.to_string())), + Err(_) => { + debug!(target: "ethstats", "Connection to EthStats server timed out"); + Err(EthStatsError::Timeout) + } + } + } + + /// Authenticate with the `EthStats` server + /// + /// Sends authentication credentials and node information to the server + /// and waits for a successful acknowledgment.
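 + /// + /// The handshake and its acknowledgment have this shape on the wire (illustrative values): + /// ```json + /// { "emit": ["hello", { "id": "node-1", "secret": "...", "info": { "name": "node-1", "canUpdateHistory": true } }] } + /// ``` + /// A successful login is answered with `{ "emit": ["ready", ...] }`.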
+ async fn login(&self) -> Result<(), EthStatsError> { + debug!( + target: "ethstats", + "Attempting to login to EthStats server as node_id {}", self.credentials.node_id + ); + let conn = self.conn.read().await; + let conn = conn.as_ref().ok_or(EthStatsError::NotConnected)?; + + let network_status = self + .network + .network_status() + .await + .map_err(|e| EthStatsError::AuthError(e.to_string()))?; + let id = &self.credentials.node_id; + let secret = &self.credentials.secret; + let protocol = network_status + .capabilities + .iter() + .map(|cap| format!("{}/{}", cap.name, cap.version)) + .collect::<Vec<_>>() + .join(", "); + let port = self.network.local_addr().port() as u64; + + let auth = AuthMsg { + id: id.clone(), + secret: secret.clone(), + info: NodeInfo { + name: id.clone(), + node: network_status.client_version.clone(), + port, + network: self.network.chain_id().to_string(), + protocol, + api: "No".to_string(), + os: std::env::consts::OS.into(), + os_ver: std::env::consts::ARCH.into(), + client: "0.1.1".to_string(), + history: true, + }, + }; + + let message = auth.generate_login_message(); + conn.write_json(&message).await?; + + let response = + timeout(READ_TIMEOUT, conn.read_json()).await.map_err(|_| EthStatsError::Timeout)??; + + if let Some(ack) = response.get("emit") { + if ack.get(0) == Some(&Value::String("ready".to_string())) { + info!( + target: "ethstats", + "Login successful to EthStats server as node_id {}", self.credentials.node_id + ); + return Ok(()); + } + } + + debug!(target: "ethstats", "Login failed: Unauthorized or unexpected login response"); + Err(EthStatsError::AuthError("Unauthorized or unexpected login response".into())) + } + + /// Report current node statistics to the `EthStats` server + /// + /// Sends information about the node's current state including sync status, + /// peer count, and uptime. + async fn report_stats(&self) -> Result<(), EthStatsError> { + let conn = self.conn.read().await; + let conn = conn.as_ref().ok_or(EthStatsError::NotConnected)?; + + let stats_msg = StatsMsg { + id: self.credentials.node_id.clone(), + stats: NodeStats { + active: true, + syncing: self.network.is_syncing(), + peers: self.network.num_connected_peers() as u64, + gas_price: 0, // TODO + uptime: 100, + }, + }; + + let message = stats_msg.generate_stats_message(); + conn.write_json(&message).await?; + + Ok(()) + } + + /// Send a ping message to the `EthStats` server + /// + /// Records the ping time and starts a timeout task to detect if the server + /// doesn't respond within the expected timeframe.
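 + /// + /// The ping has this shape on the wire (illustrative timestamp): + /// ```json + /// { "emit": ["node-ping", { "id": "node-1", "clientTime": "2025-07-10 12:00:00.000 +00:00 UTC" }] } + /// ```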
+ async fn send_ping(&self) -> Result<(), EthStatsError> { + let conn = self.conn.read().await; + let conn = conn.as_ref().ok_or(EthStatsError::NotConnected)?; + + let ping_time = Instant::now(); + *self.last_ping.lock().await = Some(ping_time); + + let client_time = Local::now().format("%Y-%m-%d %H:%M:%S%.f %:z %Z").to_string(); + let ping_msg = PingMsg { id: self.credentials.node_id.clone(), client_time }; + + let message = ping_msg.generate_ping_message(); + conn.write_json(&message).await?; + + // Start ping timeout + let active_ping = self.last_ping.clone(); + let conn_ref = self.conn.clone(); + tokio::spawn(async move { + sleep(PING_TIMEOUT).await; + let mut active = active_ping.lock().await; + if active.is_some() { + debug!(target: "ethstats", "Ping timeout"); + *active = None; + // Clear connection to trigger reconnect + if let Some(conn) = conn_ref.write().await.take() { + let _ = conn.close().await; + } + } + }); + + Ok(()) + } + + /// Report latency measurement to the `EthStats` server + /// + /// Calculates the round-trip time from the last ping and sends it to + /// the server. This is called when a pong response is received. + async fn report_latency(&self) -> Result<(), EthStatsError> { + let conn = self.conn.read().await; + let conn = conn.as_ref().ok_or(EthStatsError::NotConnected)?; + + let mut active = self.last_ping.lock().await; + if let Some(start) = active.take() { + let latency = start.elapsed().as_millis() as u64 / 2; + + debug!(target: "ethstats", "Reporting latency: {}ms", latency); + + let latency_msg = LatencyMsg { id: self.credentials.node_id.clone(), latency }; + + let message = latency_msg.generate_latency_message(); + conn.write_json(&message).await? + } + + Ok(()) + } + + /// Report pending transaction count to the `EthStats` server + /// + /// Gets the current number of pending transactions from the pool and + /// sends this information to the server. + async fn report_pending(&self) -> Result<(), EthStatsError> { + let conn = self.conn.read().await; + let conn = conn.as_ref().ok_or(EthStatsError::NotConnected)?; + let pending = self.pool.pool_size().pending as u64; + + debug!(target: "ethstats", "Reporting pending txs: {}", pending); + + let pending_msg = + PendingMsg { id: self.credentials.node_id.clone(), stats: PendingStats { pending } }; + + let message = pending_msg.generate_pending_message(); + conn.write_json(&message).await?; + + Ok(()) + } + + /// Report block information to the `EthStats` server + /// + /// Fetches block data either from a canonical state notification or + /// the current best block, converts it to stats format, and sends + /// it to the server. + /// + /// # Arguments + /// * `head` - Optional canonical state notification containing new block info + async fn report_block( + &self, + head: Option<CanonStateNotification<<Provider as NodePrimitivesProvider>::Primitives>>, + ) -> Result<(), EthStatsError> { + let conn = self.conn.read().await; + let conn = conn.as_ref().ok_or(EthStatsError::NotConnected)?; + + let block_number = if let Some(head) = head { + head.tip().header().number() + } else { + self.provider + .best_block_number() + .map_err(|e| EthStatsError::DataFetchError(e.to_string()))?
+ }; + + match self.provider.block_by_id(block_number.into()) { + Ok(Some(block)) => { + let block_msg = BlockMsg { + id: self.credentials.node_id.clone(), + block: self.block_to_stats(&block)?, + }; + + debug!(target: "ethstats", "Reporting block: {}", block_number); + + let message = block_msg.generate_block_message(); + conn.write_json(&message).await?; + } + Ok(None) => { + // Block not found, stop fetching + debug!(target: "ethstats", "Block {} not found", block_number); + return Err(EthStatsError::BlockNotFound(block_number)); + } + Err(e) => { + debug!(target: "ethstats", "Error fetching block {}: {}", block_number, e); + return Err(EthStatsError::DataFetchError(e.to_string())); + } + }; + + Ok(()) + } + + /// Convert a block to `EthStats` block statistics format + /// + /// Extracts relevant information from a block and formats it according + /// to the `EthStats` protocol specification. + /// + /// # Arguments + /// * `block` - The block to convert + fn block_to_stats( + &self, + block: &<Provider as BlockReader>::Block, + ) -> Result<BlockStats, EthStatsError> { + let body = block.body(); + let header = block.header(); + + let txs = body.transaction_hashes_iter().copied().map(|hash| TxStats { hash }).collect(); + + Ok(BlockStats { + number: U256::from(header.number()), + hash: header.hash_slow(), + parent_hash: header.parent_hash(), + timestamp: U256::from(header.timestamp()), + miner: header.beneficiary(), + gas_used: header.gas_used(), + gas_limit: header.gas_limit(), + diff: header.difficulty().to_string(), + total_diff: "0".into(), + txs, + tx_root: header.transactions_root(), + root: header.state_root(), + uncles: UncleStats(vec![]), + }) + } + + /// Report historical block data to the `EthStats` server + /// + /// Fetches multiple blocks by their numbers and sends their statistics + /// to the server. This is typically called in response to a history + /// request from the server.
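 + /// + /// The resulting message has this shape (illustrative, `BlockStats` entries elided): + /// ```json + /// { "emit": ["history", { "id": "node-1", "history": [] }] } + /// ```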
+ /// + /// # Arguments + /// * `list` - Vector of block numbers to fetch and report + async fn report_history(&self, list: Option<&Vec<u64>>) -> Result<(), EthStatsError> { + let conn = self.conn.read().await; + let conn = conn.as_ref().ok_or(EthStatsError::NotConnected)?; + + let indexes = if let Some(list) = list { + list + } else { + let best_block_number = self + .provider + .best_block_number() + .map_err(|e| EthStatsError::DataFetchError(e.to_string()))?; + + let start = best_block_number.saturating_sub(HISTORY_UPDATE_RANGE); + + &(start..=best_block_number).collect() + }; + + let mut blocks = Vec::with_capacity(indexes.len()); + for &block_number in indexes { + match self.provider.block_by_id(block_number.into()) { + Ok(Some(block)) => { + blocks.push(block); + } + Ok(None) => { + // Block not found, stop fetching + debug!(target: "ethstats", "Block {} not found", block_number); + break; + } + Err(e) => { + debug!(target: "ethstats", "Error fetching block {}: {}", block_number, e); + break; + } + } + } + + let history: Vec<BlockStats> = + blocks.iter().map(|block| self.block_to_stats(block)).collect::<Result<_, _>>()?; + + if history.is_empty() { + debug!(target: "ethstats", "No history to send to stats server"); + } else { + debug!( + target: "ethstats", + "Sending historical blocks to ethstats, first: {}, last: {}", + history.first().unwrap().number, + history.last().unwrap().number + ); + } + + let history_msg = HistoryMsg { id: self.credentials.node_id.clone(), history }; + + let message = history_msg.generate_history_message(); + conn.write_json(&message).await?; + + Ok(()) + } + + /// Send a complete status report to the `EthStats` server + /// + /// Performs all regular reporting tasks: ping, block info, pending + /// transactions, and general statistics. + async fn report(&self) -> Result<(), EthStatsError> { + self.send_ping().await?; + self.report_block(None).await?; + self.report_pending().await?; + self.report_stats().await?; + + Ok(()) + } + + /// Handle incoming messages from the `EthStats` server + /// + /// # Expected Message Variants + /// + /// This function expects messages in the following format: + /// + /// ```json + /// { "emit": [<command>, <payload>] } + /// ``` + /// + /// ## Supported Commands: + /// + /// - `"node-pong"`: Indicates a pong response to a previously sent ping. The payload is + /// ignored. Triggers a latency report to the server. + /// - Example: ```json { "emit": [ "node-pong", { "clientTime": "2025-07-10 12:00:00.123 + /// +00:00 UTC", "serverTime": "2025-07-10 12:00:01.456 +00:00 UTC" } ] } ``` + /// + /// - `"history"`: Requests historical block data. The payload may contain a `list` field with + /// block numbers to fetch. If `list` is not present, the default range is used. + /// - Example with list: `{ "emit": ["history", {"list": [1, 2, 3], "min": 1, "max": 3}] }` + /// - Example without list: `{ "emit": ["history", {}] }` + /// + /// ## Other Commands: + /// + /// Any other command is logged as unhandled and ignored.
+ async fn handle_message(&self, msg: Value) -> Result<(), EthStatsError> { + let emit = match msg.get("emit") { + Some(emit) => emit, + None => { + debug!(target: "ethstats", "Stats server sent non-broadcast, msg {}", msg); + return Err(EthStatsError::InvalidRequest); + } + }; + + let command = match emit.get(0) { + Some(Value::String(command)) => command.as_str(), + _ => { + debug!(target: "ethstats", "Invalid stats server message type, msg {}", msg); + return Err(EthStatsError::InvalidRequest); + } + }; + + match command { + "node-pong" => { + self.report_latency().await?; + } + "history" => { + let block_numbers = emit + .get(1) + .and_then(|v| v.as_object()) + .and_then(|obj| obj.get("list")) + .and_then(|v| v.as_array()); + + if block_numbers.is_none() { + self.report_history(None).await?; + + return Ok(()); + } + + let block_numbers = block_numbers + .unwrap() + .iter() + .map(|val| { + val.as_u64().ok_or_else(|| { + debug!( + target: "ethstats", + "Invalid stats history block number, msg {}", msg + ); + EthStatsError::InvalidRequest + }) + }) + .collect::<Result<Vec<u64>, EthStatsError>>()?; + + self.report_history(Some(&block_numbers)).await?; + } + other => debug!(target: "ethstats", "Unhandled command: {}", other), + } + + Ok(()) + } + + /// Main service loop that handles all `EthStats` communication + /// + /// This method runs the main event loop that: + /// - Maintains the `WebSocket` connection + /// - Handles incoming messages from the server + /// - Reports statistics at regular intervals + /// - Processes new block notifications + /// - Automatically reconnects when the connection is lost + /// + /// The service runs until explicitly shut down or an unrecoverable + /// error occurs. + pub async fn run(self) { + // Create channels for internal communication + let (shutdown_tx, mut shutdown_rx) = mpsc::channel(1); + let (message_tx, mut message_rx) = mpsc::channel(32); + let (head_tx, mut head_rx) = mpsc::channel(10); + + // Start the read loop in a separate task + let read_handle = { + let conn = self.conn.clone(); + let message_tx = message_tx.clone(); + let shutdown_tx = shutdown_tx.clone(); + + tokio::spawn(async move { + loop { + let conn = conn.read().await; + if let Some(conn) = conn.as_ref() { + match conn.read_json().await { + Ok(msg) => { + if message_tx.send(msg).await.is_err() { + break; + } + } + Err(e) => { + debug!(target: "ethstats", "Read error: {}", e); + break; + } + } + } else { + sleep(RECONNECT_INTERVAL).await; + } + } + + let _ = shutdown_tx.send(()).await; + }) + }; + + let canonical_stream_handle = { + let mut canonical_stream = self.provider.canonical_state_stream(); + let head_tx = head_tx.clone(); + let shutdown_tx = shutdown_tx.clone(); + + tokio::spawn(async move { + loop { + let head = canonical_stream.next().await; + if let Some(head) = head { + if head_tx.send(head).await.is_err() { + break; + } + } + } + + let _ = shutdown_tx.send(()).await; + }) + }; + + let mut pending_tx_receiver = self.pool.pending_transactions_listener(); + + // Set up intervals + let mut report_interval = interval(REPORT_INTERVAL); + let mut reconnect_interval = interval(RECONNECT_INTERVAL); + + // Main event loop using select! + loop { + tokio::select!
{ + // Handle shutdown signal + _ = shutdown_rx.recv() => { + info!(target: "ethstats", "Shutting down ethstats service"); + break; + } + + // Handle messages from the read loop + Some(msg) = message_rx.recv() => { + if let Err(e) = self.handle_message(msg).await { + debug!(target: "ethstats", "Error handling message: {}", e); + self.disconnect().await; + } + } + + // Handle new block + Some(head) = head_rx.recv() => { + if let Err(e) = self.report_block(Some(head)).await { + debug!(target: "ethstats", "Failed to report block: {}", e); + self.disconnect().await; + } + + if let Err(e) = self.report_pending().await { + debug!(target: "ethstats", "Failed to report pending: {}", e); + self.disconnect().await; + } + } + + // Handle new pending tx + _= pending_tx_receiver.recv() => { + if let Err(e) = self.report_pending().await { + debug!(target: "ethstats", "Failed to report pending: {}", e); + self.disconnect().await; + } + } + + // Handle stats reporting + _ = report_interval.tick() => { + if let Err(e) = self.report().await { + debug!(target: "ethstats", "Failed to report: {}", e); + self.disconnect().await; + } + } + + // Handle reconnection + _ = reconnect_interval.tick(), if self.conn.read().await.is_none() => { + match self.connect().await { + Ok(_) => info!(target: "ethstats", "Reconnected successfully"), + Err(e) => debug!(target: "ethstats", "Reconnect failed: {}", e), + } + } + } + } + + // Cleanup + self.disconnect().await; + + // Cancel background tasks + read_handle.abort(); + canonical_stream_handle.abort(); + } + + /// Gracefully close the `WebSocket` connection + /// + /// Attempts to close the connection cleanly and logs any errors + /// that occur during the process. + async fn disconnect(&self) { + if let Some(conn) = self.conn.write().await.take() { + if let Err(e) = conn.close().await { + debug!(target: "ethstats", "Error closing connection: {}", e); + } + } + } + + /// Test helper to check connection status + #[cfg(test)] + pub async fn is_connected(&self) -> bool { + self.conn.read().await.is_some() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use futures_util::{SinkExt, StreamExt}; + use reth_network_api::noop::NoopNetwork; + use reth_storage_api::noop::NoopProvider; + use reth_transaction_pool::noop::NoopTransactionPool; + use serde_json::json; + use tokio::net::TcpListener; + use tokio_tungstenite::tungstenite::protocol::{frame::Utf8Bytes, Message}; + + const TEST_HOST: &str = "127.0.0.1"; + const TEST_PORT: u16 = 0; // Let OS choose port + + async fn setup_mock_server() -> (String, tokio::task::JoinHandle<()>) { + let listener = TcpListener::bind((TEST_HOST, TEST_PORT)).await.unwrap(); + let addr = listener.local_addr().unwrap(); + + let handle = tokio::spawn(async move { + let (stream, _) = listener.accept().await.unwrap(); + let mut ws_stream = tokio_tungstenite::accept_async(stream).await.unwrap(); + + // Handle login + if let Some(Ok(Message::Text(text))) = ws_stream.next().await { + let value: serde_json::Value = serde_json::from_str(&text).unwrap(); + if value["emit"][0] == "hello" { + let response = json!({ + "emit": ["ready", []] + }); + ws_stream + .send(Message::Text(Utf8Bytes::from(response.to_string()))) + .await + .unwrap(); + } + } + + // Handle ping + while let Some(Ok(msg)) = ws_stream.next().await { + if let Message::Text(text) = msg { + if text.contains("node-ping") { + let pong = json!({ + "emit": ["node-pong", {"id": "test-node"}] + }); + ws_stream + .send(Message::Text(Utf8Bytes::from(pong.to_string()))) + .await + .unwrap(); + } + } + 
} + }); + + (addr.to_string(), handle) + } + + #[tokio::test] + async fn test_connection_and_login() { + let (server_url, server_handle) = setup_mock_server().await; + let ethstats_url = format!("test-node:test-secret@{server_url}"); + + let network = NoopNetwork::default(); + let provider = NoopProvider::default(); + let pool = NoopTransactionPool::default(); + + let service = EthStatsService::new(ðstats_url, network, provider, pool) + .await + .expect("Service should connect"); + + // Verify connection was established + assert!(service.is_connected().await, "Service should be connected"); + + // Clean up server + server_handle.abort(); + } + + #[tokio::test] + async fn test_history_command_handling() { + let (server_url, server_handle) = setup_mock_server().await; + let ethstats_url = format!("test-node:test-secret@{server_url}"); + + let network = NoopNetwork::default(); + let provider = NoopProvider::default(); + let pool = NoopTransactionPool::default(); + + let service = EthStatsService::new(ðstats_url, network, provider, pool) + .await + .expect("Service should connect"); + + // Simulate receiving a history command + let history_cmd = json!({ + "emit": ["history", {"list": [1, 2, 3]}] + }); + + service.handle_message(history_cmd).await.expect("History command should be handled"); + + // Clean up server + server_handle.abort(); + } + + #[tokio::test] + async fn test_invalid_url_handling() { + let network = NoopNetwork::default(); + let provider = NoopProvider::default(); + let pool = NoopTransactionPool::default(); + + // Test missing secret + let result = EthStatsService::new( + "test-node@localhost", + network.clone(), + provider.clone(), + pool.clone(), + ) + .await; + assert!( + matches!(result, Err(EthStatsError::InvalidUrl(_))), + "Should detect invalid URL format" + ); + + // Test invalid URL format + let result = EthStatsService::new("invalid-url", network, provider, pool).await; + assert!( + matches!(result, Err(EthStatsError::InvalidUrl(_))), + "Should detect invalid URL format" + ); + } +} diff --git a/crates/node/ethstats/src/events.rs b/crates/node/ethstats/src/events.rs new file mode 100644 index 00000000000..08d0c90feb6 --- /dev/null +++ b/crates/node/ethstats/src/events.rs @@ -0,0 +1,283 @@ +//! Types for ethstats event reporting. +//! These structures define the data format used to report blockchain events to ethstats servers. + +use alloy_consensus::Header; +use alloy_primitives::{Address, B256, U256}; +use serde::{Deserialize, Serialize}; + +/// Collection of meta information about a node that is displayed on the monitoring page. +/// This information is used to identify and display node details in the ethstats monitoring +/// interface. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct NodeInfo { + /// The display name of the node in the monitoring interface + pub name: String, + + /// The node's unique identifier + pub node: String, + + /// The port number the node is listening on for P2P connections + pub port: u64, + + /// The network ID the node is connected to (e.g. 
"1" for mainnet) + #[serde(rename = "net")] + pub network: String, + + /// Comma-separated list of supported protocols and their versions + pub protocol: String, + + /// API availability indicator ("Yes" or "No") + pub api: String, + + /// Operating system the node is running on + pub os: String, + + /// Operating system version/architecture + #[serde(rename = "os_v")] + pub os_ver: String, + + /// Client software version + pub client: String, + + /// Whether the node can provide historical block data + #[serde(rename = "canUpdateHistory")] + pub history: bool, +} + +/// Authentication message used to login to the ethstats monitoring server. +/// Contains node identification and authentication information. +#[derive(Debug, Serialize, Deserialize)] +pub struct AuthMsg { + /// The node's unique identifier + pub id: String, + + /// Detailed information about the node + pub info: NodeInfo, + + /// Secret password for authentication with the monitoring server + pub secret: String, +} + +impl AuthMsg { + /// Generate a login message for the ethstats monitoring server. + pub fn generate_login_message(&self) -> String { + serde_json::json!({ + "emit": ["hello", self] + }) + .to_string() + } +} + +/// Simplified transaction info, containing only the hash. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TxStats { + /// Transaction hash + pub hash: B256, +} + +/// Wrapper for uncle block headers. +/// This ensures empty lists serialize as `[]` instead of `null`. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(transparent)] +pub struct UncleStats(pub Vec
); + +/// Information to report about individual blocks. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct BlockStats { + /// Block number (height in the chain). + pub number: U256, + + /// Hash of this block. + pub hash: B256, + + /// Hash of the parent block. + #[serde(rename = "parentHash")] + pub parent_hash: B256, + + /// Timestamp of the block (Unix time). + pub timestamp: U256, + + /// Address of the miner who produced this block. + pub miner: Address, + + /// Total gas used by all transactions in the block. + #[serde(rename = "gasUsed")] + pub gas_used: u64, + + /// Maximum gas allowed for this block. + #[serde(rename = "gasLimit")] + pub gas_limit: u64, + + /// Difficulty for mining this block (as a decimal string). + #[serde(rename = "difficulty")] + pub diff: String, + + /// Cumulative difficulty up to this block (as a decimal string). + #[serde(rename = "totalDifficulty")] + pub total_diff: String, + + /// Simplified list of transactions in the block. + #[serde(rename = "transactions")] + pub txs: Vec<TxStats>, + + /// Root hash of all transactions (Merkle root). + #[serde(rename = "transactionsRoot")] + pub tx_root: B256, + + /// State root after applying this block. + #[serde(rename = "stateRoot")] + pub root: B256, + + /// List of uncle block headers. + pub uncles: UncleStats, +} + +/// Message containing a block to be reported to the ethstats monitoring server. +#[derive(Debug, Serialize, Deserialize)] +pub struct BlockMsg { + /// The node's unique identifier + pub id: String, + + /// The block to report + pub block: BlockStats, +} + +impl BlockMsg { + /// Generate a block message for the ethstats monitoring server. + pub fn generate_block_message(&self) -> String { + serde_json::json!({ + "emit": ["block", self] + }) + .to_string() + } +} + +/// Message containing historical block data to be reported to the ethstats monitoring server. +#[derive(Debug, Serialize, Deserialize)] +pub struct HistoryMsg { + /// The node's unique identifier + pub id: String, + + /// The historical block data to report + pub history: Vec<BlockStats>, +} + +impl HistoryMsg { + /// Generate a history message for the ethstats monitoring server. + pub fn generate_history_message(&self) -> String { + serde_json::json!({ + "emit": ["history", self] + }) + .to_string() + } } + +/// Pending transaction statistics reported to the ethstats monitoring +/// server. +#[derive(Debug, Serialize, Deserialize)] +pub struct PendingStats { + /// Number of pending transactions + pub pending: u64, +} + +/// Message containing pending transaction statistics to be reported to the ethstats monitoring +/// server. +#[derive(Debug, Serialize, Deserialize)] +pub struct PendingMsg { + /// The node's unique identifier + pub id: String, + + /// The pending transaction statistics to report + pub stats: PendingStats, +} + +impl PendingMsg { + /// Generate a pending message for the ethstats monitoring server. + pub fn generate_pending_message(&self) -> String { + serde_json::json!({ + "emit": ["pending", self] + }) + .to_string() + } +} + +/// Information reported about the local node.
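+/// +/// Serialized inside a stats report as (illustrative values): +/// ```json +/// { "emit": ["stats", { "id": "node-1", "stats": { "active": true, "syncing": false, "peers": 3, "gasPrice": 0, "uptime": 100 } }] } +/// ```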
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct NodeStats { + /// Whether the node is active + pub active: bool, + + /// Whether the node is currently syncing + pub syncing: bool, + + /// Number of connected peers + pub peers: u64, + + /// Current gas price in wei + #[serde(rename = "gasPrice")] + pub gas_price: u64, + + /// Node uptime percentage + pub uptime: u64, +} + +/// Message containing node statistics to be reported to the ethstats monitoring server. +#[derive(Debug, Serialize, Deserialize)] +pub struct StatsMsg { + /// The node's unique identifier + pub id: String, + + /// The stats to report + pub stats: NodeStats, +} + +impl StatsMsg { + /// Generate a stats message for the ethstats monitoring server. + pub fn generate_stats_message(&self) -> String { + serde_json::json!({ + "emit": ["stats", self] + }) + .to_string() + } +} + +/// Latency report message used to report network latency to the ethstats monitoring server. +#[derive(Serialize, Deserialize, Debug)] +pub struct LatencyMsg { + /// The node's unique identifier + pub id: String, + + /// The latency to report in milliseconds + pub latency: u64, +} + +impl LatencyMsg { + /// Generate a latency message for the ethstats monitoring server. + pub fn generate_latency_message(&self) -> String { + serde_json::json!({ + "emit": ["latency", self] + }) + .to_string() + } +} + +/// Ping message sent to the ethstats monitoring server to initiate latency measurement. +#[derive(Serialize, Deserialize, Debug)] +pub struct PingMsg { + /// The node's unique identifier + pub id: String, + + /// Client timestamp when the ping was sent + #[serde(rename = "clientTime")] + pub client_time: String, +} + +impl PingMsg { + /// Generate a ping message for the ethstats monitoring server. + pub fn generate_ping_message(&self) -> String { + serde_json::json!({ + "emit": ["node-ping", self] + }) + .to_string() + } +} diff --git a/crates/node/ethstats/src/lib.rs b/crates/node/ethstats/src/lib.rs new file mode 100644 index 00000000000..b2cd03243a0 --- /dev/null +++ b/crates/node/ethstats/src/lib.rs @@ -0,0 +1,30 @@ +//! +//! `EthStats` client support for Reth. +//! +//! This crate provides the necessary components to connect to, authenticate with, and report +//! node and network statistics to an `EthStats` server. It includes abstractions for `WebSocket` +//! connections, error handling, event/message types, and the main `EthStats` service logic. +//! +//! - `connection`: `WebSocket` connection management and utilities +//! - `error`: Error types for connection and `EthStats` operations +//! - `ethstats`: Main service logic for `EthStats` client +//! 
- `events`: Data structures for `EthStats` protocol messages + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +mod connection; +mod credentials; + +mod error; + +mod ethstats; +pub use ethstats::*; + +mod events; +pub use events::*; diff --git a/crates/optimism/chainspec/Cargo.toml b/crates/optimism/chainspec/Cargo.toml index 80de3edcb70..55201164701 100644 --- a/crates/optimism/chainspec/Cargo.toml +++ b/crates/optimism/chainspec/Cargo.toml @@ -44,11 +44,10 @@ miniz_oxide = { workspace = true, features = ["with-alloc"], optional = true } derive_more.workspace = true paste = { workspace = true, optional = true } thiserror = { workspace = true, optional = true } +op-alloy-consensus.workspace = true [dev-dependencies] reth-chainspec = { workspace = true, features = ["test-utils"] } -alloy-genesis.workspace = true -op-alloy-rpc-types.workspace = true [features] default = ["std"] @@ -71,6 +70,7 @@ std = [ "serde?/std", "miniz_oxide?/std", "thiserror?/std", + "op-alloy-consensus/std", ] serde = [ "alloy-chains/serde", @@ -84,4 +84,5 @@ serde = [ "reth-optimism-forks/serde", "reth-optimism-primitives/serde", "reth-primitives-traits/serde", + "op-alloy-consensus/serde", ] diff --git a/crates/optimism/chainspec/src/basefee.rs b/crates/optimism/chainspec/src/basefee.rs new file mode 100644 index 00000000000..b28c0c478d0 --- /dev/null +++ b/crates/optimism/chainspec/src/basefee.rs @@ -0,0 +1,29 @@ +//! Base fee related utilities for Optimism chains. + +use alloy_consensus::BlockHeader; +use op_alloy_consensus::{decode_holocene_extra_data, EIP1559ParamError}; +use reth_chainspec::{BaseFeeParams, EthChainSpec}; +use reth_optimism_forks::OpHardforks; + +/// Extracts the Holocene 1559 parameters from the encoded extra data from the parent header. +/// +/// Caution: Caller must ensure that holocene is active in the parent header.
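+/// +/// Per the Holocene spec, `extraData` packs a version byte followed by big-endian `denominator` and `elasticity` `u32` values; all-zero parameters fall back to the chain-spec defaults (a sketch of the layout, not part of this change): +/// ```ignore +/// // [version, denominator (u32 BE), elasticity (u32 BE)] +/// let extra: [u8; 9] = [0, 0, 0, 0, 250, 0, 0, 0, 6]; // denominator = 250, elasticity = 6 +/// ```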
+/// +/// See also [Base fee computation](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/holocene/exec-engine.md#base-fee-computation) +pub fn decode_holocene_base_fee<H>( + chain_spec: impl EthChainSpec + OpHardforks, + parent: &H, + timestamp: u64, +) -> Result<u64, EIP1559ParamError> +where + H: BlockHeader, +{ + let (elasticity, denominator) = decode_holocene_extra_data(parent.extra_data())?; + let base_fee_params = if elasticity == 0 && denominator == 0 { + chain_spec.base_fee_params_at_timestamp(timestamp) + } else { + BaseFeeParams::new(denominator as u128, elasticity as u128) + }; + + Ok(parent.next_block_base_fee(base_fee_params).unwrap_or_default()) +} diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index ee5cd27a139..8ec95381ddb 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -34,6 +34,7 @@ extern crate alloc; mod base; mod base_sepolia; +mod basefee; pub mod constants; mod dev; @@ -47,6 +48,7 @@ pub use superchain::*; pub use base::BASE_MAINNET; pub use base_sepolia::BASE_SEPOLIA; +pub use basefee::*; pub use dev::OP_DEV; pub use op::OP_MAINNET; pub use op_sepolia::OP_SEPOLIA; @@ -56,7 +58,7 @@ pub use reth_optimism_forks::*; use alloc::{boxed::Box, vec, vec::Vec}; use alloy_chains::Chain; -use alloy_consensus::{proofs::storage_root_unhashed, Header}; +use alloy_consensus::{proofs::storage_root_unhashed, BlockHeader, Header}; use alloy_eips::eip7840::BlobParams; use alloy_genesis::Genesis; use alloy_hardforks::Hardfork; @@ -287,6 +289,14 @@ impl EthChainSpec for OpChainSpec { fn final_paris_total_difficulty(&self) -> Option<U256> { self.inner.final_paris_total_difficulty() } + + fn next_block_base_fee(&self, parent: &Header, target_timestamp: u64) -> Option<u64> { + if self.is_holocene_active_at_timestamp(parent.timestamp()) { + decode_holocene_base_fee(self, parent, target_timestamp).ok() + } else { + self.inner.next_block_base_fee(parent, target_timestamp) + } + } } impl EthereumCapabilities for OpChainSpec {} diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index 1661c3be476..0da12c42b02 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -68,8 +68,6 @@ op-alloy-consensus.workspace = true [dev-dependencies] tempfile.workspace = true reth-stages = { workspace = true, features = ["test-utils"] } -reth-db-common.workspace = true -reth-cli-commands.workspace = true [build-dependencies] reth-optimism-chainspec = { workspace = true, features = ["std", "superchain-configs"] } diff --git a/crates/optimism/cli/src/app.rs b/crates/optimism/cli/src/app.rs index 1c7af0d328c..e0774068b7e 100644 --- a/crates/optimism/cli/src/app.rs +++ b/crates/optimism/cli/src/app.rs @@ -8,7 +8,7 @@ use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::OpBeaconConsensus; use reth_optimism_node::{OpExecutorProvider, OpNode}; use reth_tracing::{FileWorkerGuard, Layers}; -use std::fmt; +use std::{fmt, sync::Arc}; use tracing::info; /// A wrapper around a parsed CLI that handles command execution.
@@ -65,6 +65,10 @@ where // Install the prometheus recorder to be sure to record all metrics let _ = install_prometheus_recorder(); + let components = |spec: Arc<OpChainSpec>| { + (OpExecutorProvider::optimism(spec.clone()), OpBeaconConsensus::new(spec)) + }; + match self.cli.command { Commands::Node(command) => { runner.run_command_until_exit(|ctx| command.execute(ctx, launcher)) @@ -83,11 +87,9 @@ } Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute::<OpNode>()), - Commands::Stage(command) => runner.run_command_until_exit(|ctx| { - command.execute::<OpNode, _, _>(ctx, |spec| { - (OpExecutorProvider::optimism(spec.clone()), OpBeaconConsensus::new(spec)) - }) - }), + Commands::Stage(command) => { + runner.run_command_until_exit(|ctx| command.execute::<OpNode, _, _>(ctx, components)) + } Commands::P2P(command) => runner.run_until_ctrl_c(command.execute::<OpNode>()), Commands::Config(command) => runner.run_until_ctrl_c(command.execute()), Commands::Recover(command) => { @@ -96,6 +98,9 @@ Commands::Prune(command) => runner.run_until_ctrl_c(command.execute::<OpNode>()), #[cfg(feature = "dev")] Commands::TestVectors(command) => runner.run_until_ctrl_c(command.execute()), + Commands::ReExecute(command) => { + runner.run_until_ctrl_c(command.execute::<OpNode, _, _>(components)) + } } } diff --git a/crates/optimism/cli/src/commands/mod.rs b/crates/optimism/cli/src/commands/mod.rs index 515307c9ddb..161aa1d0bab 100644 --- a/crates/optimism/cli/src/commands/mod.rs +++ b/crates/optimism/cli/src/commands/mod.rs @@ -7,7 +7,7 @@ use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::{ config_cmd, db, dump_genesis, init_cmd, node::{self, NoArgs}, - p2p, prune, recover, stage, + p2p, prune, re_execute, recover, stage, }; use std::{fmt, sync::Arc}; @@ -62,6 +62,9 @@ pub enum Commands<C: ChainSpecParser, Ext: clap::Args + fmt::Debug = NoArgs> { + /// Re-execute blocks in parallel to verify historical sync correctness. + #[command(name = "re-execute")] + ReExecute(re_execute::Command<C>), } impl< @@ -86,6 +89,7 @@ Self::ImportReceiptsOp(cmd) => cmd.chain_spec(), #[cfg(feature = "dev")] Self::TestVectors(_) => None, + Self::ReExecute(cmd) => cmd.chain_spec(), } } } diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs index 963e409b1dc..90669099f88 100644 --- a/crates/optimism/cli/src/lib.rs +++ b/crates/optimism/cli/src/lib.rs @@ -122,7 +122,9 @@ where { let mut this = self.configure(); this.set_runner(runner); - this.run(FnLauncher::new::<C, Ext>(launcher)) + this.run(FnLauncher::new::<C, Ext>(async move |builder, chain_spec| { + launcher(builder, chain_spec).await + })) } } diff --git a/crates/optimism/consensus/Cargo.toml b/crates/optimism/consensus/Cargo.toml index 92e1642b5ba..e681112eea0 100644 --- a/crates/optimism/consensus/Cargo.toml +++ b/crates/optimism/consensus/Cargo.toml @@ -32,23 +32,22 @@ alloy-primitives.workspace = true alloy-consensus.workspace = true alloy-trie.workspace = true revm.workspace = true -op-alloy-consensus.workspace = true # misc tracing.workspace = true thiserror.workspace = true +reth-optimism-chainspec.workspace = true [dev-dependencies] reth-provider = { workspace = true, features = ["test-utils"] } reth-db-common.workspace = true reth-revm.workspace = true reth-trie.workspace = true -reth-optimism-chainspec.workspace = true reth-optimism-node.workspace = true reth-db-api = { workspace = true, features = ["op"] } alloy-chains.workspace = true -alloy-primitives.workspace = true + op-alloy-consensus.workspace = true [features] @@ -69,10 +68,10 @@ std = [ "alloy-primitives/std", "alloy-consensus/std", "alloy-trie/std", - "op-alloy-consensus/std", "reth-revm/std", "revm/std", "tracing/std", "thiserror/std",
"reth-execution-types/std", + "op-alloy-consensus/std", ] diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index 3e4201dc73b..5e256593ef0 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -34,9 +34,7 @@ mod proof; pub use proof::calculate_receipt_root_no_memo_optimism; pub mod validation; -pub use validation::{ - canyon, decode_holocene_base_fee, isthmus, next_block_base_fee, validate_block_post_execution, -}; +pub use validation::{canyon, isthmus, validate_block_post_execution}; pub mod error; pub use error::OpConsensusError; @@ -178,29 +176,11 @@ where validate_against_parent_timestamp(header.header(), parent.header())?; } - // EIP1559 base fee validation - // - // > if Holocene is active in parent_header.timestamp, then the parameters from - // > parent_header.extraData are used. - if self.chain_spec.is_holocene_active_at_timestamp(parent.timestamp()) { - let header_base_fee = - header.base_fee_per_gas().ok_or(ConsensusError::BaseFeeMissing)?; - let expected_base_fee = - decode_holocene_base_fee(&self.chain_spec, parent.header(), header.timestamp()) - .map_err(|_| ConsensusError::BaseFeeMissing)?; - if expected_base_fee != header_base_fee { - return Err(ConsensusError::BaseFeeDiff(GotExpected { - expected: expected_base_fee, - got: header_base_fee, - })) - } - } else { - validate_against_parent_eip1559_base_fee( - header.header(), - parent.header(), - &self.chain_spec, - )?; - } + validate_against_parent_eip1559_base_fee( + header.header(), + parent.header(), + &self.chain_spec, + )?; // ensure that the blob gas fields for this block if let Some(blob_params) = self.chain_spec.blob_params_at_timestamp(header.timestamp()) { diff --git a/crates/optimism/consensus/src/validation/mod.rs b/crates/optimism/consensus/src/validation/mod.rs index 4977647d89c..0846572a3d9 100644 --- a/crates/optimism/consensus/src/validation/mod.rs +++ b/crates/optimism/consensus/src/validation/mod.rs @@ -3,13 +3,15 @@ pub mod canyon; pub mod isthmus; +// Re-export the decode_holocene_base_fee function for compatibility +pub use reth_optimism_chainspec::decode_holocene_base_fee; + use crate::proof::calculate_receipt_root_optimism; use alloc::vec::Vec; use alloy_consensus::{BlockHeader, TxReceipt, EMPTY_OMMER_ROOT_HASH}; -use alloy_primitives::{Bloom, B256}; +use alloy_eips::Encodable2718; +use alloy_primitives::{Bloom, Bytes, B256}; use alloy_trie::EMPTY_ROOT_HASH; -use op_alloy_consensus::{decode_holocene_extra_data, EIP1559ParamError}; -use reth_chainspec::{BaseFeeParams, EthChainSpec}; use reth_consensus::ConsensusError; use reth_optimism_forks::OpHardforks; use reth_optimism_primitives::DepositReceipt; @@ -99,6 +101,10 @@ pub fn validate_block_post_execution( chain_spec, header.timestamp(), ) { + let receipts = receipts + .iter() + .map(|r| Bytes::from(r.with_bloom_ref().encoded_2718())) + .collect::>(); tracing::debug!(%error, ?receipts, "receipts verification failed"); return Err(error) } @@ -166,51 +172,13 @@ fn compare_receipts_root_and_logs_bloom( Ok(()) } -/// Extracts the Holocene 1599 parameters from the encoded extra data from the parent header. -/// -/// Caution: Caller must ensure that holocene is active in the parent header. 
-/// -/// See also [Base fee computation](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/holocene/exec-engine.md#base-fee-computation) -pub fn decode_holocene_base_fee( - chain_spec: impl EthChainSpec + OpHardforks, - parent: impl BlockHeader, - timestamp: u64, -) -> Result<u64, EIP1559ParamError> { - let (elasticity, denominator) = decode_holocene_extra_data(parent.extra_data())?; - let base_fee_params = if elasticity == 0 && denominator == 0 { - chain_spec.base_fee_params_at_timestamp(timestamp) - } else { - BaseFeeParams::new(denominator as u128, elasticity as u128) - }; - - Ok(parent.next_block_base_fee(base_fee_params).unwrap_or_default()) -} - -/// Read from parent to determine the base fee for the next block -/// -/// See also [Base fee computation](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/holocene/exec-engine.md#base-fee-computation) -pub fn next_block_base_fee( - chain_spec: impl EthChainSpec
+ OpHardforks, - parent: &H, - timestamp: u64, -) -> Result<u64, EIP1559ParamError> { - // If we are in the Holocene, we need to use the base fee params - // from the parent block's extra data. - // Else, use the base fee params (default values) from chainspec - if chain_spec.is_holocene_active_at_timestamp(parent.timestamp()) { - Ok(decode_holocene_base_fee(chain_spec, parent, timestamp)?) - } else { - Ok(chain_spec.next_block_base_fee(parent, timestamp).unwrap_or_default()) - } -} - #[cfg(test)] mod tests { use super::*; use alloy_consensus::Header; use alloy_primitives::{b256, hex, Bytes, U256}; use op_alloy_consensus::OpTxEnvelope; - use reth_chainspec::{ChainSpec, ForkCondition, Hardfork}; + use reth_chainspec::{BaseFeeParams, ChainSpec, EthChainSpec, ForkCondition, Hardfork}; use reth_optimism_chainspec::{OpChainSpec, BASE_SEPOLIA}; use reth_optimism_forks::{OpHardfork, BASE_SEPOLIA_HARDFORKS}; use std::sync::Arc; @@ -250,7 +218,8 @@ mod tests { gas_limit: 144000000, ..Default::default() }; - let base_fee = next_block_base_fee(&op_chain_spec, &parent, 0); + let base_fee = + reth_optimism_chainspec::OpChainSpec::next_block_base_fee(&op_chain_spec, &parent, 0); assert_eq!( base_fee.unwrap(), op_chain_spec.next_block_base_fee(&parent, 0).unwrap_or_default() @@ -268,7 +237,11 @@ mod tests { extra_data: Bytes::from_static(&[0, 0, 0, 0, 0, 0, 0, 0, 0]), ..Default::default() }; - let base_fee = next_block_base_fee(&op_chain_spec, &parent, 1800000005); + let base_fee = reth_optimism_chainspec::OpChainSpec::next_block_base_fee( + &op_chain_spec, + &parent, + 1800000005, + ); assert_eq!( base_fee.unwrap(), op_chain_spec.next_block_base_fee(&parent, 0).unwrap_or_default() @@ -286,7 +259,11 @@ mod tests { ..Default::default() }; - let base_fee = next_block_base_fee(holocene_chainspec(), &parent, 1800000005); + let base_fee = reth_optimism_chainspec::OpChainSpec::next_block_base_fee( + &holocene_chainspec(), + &parent, + 1800000005, + ); assert_eq!( base_fee.unwrap(), parent @@ -307,7 +284,12 @@ mod tests { ..Default::default() }; - let base_fee = next_block_base_fee(&*BASE_SEPOLIA, &parent, 1735315546).unwrap(); + let base_fee = reth_optimism_chainspec::OpChainSpec::next_block_base_fee( + &*BASE_SEPOLIA, + &parent, + 1735315546, + ) + .unwrap(); assert_eq!(base_fee, 507); } diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index 9acef67dabe..7cdc297a769 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -18,6 +18,8 @@ reth-primitives-traits.workspace = true reth-execution-errors.workspace = true reth-execution-types.workspace = true +reth-rpc-eth-api = { workspace = true, optional = true } + # ethereum alloy-eips.workspace = true alloy-evm.workspace = true @@ -43,7 +45,6 @@ thiserror.workspace = true reth-evm = { workspace = true, features = ["test-utils"] } reth-revm = { workspace = true, features = ["test-utils"] } alloy-genesis.workspace = true -alloy-consensus.workspace = true reth-optimism-primitives = { workspace = true, features = ["arbitrary"] } [features] @@ -71,3 +72,4 @@ std = [ "reth-evm/std", ] portable = ["reth-revm/portable"] +rpc = ["reth-rpc-eth-api"] diff --git a/crates/optimism/evm/src/config.rs b/crates/optimism/evm/src/config.rs index d3786e6e92e..2d4039020f1 100644 --- a/crates/optimism/evm/src/config.rs +++ b/crates/optimism/evm/src/config.rs @@ -56,6 +56,22 @@ pub fn revm_spec_by_timestamp_after_bedrock( } } +#[cfg(feature = "rpc")] +impl reth_rpc_eth_api::helpers::pending_block::BuildPendingEnv + for OpNextBlockEnvAttributes +{ + fn
build_pending_env(parent: &crate::SealedHeader) -> Self { + Self { + timestamp: parent.timestamp().saturating_add(12), + suggested_fee_recipient: parent.beneficiary(), + prev_randao: alloy_primitives::B256::random(), + gas_limit: parent.gas_limit(), + parent_beacon_block_root: parent.parent_beacon_block_root(), + extra_data: parent.extra_data().clone(), + } + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index a3f4e2042af..db42bf929dc 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -22,7 +22,6 @@ use op_revm::{OpSpecId, OpTransaction}; use reth_chainspec::EthChainSpec; use reth_evm::{ConfigureEvm, EvmEnv}; use reth_optimism_chainspec::OpChainSpec; -use reth_optimism_consensus::next_block_base_fee; use reth_optimism_forks::OpHardforks; use reth_optimism_primitives::{DepositReceipt, OpPrimitives}; use reth_primitives_traits::{NodePrimitives, SealedBlock, SealedHeader, SignedTransaction}; @@ -135,7 +134,7 @@ where let blob_excess_gas_and_price = spec .into_eth_spec() .is_enabled_in(SpecId::CANCUN) - .then_some(BlobExcessGasAndPrice { excess_blob_gas: 0, blob_gasprice: 0 }); + .then_some(BlobExcessGasAndPrice { excess_blob_gas: 0, blob_gasprice: 1 }); let block_env = BlockEnv { number: U256::from(header.number()), @@ -177,7 +176,7 @@ where let blob_excess_gas_and_price = spec_id .into_eth_spec() .is_enabled_in(SpecId::CANCUN) - .then_some(BlobExcessGasAndPrice { excess_blob_gas: 0, blob_gasprice: 0 }); + .then_some(BlobExcessGasAndPrice { excess_blob_gas: 0, blob_gasprice: 1 }); let block_env = BlockEnv { number: U256::from(parent.number() + 1), @@ -187,7 +186,10 @@ where prevrandao: Some(attributes.prev_randao), gas_limit: attributes.gas_limit, // calculate basefee based on parent block's gas usage - basefee: next_block_base_fee(self.chain_spec(), parent, attributes.timestamp)?, + basefee: self + .chain_spec() + .next_block_base_fee(parent, attributes.timestamp) + .unwrap_or_default(), // calculate excess gas based on parent block's blob gas usage blob_excess_gas_and_price, }; diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 63de9ec3291..9ef5e5f7a78 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -13,7 +13,8 @@ workspace = true [dependencies] # reth reth-chainspec.workspace = true -reth-primitives-traits.workspace = true +## ensure secp256k1 recovery with rayon support is activated +reth-primitives-traits = { workspace = true, features = ["secp256k1", "rayon"] } reth-payload-builder.workspace = true reth-consensus.workspace = true reth-node-api.workspace = true @@ -25,17 +26,17 @@ reth-network.workspace = true reth-evm.workspace = true reth-trie-db.workspace = true reth-rpc-server-types.workspace = true -reth-rpc-eth-api.workspace = true -reth-rpc-eth-types.workspace = true reth-tasks = { workspace = true, optional = true } reth-trie-common.workspace = true reth-node-core.workspace = true reth-rpc-engine-api.workspace = true +reth-engine-primitives.workspace = true +reth-engine-local = { workspace = true, features = ["op"] } reth-rpc-api.workspace = true # op-reth reth-optimism-payload-builder.workspace = true -reth-optimism-evm.workspace = true +reth-optimism-evm = { workspace = true, features = ["rpc"] } reth-optimism-rpc.workspace = true reth-optimism-storage.workspace = true reth-optimism-txpool.workspace = true @@ -45,13 +46,13 @@ reth-optimism-forks.workspace = true reth-optimism-primitives = { 
workspace = true, features = ["serde", "serde-bincode-compat", "reth-codec"] } # revm with required optimism features +# Note: this must be kept to ensure all features are properly enabled/forwarded revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } op-revm.workspace = true # ethereum alloy-primitives.workspace = true op-alloy-consensus.workspace = true -op-alloy-network.workspace = true op-alloy-rpc-types-engine.workspace = true alloy-rpc-types-engine.workspace = true alloy-rpc-types-eth.workspace = true @@ -71,19 +72,14 @@ serde_json = { workspace = true, optional = true } [dev-dependencies] reth-optimism-node = { workspace = true, features = ["test-utils"] } reth-db = { workspace = true, features = ["op"] } -reth-node-core.workspace = true reth-node-builder = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-tasks.workspace = true reth-payload-util.workspace = true reth-payload-validator.workspace = true reth-revm = { workspace = true, features = ["std"] } -reth-engine-local = { workspace = true, features = ["op"] } -alloy-primitives.workspace = true -op-alloy-consensus.workspace = true alloy-network.workspace = true -alloy-consensus.workspace = true futures.workspace = true alloy-eips.workspace = true @@ -91,9 +87,9 @@ alloy-eips.workspace = true default = ["reth-codec"] asm-keccak = [ "alloy-primitives/asm-keccak", - "revm/asm-keccak", "reth-optimism-node/asm-keccak", "reth-node-core/asm-keccak", + "revm/asm-keccak", ] js-tracer = ["reth-node-builder/js-tracer"] test-utils = [ @@ -119,3 +115,7 @@ test-utils = [ "reth-trie-common/test-utils", ] reth-codec = ["reth-optimism-primitives/reth-codec"] + +[[test]] +name = "e2e_testsuite" +path = "tests/e2e-testsuite/main.rs" diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index bba734ae8fd..75012d34374 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -6,14 +6,14 @@ use op_alloy_rpc_types_engine::{ OpPayloadAttributes, }; use reth_consensus::ConsensusError; +use reth_engine_primitives::EngineValidator; use reth_node_api::{ payload::{ validate_parent_beacon_block_root_presence, EngineApiMessageVersion, EngineObjectValidationError, MessageValidationKind, NewPayloadError, PayloadOrAttributes, PayloadTypes, VersionSpecificValidationError, }, - validate_version_specific_fields, BuiltPayload, EngineTypes, EngineValidator, NodePrimitives, - PayloadValidator, + validate_version_specific_fields, BuiltPayload, EngineTypes, NodePrimitives, PayloadValidator, }; use reth_optimism_consensus::isthmus; use reth_optimism_forks::OpHardforks; @@ -112,18 +112,18 @@ where } } -impl PayloadValidator for OpEngineValidator +impl PayloadValidator for OpEngineValidator where P: StateProviderFactory + Unpin + 'static, Tx: SignedTransaction + Unpin + 'static, ChainSpec: OpHardforks + Send + Sync + 'static, + Types: PayloadTypes, { type Block = alloy_consensus::Block; - type ExecutionData = OpExecutionData; fn ensure_well_formed_payload( &self, - payload: Self::ExecutionData, + payload: OpExecutionData, ) -> Result, NewPayloadError> { let sealed_block = self.inner.ensure_well_formed_payload(payload).map_err(NewPayloadError::other)?; @@ -165,7 +165,7 @@ impl EngineValidator for OpEngineValidator::ExecutionData, + ExecutionData = OpExecutionData, BuiltPayload: BuiltPayload>, >, P: StateProviderFactory + Unpin + 'static, @@ -205,7 +205,7 @@ where validate_version_specific_fields( self.chain_spec(), version, - 
PayloadOrAttributes::::PayloadAttributes( + PayloadOrAttributes::::PayloadAttributes( attributes, ), )?; @@ -290,7 +290,7 @@ mod test { use alloy_primitives::{b64, Address, B256, B64}; use alloy_rpc_types_engine::PayloadAttributes; use reth_chainspec::ChainSpec; - use reth_node_builder::EngineValidator; + use reth_engine_primitives::EngineValidator; use reth_optimism_chainspec::{OpChainSpec, BASE_SEPOLIA}; use reth_provider::noop::NoopProvider; use reth_trie_common::KeccakKeyHasher; diff --git a/crates/optimism/node/src/lib.rs b/crates/optimism/node/src/lib.rs index 4ef8a706785..e62f5b1b439 100644 --- a/crates/optimism/node/src/lib.rs +++ b/crates/optimism/node/src/lib.rs @@ -42,3 +42,6 @@ pub use reth_optimism_payload_builder::{ pub use reth_optimism_evm::*; pub use reth_optimism_storage::OpStorage; + +use op_revm as _; +use revm as _; diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 2d33f05f4ae..ed9e9b08f16 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -7,16 +7,17 @@ use crate::{ OpEngineApiBuilder, OpEngineTypes, }; use op_alloy_consensus::{interop::SafetyLevel, OpPooledTransaction}; -use op_alloy_rpc_types_engine::{OpExecutionData, OpPayloadAttributes}; +use op_alloy_rpc_types_engine::OpExecutionData; use reth_chainspec::{ChainSpecProvider, EthChainSpec, Hardforks}; -use reth_evm::{ConfigureEvm, EvmFactory, EvmFactoryFor}; +use reth_engine_local::LocalPayloadAttributesBuilder; +use reth_evm::ConfigureEvm; use reth_network::{ types::BasicNetworkPrimitives, NetworkConfig, NetworkHandle, NetworkManager, NetworkPrimitives, PeersInfo, }; use reth_node_api::{ - AddOnsContext, EngineTypes, FullNodeComponents, KeyHasherTy, NodeAddOns, NodePrimitives, - PayloadTypes, PrimitivesTy, TxTy, + AddOnsContext, BuildNextEnv, EngineTypes, FullNodeComponents, HeaderTy, KeyHasherTy, + NodeAddOns, NodePrimitives, PayloadAttributesBuilder, PayloadTypes, PrimitivesTy, TxTy, }; use reth_node_builder::{ components::{ @@ -33,12 +34,12 @@ use reth_node_builder::{ }; use reth_optimism_chainspec::{OpChainSpec, OpHardfork}; use reth_optimism_consensus::OpBeaconConsensus; -use reth_optimism_evm::{OpEvmConfig, OpNextBlockEnvAttributes, OpRethReceiptBuilder}; +use reth_optimism_evm::{OpEvmConfig, OpRethReceiptBuilder}; use reth_optimism_forks::OpHardforks; use reth_optimism_payload_builder::{ builder::OpPayloadTransactions, config::{OpBuilderConfig, OpDAConfig}, - OpBuiltPayload, OpPayloadBuilderAttributes, OpPayloadPrimitives, + OpAttributes, OpBuiltPayload, OpPayloadPrimitives, }; use reth_optimism_primitives::{DepositReceipt, OpPrimitives}; use reth_optimism_rpc::{ @@ -46,7 +47,7 @@ use reth_optimism_rpc::{ historical::{HistoricalRpc, HistoricalRpcClient}, miner::{MinerApiExtServer, OpMinerExtApi}, witness::{DebugExecutionWitnessApiServer, OpDebugWitnessApi}, - OpEthApi, OpEthApiError, SequencerClient, + SequencerClient, }; use reth_optimism_storage::OpStorage; use reth_optimism_txpool::{ @@ -54,9 +55,7 @@ use reth_optimism_txpool::{ OpPooledTx, }; use reth_provider::{providers::ProviderFactoryBuilder, CanonStateSubscriptions}; -use reth_rpc_api::DebugApiServer; -use reth_rpc_eth_api::{ext::L2EthApiExtServer, FullEthApiServer}; -use reth_rpc_eth_types::error::FromEvmError; +use reth_rpc_api::{eth::RpcTypes, DebugApiServer, L2EthApiExtServer}; use reth_rpc_server_types::RethRpcModule; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::{ @@ -64,7 +63,7 @@ use reth_transaction_pool::{ TransactionPool, 
TransactionValidationTaskExecutor, }; use reth_trie_db::MerklePatriciaTrie; -use revm::context::TxEnv; +use serde::de::DeserializeOwned; use std::{marker::PhantomData, sync::Arc}; /// Marker trait for Optimism node types with standard engine, chain spec, and primitives. @@ -166,6 +165,17 @@ impl OpNode { .consensus(OpConsensusBuilder::default()) } + /// Returns [`OpAddOnsBuilder`] with configured arguments. + pub fn add_ons_builder(&self) -> OpAddOnsBuilder { + OpAddOnsBuilder::default() + .with_sequencer(self.args.sequencer.clone()) + .with_sequencer_headers(self.args.sequencer_headers.clone()) + .with_da_config(self.da_config.clone()) + .with_enable_tx_conditional(self.args.enable_tx_conditional) + .with_min_suggested_priority_fee(self.args.min_suggested_priority_fee) + .with_historical_rpc(self.args.historical_rpc.clone()) + } + /// Instantiates the [`ProviderFactoryBuilder`] for an opstack node. /// /// # Open a ProviderFactory in read-only mode from a datadir @@ -226,14 +236,7 @@ where } fn add_ons(&self) -> Self::AddOns { - Self::AddOns::builder() - .with_sequencer(self.args.sequencer.clone()) - .with_sequencer_headers(self.args.sequencer_headers.clone()) - .with_da_config(self.da_config.clone()) - .with_enable_tx_conditional(self.args.enable_tx_conditional) - .with_min_suggested_priority_fee(self.args.min_suggested_priority_fee) - .with_historical_rpc(self.args.historical_rpc.clone()) - .build() + self.add_ons_builder().build() } } @@ -246,6 +249,12 @@ where fn rpc_to_primitive_block(rpc_block: Self::RpcBlock) -> reth_node_api::BlockTy { rpc_block.into_consensus() } + + fn local_payload_attributes_builder( + chain_spec: &Self::ChainSpec, + ) -> impl PayloadAttributesBuilder<::PayloadAttributes> { + LocalPayloadAttributesBuilder::new(Arc::new(chain_spec.clone())) + } } impl NodeTypes for OpNode { @@ -282,28 +291,30 @@ pub struct OpAddOns, EV, EB, RpcMi min_suggested_priority_fee: u64, } -impl Default +impl Default for OpAddOns< N, - OpEthApiBuilder, + OpEthApiBuilder, OpEngineValidatorBuilder, OpEngineApiBuilder, + Identity, > where N: FullNodeComponents, - OpEthApiBuilder: EthApiBuilder, + OpEthApiBuilder: EthApiBuilder, { fn default() -> Self { Self::builder().build() } } -impl +impl OpAddOns< N, OpEthApiBuilder, OpEngineValidatorBuilder, OpEngineApiBuilder, + RpcMiddleware, > where N: FullNodeComponents, @@ -419,24 +430,31 @@ where } } -impl NodeAddOns - for OpAddOns, EV, EB, RpcMiddleware> +impl NodeAddOns + for OpAddOns where N: FullNodeComponents< - Types: OpFullNodeTypes, - Evm: ConfigureEvm, + Types: NodeTypes< + ChainSpec: OpHardforks, + Primitives: OpPayloadPrimitives, + Payload: PayloadTypes, + >, + Evm: ConfigureEvm< + NextBlockEnvCtx: BuildNextEnv< + Attrs, + HeaderTy, + ::ChainSpec, + >, + >, + Pool: TransactionPool, >, - N::Types: NodeTypes, - OpEthApiError: FromEvmError, - ::Transaction: OpPooledTx, - EvmFactoryFor: EvmFactory>, - OpEthApi: FullEthApiServer, - NetworkT: op_alloy_network::Network + Unpin, + EthB: EthApiBuilder, EV: EngineValidatorBuilder, EB: EngineApiBuilder, RpcMiddleware: RethRpcMiddleware, + Attrs: OpAttributes, RpcPayloadAttributes: DeserializeOwned>, { - type Handle = RpcHandle>; + type Handle = RpcHandle; async fn launch_add_ons( self, @@ -480,7 +498,7 @@ where ctx.node.evm_config().clone(), ); // install additional OP specific rpc methods - let debug_ext = OpDebugWitnessApi::new( + let debug_ext = OpDebugWitnessApi::<_, _, _, Attrs>::new( ctx.node.provider().clone(), Box::new(ctx.node.task_executor().clone()), builder, @@ -539,33 +557,45 @@ 
where } } -impl RethRpcAddOns for OpAddOns, EV, EB> +impl RethRpcAddOns + for OpAddOns where N: FullNodeComponents< - Types: OpFullNodeTypes, - Evm: ConfigureEvm, + Types: NodeTypes< + ChainSpec: OpHardforks, + Primitives: OpPayloadPrimitives, + Payload: PayloadTypes, + >, + Evm: ConfigureEvm< + NextBlockEnvCtx: BuildNextEnv< + Attrs, + HeaderTy, + ::ChainSpec, + >, + >, >, - OpEthApiError: FromEvmError, <::Pool as TransactionPool>::Transaction: OpPooledTx, - EvmFactoryFor: EvmFactory>, - OpEthApi: FullEthApiServer, - NetworkT: op_alloy_network::Network + Unpin, + EthB: EthApiBuilder, EV: EngineValidatorBuilder, EB: EngineApiBuilder, + RpcMiddleware: RethRpcMiddleware, + Attrs: OpAttributes, RpcPayloadAttributes: DeserializeOwned>, { - type EthApi = OpEthApi; + type EthApi = EthB::EthApi; fn hooks_mut(&mut self) -> &mut reth_node_builder::rpc::RpcHooks { self.rpc_add_ons.hooks_mut() } } -impl EngineValidatorAddOn for OpAddOns, EV, EB> +impl EngineValidatorAddOn + for OpAddOns, EV, EB, RpcMiddleware> where N: FullNodeComponents, OpEthApiBuilder: EthApiBuilder, EV: EngineValidatorBuilder + Default, EB: EngineApiBuilder, + RpcMiddleware: Send, { type Validator = >::Validator; @@ -577,7 +607,7 @@ where /// A regular optimism evm and executor builder. #[derive(Debug, Clone)] #[non_exhaustive] -pub struct OpAddOnsBuilder { +pub struct OpAddOnsBuilder { /// Sequencer client, configured to forward submitted transactions to sequencer of given OP /// network. sequencer_url: Option, @@ -593,6 +623,8 @@ pub struct OpAddOnsBuilder { _nt: PhantomData, /// Minimum suggested priority fee (tip) min_suggested_priority_fee: u64, + /// RPC middleware to use + rpc_middleware: RpcMiddleware, } impl Default for OpAddOnsBuilder { @@ -605,11 +637,12 @@ impl Default for OpAddOnsBuilder { enable_tx_conditional: false, min_suggested_priority_fee: 1_000_000, _nt: PhantomData, + rpc_middleware: Identity::new(), } } } -impl OpAddOnsBuilder { +impl OpAddOnsBuilder { /// With a [`SequencerClient`]. pub fn with_sequencer(mut self, sequencer_client: Option) -> Self { self.sequencer_url = sequencer_client; @@ -645,11 +678,35 @@ impl OpAddOnsBuilder { self.historical_rpc = historical_rpc; self } + + /// Configure the RPC middleware to use + pub fn with_rpc_middleware(self, rpc_middleware: T) -> OpAddOnsBuilder { + let Self { + sequencer_url, + sequencer_headers, + historical_rpc, + da_config, + enable_tx_conditional, + min_suggested_priority_fee, + _nt, + .. + } = self; + OpAddOnsBuilder { + sequencer_url, + sequencer_headers, + historical_rpc, + da_config, + enable_tx_conditional, + min_suggested_priority_fee, + _nt, + rpc_middleware, + } + } } -impl OpAddOnsBuilder { +impl OpAddOnsBuilder { /// Builds an instance of [`OpAddOns`]. - pub fn build(self) -> OpAddOns, EV, EB> + pub fn build(self) -> OpAddOns, EV, EB, RpcMiddleware> where N: FullNodeComponents, OpEthApiBuilder: EthApiBuilder, @@ -663,6 +720,7 @@ impl OpAddOnsBuilder { enable_tx_conditional, min_suggested_priority_fee, historical_rpc, + rpc_middleware, .. } = self; @@ -674,7 +732,7 @@ impl OpAddOnsBuilder { .with_min_suggested_priority_fee(min_suggested_priority_fee), EV::default(), EB::default(), - Default::default(), + rpc_middleware, ), da_config: da_config.unwrap_or_default(), sequencer_url, @@ -806,6 +864,8 @@ where .with_max_tx_input_bytes(ctx.config().txpool.max_tx_input_bytes) .kzg_settings(ctx.kzg_settings()?) 
.set_tx_fee_cap(ctx.config().rpc.rpc_tx_fee_cap) + .with_max_tx_gas_limit(ctx.config().txpool.max_tx_gas_limit) + .with_minimum_priority_fee(ctx.config().txpool.minimum_priority_fee) .with_additional_tasks( pool_config_overrides .additional_validation_tasks @@ -906,7 +966,7 @@ impl OpPayloadBuilder { } } -impl PayloadBuilderBuilder for OpPayloadBuilder +impl PayloadBuilderBuilder for OpPayloadBuilder where Node: FullNodeTypes< Provider: ChainSpecProvider, @@ -914,20 +974,24 @@ where Primitives: OpPayloadPrimitives, Payload: PayloadTypes< BuiltPayload = OpBuiltPayload>, - PayloadAttributes = OpPayloadAttributes, - PayloadBuilderAttributes = OpPayloadBuilderAttributes>, + PayloadBuilderAttributes = Attrs, >, >, >, Evm: ConfigureEvm< Primitives = PrimitivesTy, - NextBlockEnvCtx = OpNextBlockEnvAttributes, + NextBlockEnvCtx: BuildNextEnv< + Attrs, + HeaderTy, + ::ChainSpec, + >, > + 'static, Pool: TransactionPool>> + Unpin + 'static, Txs: OpPayloadTransactions, + Attrs: OpAttributes>, { type PayloadBuilder = - reth_optimism_payload_builder::OpPayloadBuilder; + reth_optimism_payload_builder::OpPayloadBuilder; async fn build_payload_builder( self, diff --git a/crates/optimism/node/tests/e2e/main.rs b/crates/optimism/node/tests/e2e-testsuite/main.rs similarity index 100% rename from crates/optimism/node/tests/e2e/main.rs rename to crates/optimism/node/tests/e2e-testsuite/main.rs diff --git a/crates/optimism/node/tests/e2e/p2p.rs b/crates/optimism/node/tests/e2e-testsuite/p2p.rs similarity index 100% rename from crates/optimism/node/tests/e2e/p2p.rs rename to crates/optimism/node/tests/e2e-testsuite/p2p.rs diff --git a/crates/optimism/node/tests/e2e/testsuite.rs b/crates/optimism/node/tests/e2e-testsuite/testsuite.rs similarity index 100% rename from crates/optimism/node/tests/e2e/testsuite.rs rename to crates/optimism/node/tests/e2e-testsuite/testsuite.rs diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index d5a3260420d..d511b17392f 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -3,14 +3,13 @@ use crate::{ config::{OpBuilderConfig, OpDAConfig}, error::OpPayloadBuilderError, - payload::{OpBuiltPayload, OpPayloadBuilderAttributes}, - OpPayloadPrimitives, + payload::OpBuiltPayload, + OpAttributes, OpPayloadBuilderAttributes, OpPayloadPrimitives, }; use alloy_consensus::{BlockHeader, Transaction, Typed2718}; -use alloy_primitives::{Bytes, B256, U256}; +use alloy_primitives::{B256, U256}; use alloy_rpc_types_debug::ExecutionWitness; use alloy_rpc_types_engine::PayloadId; -use op_alloy_rpc_types_engine::OpPayloadAttributes; use reth_basic_payload_builder::*; use reth_chain_state::{ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates}; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; @@ -21,7 +20,6 @@ use reth_evm::{ ConfigureEvm, Database, Evm, }; use reth_execution_types::ExecutionOutcome; -use reth_optimism_evm::OpNextBlockEnvAttributes; use reth_optimism_forks::OpHardforks; use reth_optimism_primitives::{transaction::OpTransaction, ADDRESS_L2_TO_L1_MESSAGE_PASSER}; use reth_optimism_txpool::{ @@ -30,7 +28,7 @@ use reth_optimism_txpool::{ OpPooledTx, }; use reth_payload_builder_primitives::PayloadBuilderError; -use reth_payload_primitives::PayloadBuilderAttributes; +use reth_payload_primitives::{BuildNextEnv, PayloadBuilderAttributes}; use reth_payload_util::{BestPayloadTransactions, NoopPayloadTransactions, PayloadTransactions}; use reth_primitives_traits::{ HeaderTy, NodePrimitives, 
SealedHeader, SealedHeaderFor, SignedTransaction, TxTy, @@ -42,12 +40,18 @@ use reth_revm::{ use reth_storage_api::{errors::ProviderError, StateProvider, StateProviderFactory}; use reth_transaction_pool::{BestTransactionsAttributes, PoolTransaction, TransactionPool}; use revm::context::{Block, BlockEnv}; -use std::sync::Arc; +use std::{marker::PhantomData, sync::Arc}; use tracing::{debug, trace, warn}; /// Optimism's payload builder -#[derive(Debug, Clone)] -pub struct OpPayloadBuilder { +#[derive(Debug)] +pub struct OpPayloadBuilder< + Pool, + Client, + Evm, + Txs = (), + Attrs = OpPayloadBuilderAttributes::Primitives>>, +> { /// The rollup's compute pending block configuration option. // TODO(clabby): Implement this feature. pub compute_pending_block: bool, @@ -62,9 +66,31 @@ pub struct OpPayloadBuilder { /// The type responsible for yielding the best transactions for the payload if mempool /// transactions are allowed. pub best_transactions: Txs, + /// Marker for the payload attributes type. + _pd: PhantomData, +} + +impl Clone for OpPayloadBuilder +where + Pool: Clone, + Client: Clone, + Evm: ConfigureEvm, + Txs: Clone, +{ + fn clone(&self) -> Self { + Self { + evm_config: self.evm_config.clone(), + pool: self.pool.clone(), + client: self.client.clone(), + config: self.config.clone(), + best_transactions: self.best_transactions.clone(), + compute_pending_block: self.compute_pending_block, + _pd: PhantomData, + } + } } -impl OpPayloadBuilder { +impl OpPayloadBuilder { /// `OpPayloadBuilder` constructor. /// /// Configures the builder with the default settings. @@ -86,11 +112,12 @@ impl OpPayloadBuilder { evm_config, config, best_transactions: (), + _pd: PhantomData, } } } -impl OpPayloadBuilder { +impl OpPayloadBuilder { /// Sets the rollup's compute pending block configuration option. pub const fn set_compute_pending_block(mut self, compute_pending_block: bool) -> Self { self.compute_pending_block = compute_pending_block; @@ -102,7 +129,7 @@ impl OpPayloadBuilder { pub fn with_transactions( self, best_transactions: T, - ) -> OpPayloadBuilder { + ) -> OpPayloadBuilder { let Self { pool, client, compute_pending_block, evm_config, config, .. } = self; OpPayloadBuilder { pool, @@ -111,6 +138,7 @@ impl OpPayloadBuilder { evm_config, best_transactions, config, + _pd: PhantomData, } } @@ -125,12 +153,16 @@ impl OpPayloadBuilder { } } -impl OpPayloadBuilder +impl OpPayloadBuilder where Pool: TransactionPool>, Client: StateProviderFactory + ChainSpecProvider, N: OpPayloadPrimitives, - Evm: ConfigureEvm, + Evm: ConfigureEvm< + Primitives = N, + NextBlockEnvCtx: BuildNextEnv, + >, + Attrs: OpAttributes>, { /// Constructs an Optimism payload from the transactions sent via the /// Payload attributes by the sequencer. If the `no_tx_pool` argument is passed in /// the payload attributes, the transaction pool is ignored and only the sequencer /// transactions from the attributes are included in the payload. Returns /// a result indicating success with the payload or an error in case of failure.
fn build_payload<'a, Txs>( &self, - args: BuildArguments, OpBuiltPayload>, + args: BuildArguments>, best: impl FnOnce(BestTransactionsAttributes) -> Txs + Send + Sync + 'a, ) -> Result>, PayloadBuilderError> where @@ -165,7 +197,7 @@ where let state_provider = self.client.state_by_block_hash(ctx.parent().hash())?; let state = StateProviderDatabase::new(&state_provider); - if ctx.attributes().no_tx_pool { + if ctx.attributes().no_tx_pool() { builder.build(state, &state_provider, ctx) } else { // sequencer mode we can reuse cachedreads from previous runs @@ -178,10 +210,13 @@ where pub fn payload_witness( &self, parent: SealedHeader, - attributes: OpPayloadAttributes, - ) -> Result { - let attributes = OpPayloadBuilderAttributes::try_new(parent.hash(), attributes, 3) - .map_err(PayloadBuilderError::other)?; + attributes: Attrs::RpcPayloadAttributes, + ) -> Result + where + Attrs: PayloadBuilderAttributes, + { + let attributes = + Attrs::try_new(parent.hash(), attributes, 3).map_err(PayloadBuilderError::other)?; let config = PayloadConfig { parent_header: Arc::new(parent), attributes }; let ctx = OpPayloadBuilderCtx { @@ -201,15 +236,20 @@ where } /// Implementation of the [`PayloadBuilder`] trait for [`OpPayloadBuilder`]. -impl PayloadBuilder for OpPayloadBuilder +impl PayloadBuilder + for OpPayloadBuilder where N: OpPayloadPrimitives, Client: StateProviderFactory + ChainSpecProvider + Clone, Pool: TransactionPool>, - Evm: ConfigureEvm, + Evm: ConfigureEvm< + Primitives = N, + NextBlockEnvCtx: BuildNextEnv, + >, Txs: OpPayloadTransactions, + Attrs: OpAttributes, { - type Attributes = OpPayloadBuilderAttributes; + type Attributes = Attrs; type BuiltPayload = OpBuiltPayload; fn try_build( @@ -278,18 +318,22 @@ impl<'a, Txs> OpBuilder<'a, Txs> { impl OpBuilder<'_, Txs> { /// Builds the payload on top of the state. - pub fn build( + pub fn build( self, db: impl Database, state_provider: impl StateProvider, - ctx: OpPayloadBuilderCtx, + ctx: OpPayloadBuilderCtx, ) -> Result>, PayloadBuilderError> where - EvmConfig: ConfigureEvm, + Evm: ConfigureEvm< + Primitives = N, + NextBlockEnvCtx: BuildNextEnv, + >, ChainSpec: EthChainSpec + OpHardforks, N: OpPayloadPrimitives, Txs: PayloadTransactions + OpPooledTx>, + Attrs: OpAttributes, { let Self { best } = self; debug!(target: "payload_builder", id=%ctx.payload_id(), parent_header = ?ctx.parent().hash(), parent_number = ctx.parent().number(), "building new payload"); @@ -308,7 +352,7 @@ impl OpBuilder<'_, Txs> { let mut info = ctx.execute_sequencer_transactions(&mut builder)?; // 3. if mem pool transactions are requested we execute them - if !ctx.attributes().no_tx_pool { + if !ctx.attributes().no_tx_pool() { let best_txs = best(ctx.best_transaction_attributes(builder.evm_mut().block())); if ctx.execute_best_transactions(&mut info, &mut builder, best_txs)?.is_some() { return Ok(BuildOutcomeKind::Cancelled) @@ -344,7 +388,7 @@ impl OpBuilder<'_, Txs> { trie: ExecutedTrieUpdates::Present(Arc::new(trie_updates)), }; - let no_tx_pool = ctx.attributes().no_tx_pool; + let no_tx_pool = ctx.attributes().no_tx_pool(); let payload = OpBuiltPayload::new(ctx.payload_id(), sealed_block, info.total_fees, Some(executed)); @@ -360,16 +404,20 @@ impl OpBuilder<'_, Txs> { } /// Builds the payload and returns its [`ExecutionWitness`] based on the state after execution. 
- pub fn witness( + pub fn witness( self, state_provider: impl StateProvider, - ctx: &OpPayloadBuilderCtx, + ctx: &OpPayloadBuilderCtx, ) -> Result where - Evm: ConfigureEvm, + Evm: ConfigureEvm< + Primitives = N, + NextBlockEnvCtx: BuildNextEnv, + >, ChainSpec: EthChainSpec + OpHardforks, N: OpPayloadPrimitives, Txs: PayloadTransactions>, + Attrs: OpAttributes, { let mut db = State::builder() .with_database(StateProviderDatabase::new(&state_provider)) @@ -480,7 +528,11 @@ impl ExecutionInfo { /// Container type that holds all necessities to build a new payload. #[derive(derive_more::Debug)] -pub struct OpPayloadBuilderCtx { +pub struct OpPayloadBuilderCtx< + Evm: ConfigureEvm, + ChainSpec, + Attrs = OpPayloadBuilderAttributes::Primitives>>, +> { /// The type that knows how to perform system calls and configure the evm. pub evm_config: Evm, /// The DA config for the payload builder @@ -488,18 +540,21 @@ pub struct OpPayloadBuilderCtx { /// The chainspec pub chain_spec: Arc, /// How to build the payload. - pub config: - PayloadConfig>, HeaderTy>, + pub config: PayloadConfig>, /// Marker to check whether the job has been cancelled. pub cancel: CancelOnDrop, /// The currently best payload. pub best_payload: Option>, } -impl OpPayloadBuilderCtx +impl OpPayloadBuilderCtx where - Evm: ConfigureEvm, + Evm: ConfigureEvm< + Primitives: OpPayloadPrimitives, + NextBlockEnvCtx: BuildNextEnv, ChainSpec>, + >, ChainSpec: EthChainSpec + OpHardforks, + Attrs: OpAttributes>, { /// Returns the parent block the payload will be built on. pub fn parent(&self) -> &SealedHeaderFor { @@ -507,27 +562,10 @@ } /// Returns the builder attributes. - pub const fn attributes(&self) -> &OpPayloadBuilderAttributes> { + pub const fn attributes(&self) -> &Attrs { &self.config.attributes } - /// Returns the extra data for the block. - /// - /// After holocene this extracts the extra data from the payload - pub fn extra_data(&self) -> Result { - if self.is_holocene_active() { - self.attributes() - .get_holocene_extra_data( - self.chain_spec.base_fee_params_at_timestamp( - self.attributes().payload_attributes.timestamp, - ), - ) - .map_err(PayloadBuilderError::other) - } else { - Ok(Default::default()) - } - } - /// Returns the current fee settings for transactions from the mempool pub fn best_transaction_attributes(&self, block_env: &BlockEnv) -> BestTransactionsAttributes { BestTransactionsAttributes::new( @@ -541,11 +579,6 @@ self.attributes().payload_id() } - /// Returns true if holocene is active for the payload. - pub fn is_holocene_active(&self) -> bool { - self.chain_spec.is_holocene_active_at_timestamp(self.attributes().timestamp()) - } - /// Returns true if the fees are higher than the previous payload. 
pub fn is_better_payload(&self, total_fees: U256) -> bool { is_better_payload(self.best_payload.as_ref(), total_fees) @@ -560,27 +593,16 @@ where .builder_for_next_block( db, self.parent(), - OpNextBlockEnvAttributes { - timestamp: self.attributes().timestamp(), - suggested_fee_recipient: self.attributes().suggested_fee_recipient(), - prev_randao: self.attributes().prev_randao(), - gas_limit: self - .attributes() - .gas_limit - .unwrap_or_else(|| self.parent().gas_limit()), - parent_beacon_block_root: self.attributes().parent_beacon_block_root(), - extra_data: self.extra_data()?, - }, + Evm::NextBlockEnvCtx::build_next_env( + self.attributes(), + self.parent(), + self.chain_spec.as_ref(), + ) + .map_err(PayloadBuilderError::other)?, ) .map_err(PayloadBuilderError::other) } -} -impl OpPayloadBuilderCtx -where - Evm: ConfigureEvm, - ChainSpec: EthChainSpec + OpHardforks, -{ /// Executes all sequencer transactions that are included in the payload attributes. pub fn execute_sequencer_transactions( &self, @@ -588,7 +610,7 @@ where ) -> Result { let mut info = ExecutionInfo::new(); - for sequencer_tx in &self.attributes().transactions { + for sequencer_tx in self.attributes().sequencer_transactions() { // A sequencer's block should never contain blob transactions. if sequencer_tx.value().is_eip4844() { return Err(PayloadBuilderError::other( diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index 0416cf68bab..c84e9c70ec7 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -2,7 +2,7 @@ use std::{fmt::Debug, sync::Arc}; -use alloy_consensus::Block; +use alloy_consensus::{Block, BlockHeader}; use alloy_eips::{ eip1559::BaseFeeParams, eip2718::Decodable2718, eip4895::Withdrawals, eip7685::Requests, }; @@ -17,9 +17,14 @@ use op_alloy_rpc_types_engine::{ OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4, OpExecutionPayloadV4, }; use reth_chain_state::ExecutedBlockWithTrieUpdates; -use reth_payload_builder::EthPayloadBuilderAttributes; -use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes}; -use reth_primitives_traits::{NodePrimitives, SealedBlock, SignedTransaction, WithEncoded}; +use reth_chainspec::EthChainSpec; +use reth_optimism_evm::OpNextBlockEnvAttributes; +use reth_optimism_forks::OpHardforks; +use reth_payload_builder::{EthPayloadBuilderAttributes, PayloadBuilderError}; +use reth_payload_primitives::{BuildNextEnv, BuiltPayload, PayloadBuilderAttributes}; +use reth_primitives_traits::{ + NodePrimitives, SealedBlock, SealedHeader, SignedTransaction, WithEncoded, +}; /// Re-export for use in downstream arguments. 
pub use op_alloy_rpc_types_engine::OpPayloadAttributes; @@ -65,7 +70,7 @@ impl OpPayloadBuilderAttributes { } } -impl PayloadBuilderAttributes +impl PayloadBuilderAttributes for OpPayloadBuilderAttributes { type RpcPayloadAttributes = OpPayloadAttributes; @@ -377,6 +382,39 @@ pub fn payload_id_optimism( PayloadId::new(out.as_slice()[..8].try_into().expect("sufficient length")) } +impl BuildNextEnv, H, ChainSpec> + for OpNextBlockEnvAttributes +where + H: BlockHeader, + T: SignedTransaction, + ChainSpec: EthChainSpec + OpHardforks, +{ + fn build_next_env( + attributes: &OpPayloadBuilderAttributes, + parent: &SealedHeader, + chain_spec: &ChainSpec, + ) -> Result { + let extra_data = if chain_spec.is_holocene_active_at_timestamp(attributes.timestamp()) { + attributes + .get_holocene_extra_data( + chain_spec.base_fee_params_at_timestamp(attributes.timestamp()), + ) + .map_err(PayloadBuilderError::other)? + } else { + Default::default() + }; + + Ok(Self { + timestamp: attributes.timestamp(), + suggested_fee_recipient: attributes.suggested_fee_recipient(), + prev_randao: attributes.prev_randao(), + gas_limit: attributes.gas_limit.unwrap_or_else(|| parent.gas_limit()), + parent_beacon_block_root: attributes.parent_beacon_block_root(), + extra_data, + }) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/optimism/payload/src/traits.rs b/crates/optimism/payload/src/traits.rs index 6ca07e86e3f..485b8d1df9e 100644 --- a/crates/optimism/payload/src/traits.rs +++ b/crates/optimism/payload/src/traits.rs @@ -1,6 +1,9 @@ use alloy_consensus::BlockBody; use reth_optimism_primitives::{transaction::OpTransaction, DepositReceipt}; -use reth_primitives_traits::{FullBlockHeader, NodePrimitives, SignedTransaction}; +use reth_payload_primitives::PayloadBuilderAttributes; +use reth_primitives_traits::{FullBlockHeader, NodePrimitives, SignedTransaction, WithEncoded}; + +use crate::OpPayloadBuilderAttributes; /// Helper trait to encapsulate common bounds on [`NodePrimitives`] for OP payload builder. pub trait OpPayloadPrimitives: @@ -31,3 +34,27 @@ where type _TX = Tx; type _Header = Header; } + +/// Attributes for the OP payload builder. +pub trait OpAttributes: PayloadBuilderAttributes { + /// Primitive transaction type. + type Transaction: SignedTransaction; + + /// Whether to use the transaction pool for the payload. + fn no_tx_pool(&self) -> bool; + + /// Sequencer transactions to include in the payload. + fn sequencer_transactions(&self) -> &[WithEncoded]; +} + +impl OpAttributes for OpPayloadBuilderAttributes { + type Transaction = T; + + fn no_tx_pool(&self) -> bool { + self.no_tx_pool + } + + fn sequencer_transactions(&self) -> &[WithEncoded] { + &self.transactions + } +} diff --git a/crates/optimism/payload/src/validator.rs b/crates/optimism/payload/src/validator.rs index b287c553989..fa0d610469c 100644 --- a/crates/optimism/payload/src/validator.rs +++ b/crates/optimism/payload/src/validator.rs @@ -27,59 +27,74 @@ where } /// Ensures that the given payload does not violate any consensus rules that concern the block's - /// layout, like: - /// - missing or invalid base fee - /// - invalid extra data - /// - invalid transactions - /// - incorrect hash - /// - block contains blob transactions or blob versioned hashes - /// - block contains l1 withdrawals + /// layout. /// - /// The checks are done in the order that conforms with the engine-API specification. - /// - /// This is intended to be invoked after receiving the payload from the CLI. 
- /// The additional fields, starting with [`MaybeCancunPayloadFields`](alloy_rpc_types_engine::MaybeCancunPayloadFields), are not part of the payload, but are additional fields starting in the `engine_newPayloadV3` RPC call, See also - /// - /// If the cancun fields are provided this also validates that the versioned hashes in the block - /// are empty as well as those passed in the sidecar. If the payload fields are not provided. - /// - /// Validation according to specs . + /// See also [`ensure_well_formed_payload`]. pub fn ensure_well_formed_payload( &self, payload: OpExecutionData, ) -> Result>, OpPayloadError> { - let OpExecutionData { payload, sidecar } = payload; + ensure_well_formed_payload(self.chain_spec(), payload) + } +} - let expected_hash = payload.block_hash(); +/// Ensures that the given payload does not violate any consensus rules that concern the block's +/// layout, like: +/// - missing or invalid base fee +/// - invalid extra data +/// - invalid transactions +/// - incorrect hash +/// - block contains blob transactions or blob versioned hashes +/// - block contains l1 withdrawals +/// +/// The checks are done in the order that conforms with the engine-API specification. +/// +/// This is intended to be invoked after receiving the payload from the CLI. +/// The additional fields, starting with [`MaybeCancunPayloadFields`](alloy_rpc_types_engine::MaybeCancunPayloadFields), are not part of the payload, but are additional fields introduced in the `engine_newPayloadV3` RPC call. See also +/// +/// If the cancun fields are provided this also validates that the versioned hashes in the block +/// are empty as well as those passed in the sidecar. If the payload fields are not provided, the +/// block is expected to not contain them either. +/// +/// Validation according to specs . +pub fn ensure_well_formed_payload( + chain_spec: ChainSpec, + payload: OpExecutionData, +) -> Result>, OpPayloadError> +where + ChainSpec: OpHardforks, + T: SignedTransaction, +{ + let OpExecutionData { payload, sidecar } = payload; - // First parse the block - let sealed_block = payload.try_into_block_with_sidecar(&sidecar)?.seal_slow(); + let expected_hash = payload.block_hash(); - // Ensure the hash included in the payload matches the block hash - if expected_hash != sealed_block.hash() { - return Err(PayloadError::BlockHash { - execution: sealed_block.hash(), - consensus: expected_hash, - })? - } + // First parse the block + let sealed_block = payload.try_into_block_with_sidecar(&sidecar)?.seal_slow(); - shanghai::ensure_well_formed_fields( - sealed_block.body(), - self.is_shanghai_active_at_timestamp(sealed_block.timestamp), - )?; + // Ensure the hash included in the payload matches the block hash + if expected_hash != sealed_block.hash() { + return Err(PayloadError::BlockHash { + execution: sealed_block.hash(), + consensus: expected_hash, + })? 
+ } - cancun::ensure_well_formed_header_and_sidecar_fields( - &sealed_block, - sidecar.ecotone(), - self.is_cancun_active_at_timestamp(sealed_block.timestamp), - )?; + shanghai::ensure_well_formed_fields( + sealed_block.body(), + chain_spec.is_shanghai_active_at_timestamp(sealed_block.timestamp), + )?; - prague::ensure_well_formed_fields( - sealed_block.body(), - sidecar.isthmus(), - self.is_prague_active_at_timestamp(sealed_block.timestamp), - )?; + cancun::ensure_well_formed_header_and_sidecar_fields( + &sealed_block, + sidecar.ecotone(), + chain_spec.is_cancun_active_at_timestamp(sealed_block.timestamp), + )?; - Ok(sealed_block) - } + prague::ensure_well_formed_fields( + sealed_block.body(), + sidecar.isthmus(), + chain_spec.is_prague_active_at_timestamp(sealed_block.timestamp), + )?; + + Ok(sealed_block) } diff --git a/crates/optimism/primitives/src/receipt.rs b/crates/optimism/primitives/src/receipt.rs index e0ef6318081..74f21eab115 100644 --- a/crates/optimism/primitives/src/receipt.rs +++ b/crates/optimism/primitives/src/receipt.rs @@ -1,3 +1,4 @@ +use alloc::vec::Vec; use alloy_consensus::{ Eip2718EncodableReceipt, Eip658Value, Receipt, ReceiptWithBloom, RlpDecodableReceipt, RlpEncodableReceipt, TxReceipt, Typed2718, @@ -357,6 +358,16 @@ impl TxReceipt for OpReceipt { fn logs(&self) -> &[Log] { self.as_receipt().logs() } + + fn into_logs(self) -> Vec { + match self { + Self::Legacy(receipt) | + Self::Eip2930(receipt) | + Self::Eip1559(receipt) | + Self::Eip7702(receipt) => receipt.logs, + Self::Deposit(receipt) => receipt.inner.logs, + } + } } impl Typed2718 for OpReceipt { @@ -377,6 +388,30 @@ impl InMemorySize for OpReceipt { } } +impl From for OpReceipt { + fn from(envelope: op_alloy_consensus::OpReceiptEnvelope) -> Self { + match envelope { + op_alloy_consensus::OpReceiptEnvelope::Legacy(receipt) => Self::Legacy(receipt.receipt), + op_alloy_consensus::OpReceiptEnvelope::Eip2930(receipt) => { + Self::Eip2930(receipt.receipt) + } + op_alloy_consensus::OpReceiptEnvelope::Eip1559(receipt) => { + Self::Eip1559(receipt.receipt) + } + op_alloy_consensus::OpReceiptEnvelope::Eip7702(receipt) => { + Self::Eip7702(receipt.receipt) + } + op_alloy_consensus::OpReceiptEnvelope::Deposit(receipt) => { + Self::Deposit(OpDepositReceipt { + deposit_nonce: receipt.receipt.deposit_nonce, + deposit_receipt_version: receipt.receipt.deposit_receipt_version, + inner: receipt.receipt.inner, + }) + } + } + } +} + /// Trait for deposit receipt. pub trait DepositReceipt: reth_primitives_traits::Receipt { /// Converts a `Receipt` into a mutable Optimism deposit receipt. 
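A minimal usage sketch (editor's addition, not part of the patch) tying together the two receipt changes above: the new From<OpReceiptEnvelope> impl flattens any envelope variant, including Deposit, into the primitive OpReceipt, and the new TxReceipt::into_logs override then consumes the receipt into its logs without cloning. The function name envelope_into_logs is illustrative; the imports assume the crate paths shown in the hunk above.

use alloy_consensus::TxReceipt;
use alloy_primitives::Log;
use op_alloy_consensus::OpReceiptEnvelope;
use reth_optimism_primitives::OpReceipt;

// Convert an envelope into the primitive receipt, then take the logs by value.
fn envelope_into_logs(envelope: OpReceiptEnvelope) -> Vec<Log> {
    let receipt: OpReceipt = envelope.into();
    receipt.into_logs()
}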
diff --git a/crates/optimism/primitives/src/transaction/mod.rs b/crates/optimism/primitives/src/transaction/mod.rs index 3284b67fcbf..306f5459046 100644 --- a/crates/optimism/primitives/src/transaction/mod.rs +++ b/crates/optimism/primitives/src/transaction/mod.rs @@ -2,7 +2,7 @@ mod tx_type; -/// Kept for concistency tests +/// Kept for consistency tests #[cfg(test)] mod signed; diff --git a/crates/optimism/primitives/src/transaction/signed.rs b/crates/optimism/primitives/src/transaction/signed.rs index 2a345229a65..75276754687 100644 --- a/crates/optimism/primitives/src/transaction/signed.rs +++ b/crates/optimism/primitives/src/transaction/signed.rs @@ -127,17 +127,8 @@ impl SignerRecoverable for OpTransactionSigned { let signature_hash = signature_hash(transaction); recover_signer_unchecked(signature, signature_hash) } -} - -impl SignedTransaction for OpTransactionSigned { - fn tx_hash(&self) -> &TxHash { - self.hash.get_or_init(|| self.recalculate_hash()) - } - fn recover_signer_unchecked_with_buf( - &self, - buf: &mut Vec, - ) -> Result { + fn recover_unchecked_with_buf(&self, buf: &mut Vec) -> Result { match &self.transaction { // Optimism's Deposit transaction does not have a signature. Directly return the // `from` address. @@ -149,6 +140,12 @@ impl SignedTransaction for OpTransactionSigned { }; recover_signer_unchecked(&self.signature, keccak256(buf)) } +} + +impl SignedTransaction for OpTransactionSigned { + fn tx_hash(&self) -> &TxHash { + self.hash.get_or_init(|| self.recalculate_hash()) + } fn recalculate_hash(&self) -> B256 { keccak256(self.encoded_2718()) diff --git a/crates/optimism/reth/Cargo.toml b/crates/optimism/reth/Cargo.toml index 150a50fc84d..ae673efecf1 100644 --- a/crates/optimism/reth/Cargo.toml +++ b/crates/optimism/reth/Cargo.toml @@ -38,6 +38,7 @@ reth-trie-db = { workspace = true, optional = true } reth-node-builder = { workspace = true, optional = true } reth-tasks = { workspace = true, optional = true } reth-cli-util = { workspace = true, optional = true } +reth-engine-local = { workspace = true, optional = true } # reth-op reth-optimism-primitives.workspace = true @@ -110,6 +111,7 @@ node = [ "node-api", "dep:reth-optimism-node", "dep:reth-node-builder", + "dep:reth-engine-local", "rpc", "trie-db", ] diff --git a/crates/optimism/reth/src/lib.rs b/crates/optimism/reth/src/lib.rs index 3028b07b237..dd5fb5ba6c8 100644 --- a/crates/optimism/reth/src/lib.rs +++ b/crates/optimism/reth/src/lib.rs @@ -10,7 +10,7 @@ #![cfg_attr(not(feature = "std"), no_std)] #![allow(unused_crate_dependencies)] -/// Re-exported ethereum types +/// Re-exported optimism types #[doc(inline)] pub use reth_optimism_primitives::*; @@ -111,7 +111,7 @@ pub mod storage { pub use reth_storage_api::*; } -/// Re-exported ethereum node +/// Re-exported optimism node #[cfg(feature = "node-api")] pub mod node { #[doc(inline)] @@ -124,6 +124,15 @@ pub mod node { pub use reth_optimism_node::*; } +/// Re-exported engine types +#[cfg(feature = "node")] +pub mod engine { + #[doc(inline)] + pub use reth_engine_local as local; + #[doc(inline)] + pub use reth_optimism_node::engine::*; +} + /// Re-exported reth trie types #[cfg(feature = "trie")] pub mod trie { diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index d31de8a0b43..97f598628ef 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -14,9 +14,8 @@ workspace = true [dependencies] # reth reth-evm.workspace = true -reth-primitives-traits.workspace = true +reth-primitives-traits = { workspace 
= true, features = ["op"] } reth-storage-api.workspace = true -reth-chain-state.workspace = true reth-rpc-eth-api = { workspace = true, features = ["op"] } reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true @@ -25,7 +24,6 @@ reth-transaction-pool.workspace = true reth-rpc.workspace = true reth-rpc-api.workspace = true reth-node-api.workspace = true -reth-network-api.workspace = true reth-node-builder.workspace = true reth-chainspec.workspace = true reth-rpc-engine-api.workspace = true @@ -58,7 +56,6 @@ revm.workspace = true op-revm.workspace = true # async -parking_lot.workspace = true tokio.workspace = true reqwest = { workspace = true, features = ["rustls-tls-native-roots"] } async-trait.workspace = true diff --git a/crates/optimism/rpc/src/eth/block.rs b/crates/optimism/rpc/src/eth/block.rs index 34ce4081b2e..0efd9aea988 100644 --- a/crates/optimism/rpc/src/eth/block.rs +++ b/crates/optimism/rpc/src/eth/block.rs @@ -1,104 +1,23 @@ //! Loads and formats OP block RPC response. -use alloy_consensus::{transaction::TransactionMeta, BlockHeader}; -use alloy_rpc_types_eth::BlockId; -use op_alloy_rpc_types::OpTransactionReceipt; -use reth_chainspec::ChainSpecProvider; -use reth_node_api::BlockBody; -use reth_optimism_forks::OpHardforks; -use reth_optimism_primitives::{OpReceipt, OpTransactionSigned}; +use crate::{eth::RpcNodeCore, OpEthApi, OpEthApiError}; use reth_rpc_eth_api::{ - helpers::{EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt, SpawnBlocking}, - types::RpcTypes, - RpcReceipt, + helpers::{EthBlocks, LoadBlock}, + FromEvmError, RpcConvert, }; -use reth_storage_api::{BlockReader, HeaderProvider, ProviderTx}; -use reth_transaction_pool::{PoolTransaction, TransactionPool}; -use crate::{eth::OpNodeCore, OpEthApi, OpEthApiError, OpReceiptBuilder}; - -impl EthBlocks for OpEthApi +impl EthBlocks for OpEthApi where - Self: LoadBlock< - Error = OpEthApiError, - NetworkTypes: RpcTypes, - Provider: BlockReader, - >, - N: OpNodeCore + HeaderProvider>, + N: RpcNodeCore, + OpEthApiError: FromEvmError, + Rpc: RpcConvert, { - async fn block_receipts( - &self, - block_id: BlockId, - ) -> Result>>, Self::Error> - where - Self: LoadReceipt, - { - if let Some((block, receipts)) = self.load_block_and_receipts(block_id).await? { - let block_number = block.number(); - let base_fee = block.base_fee_per_gas(); - let block_hash = block.hash(); - let excess_blob_gas = block.excess_blob_gas(); - let timestamp = block.timestamp(); - - let mut l1_block_info = match reth_optimism_evm::extract_l1_info(block.body()) { - Ok(l1_block_info) => l1_block_info, - Err(err) => { - // If it is the genesis block (i.e block number is 0), there is no L1 info, so - // we return an empty l1_block_info. - if block_number == 0 { - return Ok(Some(vec![])); - } - return Err(err.into()); - } - }; - - return block - .body() - .transactions() - .iter() - .zip(receipts.iter()) - .enumerate() - .map(|(idx, (tx, receipt))| -> Result<_, _> { - let meta = TransactionMeta { - tx_hash: tx.tx_hash(), - index: idx as u64, - block_hash, - block_number, - base_fee, - excess_blob_gas, - timestamp, - }; - - // We must clear this cache as different L2 transactions can have different - // L1 costs. A potential improvement here is to only clear the cache if the - // new transaction input has changed, since otherwise the L1 cost wouldn't. - l1_block_info.clear_tx_l1_cost(); - - Ok(OpReceiptBuilder::new( - &self.inner.eth_api.provider().chain_spec(), - tx, - meta, - receipt, - &receipts, - &mut l1_block_info, - )? 
- .build()) - }) - .collect::, Self::Error>>() - .map(Some) - } - - Ok(None) - } } -impl LoadBlock for OpEthApi +impl LoadBlock for OpEthApi where - Self: LoadPendingBlock< - Pool: TransactionPool< - Transaction: PoolTransaction>, - >, - > + SpawnBlocking, - N: OpNodeCore, + N: RpcNodeCore, + OpEthApiError: FromEvmError, + Rpc: RpcConvert, { } diff --git a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs index d886b201bdf..e929ef7ca75 100644 --- a/crates/optimism/rpc/src/eth/call.rs +++ b/crates/optimism/rpc/src/eth/call.rs @@ -1,51 +1,31 @@ -use super::OpNodeCore; -use crate::{OpEthApi, OpEthApiError}; -use alloy_rpc_types_eth::TransactionRequest; -use op_revm::OpTransaction; -use reth_evm::{execute::BlockExecutorFactory, ConfigureEvm, EvmFactory, TxEnvFor}; -use reth_node_api::NodePrimitives; +use crate::{eth::RpcNodeCore, OpEthApi, OpEthApiError}; +use reth_evm::TxEnvFor; use reth_rpc_eth_api::{ - helpers::{estimate::EstimateCall, Call, EthCall, LoadBlock, LoadState, SpawnBlocking}, - FromEvmError, FullEthApiTypes, RpcConvert, RpcTypes, + helpers::{estimate::EstimateCall, Call, EthCall}, + FromEvmError, RpcConvert, }; -use reth_storage_api::{errors::ProviderError, ProviderHeader, ProviderTx}; -use revm::context::TxEnv; -impl EthCall for OpEthApi +impl EthCall for OpEthApi where - Self: EstimateCall + LoadBlock + FullEthApiTypes, - N: OpNodeCore, + N: RpcNodeCore, + OpEthApiError: FromEvmError, + Rpc: RpcConvert>, { } -impl EstimateCall for OpEthApi +impl EstimateCall for OpEthApi where - Self: Call, - Self::Error: From, - N: OpNodeCore, + N: RpcNodeCore, + OpEthApiError: FromEvmError, + Rpc: RpcConvert>, { } -impl Call for OpEthApi +impl Call for OpEthApi where - Self: LoadState< - Evm: ConfigureEvm< - Primitives: NodePrimitives< - BlockHeader = ProviderHeader, - SignedTx = ProviderTx, - >, - BlockExecutorFactory: BlockExecutorFactory< - EvmFactory: EvmFactory>, - >, - >, - RpcConvert: RpcConvert, Network = Self::NetworkTypes>, - NetworkTypes: RpcTypes>, - Error: FromEvmError - + From<::Error> - + From, - > + SpawnBlocking, - Self::Error: From, - N: OpNodeCore, + N: RpcNodeCore, + OpEthApiError: FromEvmError, + Rpc: RpcConvert>, { #[inline] fn call_gas_limit(&self) -> u64 { diff --git a/crates/optimism/rpc/src/eth/ext.rs b/crates/optimism/rpc/src/eth/ext.rs index 46008d0608b..6c4e1bc7cf1 100644 --- a/crates/optimism/rpc/src/eth/ext.rs +++ b/crates/optimism/rpc/src/eth/ext.rs @@ -10,7 +10,9 @@ use reth_optimism_txpool::conditional::MaybeConditionalTransaction; use reth_rpc_eth_api::L2EthApiExtServer; use reth_rpc_eth_types::utils::recover_raw_transaction; use reth_storage_api::{BlockReaderIdExt, StateProviderFactory}; -use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; +use reth_transaction_pool::{ + AddedTransactionOutcome, PoolTransaction, TransactionOrigin, TransactionPool, +}; use std::sync::Arc; use tokio::sync::Semaphore; @@ -157,7 +159,7 @@ where } else { // otherwise, add to pool with the appended conditional tx.set_conditional(condition); - let hash = + let AddedTransactionOutcome { hash, .. 
} = self.pool().add_transaction(TransactionOrigin::Private, tx).await.map_err(|e| { OpEthApiError::Eth(reth_rpc_eth_types::EthApiError::PoolError(e.into())) })?; diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index 29384e3aa0b..3b11c6a28fa 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -8,48 +8,36 @@ mod block; mod call; mod pending_block; -use crate::{eth::transaction::OpTxInfoMapper, OpEthApiError, SequencerClient}; +use crate::{ + eth::{receipt::OpReceiptConverter, transaction::OpTxInfoMapper}, + OpEthApiError, SequencerClient, +}; use alloy_primitives::U256; use eyre::WrapErr; use op_alloy_network::Optimism; pub use receipt::{OpReceiptBuilder, OpReceiptFieldsBuilder}; -use reth_chain_state::CanonStateSubscriptions; -use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; -use reth_network_api::NetworkInfo; -use reth_node_api::{FullNodeComponents, NodePrimitives}; +use reth_node_api::{FullNodeComponents, FullNodeTypes, HeaderTy}; use reth_node_builder::rpc::{EthApiBuilder, EthApiCtx}; use reth_rpc::eth::{core::EthApiInner, DevSigner}; use reth_rpc_eth_api::{ helpers::{ - AddDevSigners, EthApiSpec, EthFees, EthSigner, EthState, LoadBlock, LoadFee, LoadState, - SpawnBlocking, Trace, + pending_block::BuildPendingEnv, spec::SignersForApi, AddDevSigners, EthApiSpec, EthFees, + EthState, LoadFee, LoadState, SpawnBlocking, Trace, }, - EthApiTypes, FromEvmError, FullEthApiServer, RpcConverter, RpcNodeCore, RpcNodeCoreExt, + EthApiTypes, FromEvmError, FullEthApiServer, RpcConvert, RpcConverter, RpcNodeCore, + RpcNodeCoreExt, RpcTypes, SignableTxRequest, }; use reth_rpc_eth_types::{EthStateCache, FeeHistoryCache, GasPriceOracle}; -use reth_storage_api::{ - BlockNumReader, BlockReader, BlockReaderIdExt, ProviderBlock, ProviderHeader, ProviderReceipt, - ProviderTx, StageCheckpointReader, StateProviderFactory, -}; +use reth_storage_api::{ProviderHeader, ProviderTx}; use reth_tasks::{ pool::{BlockingTaskGuard, BlockingTaskPool}, TaskSpawner, }; -use reth_transaction_pool::TransactionPool; use std::{fmt, fmt::Formatter, marker::PhantomData, sync::Arc}; /// Adapter for [`EthApiInner`], which holds all the data required to serve core `eth_` API. -pub type EthApiNodeBackend = EthApiInner< - ::Provider, - ::Pool, - ::Network, - ::Evm, ->; - -/// A helper trait with requirements for [`RpcNodeCore`] to be used in [`OpEthApi`]. -pub trait OpNodeCore: RpcNodeCore {} -impl OpNodeCore for T where T: RpcNodeCore {} +pub type EthApiNodeBackend = EthApiInner; /// OP-Reth `Eth` API implementation. /// @@ -61,80 +49,68 @@ impl OpNodeCore for T where T: RpcNodeCore {} /// /// This type implements the [`FullEthApi`](reth_rpc_eth_api::helpers::FullEthApi) by implementing /// all the `Eth` helper traits and prerequisite traits. -#[derive(Clone)] -pub struct OpEthApi { +pub struct OpEthApi { /// Gateway to node's core components. - inner: Arc>, - /// Converter for RPC types. - tx_resp_builder: RpcConverter>, + inner: Arc>, +} + +impl Clone for OpEthApi { + fn clone(&self) -> Self { + Self { inner: self.inner.clone() } + } } -impl OpEthApi { +impl OpEthApi { /// Creates a new `OpEthApi`. 
pub fn new( - eth_api: EthApiNodeBackend, + eth_api: EthApiNodeBackend, sequencer_client: Option, min_suggested_priority_fee: U256, ) -> Self { let inner = Arc::new(OpEthApiInner { eth_api, sequencer_client, min_suggested_priority_fee }); - Self { - inner: inner.clone(), - tx_resp_builder: RpcConverter::with_mapper(OpTxInfoMapper::new(inner)), - } + Self { inner } } -} -impl OpEthApi -where - N: OpNodeCore< - Provider: BlockReaderIdExt + ChainSpecProvider + CanonStateSubscriptions + Clone + 'static, - >, -{ /// Returns a reference to the [`EthApiNodeBackend`]. - pub fn eth_api(&self) -> &EthApiNodeBackend { + pub fn eth_api(&self) -> &EthApiNodeBackend { self.inner.eth_api() } - /// Returns the configured sequencer client, if any. pub fn sequencer_client(&self) -> Option<&SequencerClient> { self.inner.sequencer_client() } /// Build a [`OpEthApi`] using [`OpEthApiBuilder`]. - pub const fn builder() -> OpEthApiBuilder { + pub const fn builder() -> OpEthApiBuilder { OpEthApiBuilder::new() } } -impl EthApiTypes for OpEthApi +impl EthApiTypes for OpEthApi where - Self: Send + Sync + fmt::Debug, - N: OpNodeCore, - NetworkT: op_alloy_network::Network + Clone + fmt::Debug, - ::Evm: fmt::Debug, - ::Primitives: fmt::Debug, + N: RpcNodeCore, + Rpc: RpcConvert, { type Error = OpEthApiError; - type NetworkTypes = NetworkT; - type RpcConvert = RpcConverter>; + type NetworkTypes = Rpc::Network; + type RpcConvert = Rpc; fn tx_resp_builder(&self) -> &Self::RpcConvert { - &self.tx_resp_builder + self.inner.eth_api.tx_resp_builder() } } -impl RpcNodeCore for OpEthApi +impl RpcNodeCore for OpEthApi where - N: OpNodeCore, - NetworkT: op_alloy_network::Network, + N: RpcNodeCore, + Rpc: RpcConvert, { type Primitives = N::Primitives; type Provider = N::Provider; type Pool = N::Pool; - type Evm = ::Evm; - type Network = ::Network; - type PayloadBuilder = (); + type Evm = N::Evm; + type Network = N::Network; #[inline] fn pool(&self) -> &Self::Pool { @@ -151,39 +127,30 @@ where self.inner.eth_api.network() } - #[inline] - fn payload_builder(&self) -> &Self::PayloadBuilder { - &() - } - #[inline] fn provider(&self) -> &Self::Provider { self.inner.eth_api.provider() } } -impl RpcNodeCoreExt for OpEthApi +impl RpcNodeCoreExt for OpEthApi where - N: OpNodeCore, - NetworkT: op_alloy_network::Network, + N: RpcNodeCore, + Rpc: RpcConvert, { #[inline] - fn cache(&self) -> &EthStateCache, ProviderReceipt> { + fn cache(&self) -> &EthStateCache { self.inner.eth_api.cache() } } -impl EthApiSpec for OpEthApi +impl EthApiSpec for OpEthApi where - N: OpNodeCore< - Provider: ChainSpecProvider - + BlockNumReader - + StageCheckpointReader, - Network: NetworkInfo, - >, - NetworkT: op_alloy_network::Network, + N: RpcNodeCore, + Rpc: RpcConvert, { type Transaction = ProviderTx; + type Rpc = Rpc::Network; #[inline] fn starting_block(&self) -> U256 { @@ -191,18 +158,15 @@ where } #[inline] - fn signers(&self) -> &parking_lot::RwLock>>>> { + fn signers(&self) -> &SignersForApi { self.inner.eth_api.signers() } } -impl SpawnBlocking for OpEthApi +impl SpawnBlocking for OpEthApi where - Self: Send + Sync + Clone + 'static, - N: OpNodeCore, - NetworkT: op_alloy_network::Network, - ::Evm: fmt::Debug, - ::Primitives: fmt::Debug, + N: RpcNodeCore, + Rpc: RpcConvert, { #[inline] fn io_task_spawner(&self) -> impl TaskSpawner { @@ -220,14 +184,11 @@ where } } -impl LoadFee for OpEthApi +impl LoadFee for OpEthApi where - Self: LoadBlock, - N: OpNodeCore< - Provider: BlockReaderIdExt - + ChainSpecProvider - + StateProviderFactory, - >, + N: 
RpcNodeCore, + OpEthApiError: FromEvmError, + Rpc: RpcConvert, { #[inline] fn gas_oracle(&self) -> &GasPriceOracle { @@ -245,22 +206,17 @@ where } } -impl LoadState for OpEthApi +impl LoadState for OpEthApi where - N: OpNodeCore< - Provider: StateProviderFactory + ChainSpecProvider, - Pool: TransactionPool, - >, - NetworkT: op_alloy_network::Network, - ::Evm: fmt::Debug, - ::Primitives: fmt::Debug, + N: RpcNodeCore, + Rpc: RpcConvert, { } -impl EthState for OpEthApi +impl EthState for OpEthApi where - Self: LoadState + SpawnBlocking, - N: OpNodeCore, + N: RpcNodeCore, + Rpc: RpcConvert, { #[inline] fn max_proof_window(&self) -> u64 { @@ -268,52 +224,44 @@ where } } -impl EthFees for OpEthApi +impl EthFees for OpEthApi where - Self: LoadFee< - Provider: ChainSpecProvider< - ChainSpec: EthChainSpec
>, - >, - >, - N: OpNodeCore, + N: RpcNodeCore, + OpEthApiError: FromEvmError, + Rpc: RpcConvert, { } -impl Trace for OpEthApi +impl Trace for OpEthApi where - Self: RpcNodeCore - + LoadState< - Evm: ConfigureEvm< - Primitives: NodePrimitives< - BlockHeader = ProviderHeader, - SignedTx = ProviderTx, - >, - >, - Error: FromEvmError, - >, - N: OpNodeCore, + N: RpcNodeCore, + OpEthApiError: FromEvmError, + Rpc: RpcConvert, { } -impl AddDevSigners for OpEthApi +impl AddDevSigners for OpEthApi where - N: OpNodeCore, + N: RpcNodeCore, + Rpc: RpcConvert< + Network: RpcTypes>>, + >, { fn with_dev_accounts(&self) { *self.inner.eth_api.signers().write() = DevSigner::random_signers(20) } } -impl fmt::Debug for OpEthApi { +impl fmt::Debug for OpEthApi { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OpEthApi").finish_non_exhaustive() } } /// Container type `OpEthApi` -pub struct OpEthApiInner { +pub struct OpEthApiInner { /// Gateway to node's core components. - eth_api: EthApiNodeBackend, + eth_api: EthApiNodeBackend, /// Sequencer client, configured to forward submitted transactions to sequencer of given OP /// network. sequencer_client: Option, @@ -323,15 +271,15 @@ pub struct OpEthApiInner { min_suggested_priority_fee: U256, } -impl fmt::Debug for OpEthApiInner { +impl fmt::Debug for OpEthApiInner { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("OpEthApiInner").finish() } } -impl OpEthApiInner { +impl OpEthApiInner { /// Returns a reference to the [`EthApiNodeBackend`]. - const fn eth_api(&self) -> &EthApiNodeBackend { + const fn eth_api(&self) -> &EthApiNodeBackend { &self.eth_api } @@ -341,6 +289,15 @@ impl OpEthApiInner { } } +/// Converter for OP RPC types. +pub type OpRpcConvert = RpcConverter< + NetworkT, + ::Evm, + OpReceiptConverter<::Provider>, + (), + OpTxInfoMapper<::Provider>, +>; + /// Builds [`OpEthApi`] for Optimism. #[derive(Debug)] pub struct OpEthApiBuilder { @@ -398,29 +355,21 @@ impl OpEthApiBuilder { impl EthApiBuilder for OpEthApiBuilder where - N: FullNodeComponents, - OpEthApi: FullEthApiServer, - NetworkT: op_alloy_network::Network + Unpin, + N: FullNodeComponents>>>, + NetworkT: RpcTypes, + OpRpcConvert: RpcConvert, + OpEthApi>: + FullEthApiServer + AddDevSigners, { - type EthApi = OpEthApi; + type EthApi = OpEthApi>; async fn build_eth_api(self, ctx: EthApiCtx<'_, N>) -> eyre::Result { let Self { sequencer_url, sequencer_headers, min_suggested_priority_fee, .. 
} = self; - let eth_api = reth_rpc::EthApiBuilder::new( - ctx.components.provider().clone(), - ctx.components.pool().clone(), - ctx.components.network().clone(), - ctx.components.evm_config().clone(), - ) - .eth_cache(ctx.cache) - .task_spawner(ctx.components.task_executor().clone()) - .gas_cap(ctx.config.rpc_gas_cap.into()) - .max_simulate_blocks(ctx.config.rpc_max_simulate_blocks) - .eth_proof_window(ctx.config.eth_proof_window) - .fee_history_cache_config(ctx.config.fee_history_cache) - .proof_permits(ctx.config.proof_permits) - .gas_oracle_config(ctx.config.gas_oracle) - .build_inner(); + let rpc_converter = + RpcConverter::new(OpReceiptConverter::new(ctx.components.provider().clone())) + .with_mapper(OpTxInfoMapper::new(ctx.components.provider().clone())); + + let eth_api = ctx.eth_api_builder().with_rpc_converter(rpc_converter).build_inner(); let sequencer_client = if let Some(url) = sequencer_url { Some( diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index de011aa2797..5b50ea68f0e 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -1,76 +1,33 @@ //! Loads OP pending block for a RPC response. -use crate::OpEthApi; -use alloy_consensus::BlockHeader; +use std::sync::Arc; + +use crate::{OpEthApi, OpEthApiError}; use alloy_eips::BlockNumberOrTag; -use alloy_primitives::B256; -use reth_chainspec::{ChainSpecProvider, EthChainSpec}; -use reth_evm::ConfigureEvm; -use reth_node_api::NodePrimitives; -use reth_optimism_evm::OpNextBlockEnvAttributes; -use reth_optimism_forks::OpHardforks; -use reth_primitives_traits::{RecoveredBlock, SealedHeader}; +use reth_primitives_traits::RecoveredBlock; use reth_rpc_eth_api::{ - helpers::{LoadPendingBlock, SpawnBlocking}, - types::RpcTypes, - EthApiTypes, FromEthApiError, FromEvmError, RpcConvert, RpcNodeCore, + helpers::{pending_block::PendingEnvBuilder, LoadPendingBlock}, + FromEvmError, RpcConvert, RpcNodeCore, }; use reth_rpc_eth_types::{EthApiError, PendingBlock}; use reth_storage_api::{ - BlockReader, BlockReaderIdExt, ProviderBlock, ProviderHeader, ProviderReceipt, ProviderTx, - ReceiptProvider, StateProviderFactory, + BlockReader, BlockReaderIdExt, ProviderBlock, ProviderReceipt, ReceiptProvider, }; -use reth_transaction_pool::{PoolTransaction, TransactionPool}; -impl LoadPendingBlock for OpEthApi +impl LoadPendingBlock for OpEthApi where - Self: SpawnBlocking - + EthApiTypes< - NetworkTypes: RpcTypes< - Header = alloy_rpc_types_eth::Header>, - >, - Error: FromEvmError, - RpcConvert: RpcConvert, - >, - N: RpcNodeCore< - Provider: BlockReaderIdExt - + ChainSpecProvider - + StateProviderFactory, - Pool: TransactionPool>>, - Evm: ConfigureEvm< - Primitives = ::Primitives, - NextBlockEnvCtx: From, - >, - Primitives: NodePrimitives< - BlockHeader = ProviderHeader, - SignedTx = ProviderTx, - Receipt = ProviderReceipt, - Block = ProviderBlock, - >, - >, + N: RpcNodeCore, + OpEthApiError: FromEvmError, + Rpc: RpcConvert, { #[inline] - fn pending_block( - &self, - ) -> &tokio::sync::Mutex< - Option, ProviderReceipt>>, - > { + fn pending_block(&self) -> &tokio::sync::Mutex>> { self.inner.eth_api.pending_block() } - fn next_env_attributes( - &self, - parent: &SealedHeader>, - ) -> Result<::NextBlockEnvCtx, Self::Error> { - Ok(OpNextBlockEnvAttributes { - timestamp: parent.timestamp().saturating_add(12), - suggested_fee_recipient: parent.beneficiary(), - prev_randao: B256::random(), - gas_limit: parent.gas_limit(), - parent_beacon_block_root: 
parent.parent_beacon_block_root(), - extra_data: parent.extra_data().clone(), - } - .into()) + #[inline] + fn pending_env_builder(&self) -> &dyn PendingEnvBuilder { + self.inner.eth_api.pending_env_builder() } /// Returns the locally built pending block @@ -78,30 +35,27 @@ where &self, ) -> Result< Option<( - RecoveredBlock>, - Vec>, + Arc>>, + Arc>>, )>, Self::Error, > { // See: let latest = self .provider() - .latest_header() - .map_err(Self::Error::from_eth_err)? + .latest_header()? .ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()))?; let block_id = latest.hash().into(); let block = self .provider() - .recovered_block(block_id, Default::default()) - .map_err(Self::Error::from_eth_err)? + .recovered_block(block_id, Default::default())? .ok_or(EthApiError::HeaderNotFound(block_id.into()))?; let receipts = self .provider() - .receipts_by_block(block_id) - .map_err(Self::Error::from_eth_err)? + .receipts_by_block(block_id)? .ok_or(EthApiError::ReceiptsNotFound(block_id.into()))?; - Ok(Some((block, receipts))) + Ok(Some((Arc::new(block), Arc::new(receipts)))) } } diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index 92bd6fb1957..edf16900f04 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -1,57 +1,95 @@ //! Loads and formats OP receipt RPC response. -use alloy_consensus::transaction::TransactionMeta; +use crate::{eth::RpcNodeCore, OpEthApi, OpEthApiError}; use alloy_eips::eip2718::Encodable2718; use alloy_rpc_types_eth::{Log, TransactionReceipt}; -use op_alloy_consensus::{OpDepositReceipt, OpDepositReceiptWithBloom, OpReceiptEnvelope}; +use op_alloy_consensus::{ + OpDepositReceipt, OpDepositReceiptWithBloom, OpReceiptEnvelope, OpTransaction, +}; use op_alloy_rpc_types::{L1BlockInfo, OpTransactionReceipt, OpTransactionReceiptFields}; use reth_chainspec::ChainSpecProvider; -use reth_node_api::{FullNodeComponents, NodeTypes}; +use reth_node_api::NodePrimitives; use reth_optimism_evm::RethL1BlockInfo; use reth_optimism_forks::OpHardforks; -use reth_optimism_primitives::{OpReceipt, OpTransactionSigned}; -use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcReceipt}; +use reth_optimism_primitives::OpReceipt; +use reth_primitives_traits::Block; +use reth_rpc_eth_api::{ + helpers::LoadReceipt, + transaction::{ConvertReceiptInput, ReceiptConverter}, + RpcConvert, +}; use reth_rpc_eth_types::{receipt::build_receipt, EthApiError}; -use reth_storage_api::{ReceiptProvider, TransactionsProvider}; +use reth_storage_api::BlockReader; +use std::fmt::Debug; -use crate::{OpEthApi, OpEthApiError}; +impl LoadReceipt for OpEthApi +where + N: RpcNodeCore, + Rpc: RpcConvert, +{ +} + +/// Converter for OP receipts. +#[derive(Debug, Clone)] +pub struct OpReceiptConverter { + provider: Provider, +} -impl LoadReceipt for OpEthApi +impl OpReceiptConverter { + /// Creates a new [`OpReceiptConverter`]. 
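Two details in the pending-block hunk above are easy to miss: the locally built block and receipts are now returned behind `Arc`, so callers share them instead of deep-cloning, and the explicit `map_err(Self::Error::from_eth_err)` calls disappear because `?` can rely on a `From` conversion into the API error type. A minimal self-contained sketch of the latter pattern, with stand-in types rather than reth's:

    // Stand-in error types; the point is that once From is implemented,
    // `?` performs the conversion that map_err used to spell out.
    #[derive(Debug)]
    struct ProviderError;

    #[derive(Debug)]
    struct ApiError;

    impl From<ProviderError> for ApiError {
        fn from(_: ProviderError) -> Self {
            ApiError
        }
    }

    fn latest_header() -> Result<u64, ProviderError> {
        Ok(42)
    }

    fn handler() -> Result<u64, ApiError> {
        // `?` applies From<ProviderError> for ApiError automatically.
        Ok(latest_header()?)
    }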
+ pub const fn new(provider: Provider) -> Self { + Self { provider } + } +} + +impl ReceiptConverter for OpReceiptConverter where - Self: Send + Sync, - N: FullNodeComponents>, - Self::Provider: TransactionsProvider - + ReceiptProvider, + N: NodePrimitives, + Provider: BlockReader + ChainSpecProvider + Debug + 'static, { - async fn build_transaction_receipt( + type RpcReceipt = OpTransactionReceipt; + type Error = OpEthApiError; + + fn convert_receipts( &self, - tx: OpTransactionSigned, - meta: TransactionMeta, - receipt: OpReceipt, - ) -> Result, Self::Error> { - let (block, receipts) = self - .inner - .eth_api - .cache() - .get_block_and_receipts(meta.block_hash) - .await - .map_err(Self::Error::from_eth_err)? - .ok_or(Self::Error::from_eth_err(EthApiError::HeaderNotFound( - meta.block_hash.into(), - )))?; + inputs: Vec>, + ) -> Result, Self::Error> { + let Some(block_number) = inputs.first().map(|r| r.meta.block_number) else { + return Ok(Vec::new()); + }; - let mut l1_block_info = - reth_optimism_evm::extract_l1_info(block.body()).map_err(OpEthApiError::from)?; - - Ok(OpReceiptBuilder::new( - &self.inner.eth_api.provider().chain_spec(), - &tx, - meta, - &receipt, - &receipts, - &mut l1_block_info, - )? - .build()) + let block = self + .provider + .block_by_number(block_number)? + .ok_or(EthApiError::HeaderNotFound(block_number.into()))?; + + let mut l1_block_info = match reth_optimism_evm::extract_l1_info(block.body()) { + Ok(l1_block_info) => l1_block_info, + Err(err) => { + // If it is the genesis block (i.e block number is 0), there is no L1 info, so + // we return an empty l1_block_info. + if block_number == 0 { + return Ok(vec![]); + } + return Err(err.into()); + } + }; + + let mut receipts = Vec::with_capacity(inputs.len()); + + for input in inputs { + // We must clear this cache as different L2 transactions can have different + // L1 costs. A potential improvement here is to only clear the cache if the + // new transaction input has changed, since otherwise the L1 cost wouldn't. + l1_block_info.clear_tx_l1_cost(); + + receipts.push( + OpReceiptBuilder::new(&self.provider.chain_spec(), input, &mut l1_block_info)? + .build(), + ); + } + + Ok(receipts) } } @@ -112,10 +150,10 @@ impl OpReceiptFieldsBuilder { } /// Applies [`L1BlockInfo`](op_revm::L1BlockInfo). - pub fn l1_block_info( + pub fn l1_block_info( mut self, chain_spec: &impl OpHardforks, - tx: &OpTransactionSigned, + tx: &T, l1_block_info: &mut op_revm::L1BlockInfo, ) -> Result { let raw_tx = tx.encoded_2718(); @@ -221,44 +259,43 @@ pub struct OpReceiptBuilder { impl OpReceiptBuilder { /// Returns a new builder. 
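The batch hook above is the structural change: receipt conversion now sees every receipt of a block at once, so per-block work (loading the block, deriving the `L1BlockInfo`) runs once instead of per transaction, and only the per-transaction L1 cost cache is reset inside the loop. A minimal sketch of the trait shape with stand-in generics, mirroring the `convert_receipts` signature used here:

    // Stand-in sketch of the batch-conversion hook: implementors receive all
    // inputs for one block, amortizing per-block lookups across receipts.
    trait ReceiptConverter<Input> {
        type RpcReceipt;
        type Error;

        fn convert_receipts(
            &self,
            inputs: Vec<Input>,
        ) -> Result<Vec<Self::RpcReceipt>, Self::Error>;
    }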
- pub fn new( + pub fn new( chain_spec: &impl OpHardforks, - transaction: &OpTransactionSigned, - meta: TransactionMeta, - receipt: &OpReceipt, - all_receipts: &[OpReceipt], + input: ConvertReceiptInput<'_, N>, l1_block_info: &mut op_revm::L1BlockInfo, - ) -> Result { - let timestamp = meta.timestamp; - let block_number = meta.block_number; + ) -> Result + where + N: NodePrimitives, + { + let timestamp = input.meta.timestamp; + let block_number = input.meta.block_number; + let tx_signed = *input.tx.inner(); let core_receipt = - build_receipt(transaction, meta, receipt, all_receipts, None, |receipt_with_bloom| { - match receipt { - OpReceipt::Legacy(_) => OpReceiptEnvelope::::Legacy(receipt_with_bloom), - OpReceipt::Eip2930(_) => OpReceiptEnvelope::::Eip2930(receipt_with_bloom), - OpReceipt::Eip1559(_) => OpReceiptEnvelope::::Eip1559(receipt_with_bloom), - OpReceipt::Eip7702(_) => OpReceiptEnvelope::::Eip7702(receipt_with_bloom), - OpReceipt::Deposit(receipt) => { - OpReceiptEnvelope::::Deposit(OpDepositReceiptWithBloom:: { - receipt: OpDepositReceipt:: { - inner: receipt_with_bloom.receipt, - deposit_nonce: receipt.deposit_nonce, - deposit_receipt_version: receipt.deposit_receipt_version, - }, - logs_bloom: receipt_with_bloom.logs_bloom, - }) - } + build_receipt(&input, None, |receipt_with_bloom| match input.receipt.as_ref() { + OpReceipt::Legacy(_) => OpReceiptEnvelope::Legacy(receipt_with_bloom), + OpReceipt::Eip2930(_) => OpReceiptEnvelope::Eip2930(receipt_with_bloom), + OpReceipt::Eip1559(_) => OpReceiptEnvelope::Eip1559(receipt_with_bloom), + OpReceipt::Eip7702(_) => OpReceiptEnvelope::Eip7702(receipt_with_bloom), + OpReceipt::Deposit(receipt) => { + OpReceiptEnvelope::Deposit(OpDepositReceiptWithBloom { + receipt: OpDepositReceipt { + inner: receipt_with_bloom.receipt, + deposit_nonce: receipt.deposit_nonce, + deposit_receipt_version: receipt.deposit_receipt_version, + }, + logs_bloom: receipt_with_bloom.logs_bloom, + }) } - })?; + }); let op_receipt_fields = OpReceiptFieldsBuilder::new(timestamp, block_number) - .l1_block_info(chain_spec, transaction, l1_block_info)? + .l1_block_info(chain_spec, tx_signed, l1_block_info)? .build(); Ok(Self { core_receipt, op_receipt_fields }) } - /// Builds [`OpTransactionReceipt`] by combing core (l1) receipt fields and additional OP + /// Builds [`OpTransactionReceipt`] by combining core (l1) receipt fields and additional OP /// receipt fields. pub fn build(self) -> OpTransactionReceipt { let Self { core_receipt: inner, op_receipt_fields } = self; @@ -276,6 +313,7 @@ mod test { use alloy_primitives::{hex, U256}; use op_alloy_network::eip2718::Decodable2718; use reth_optimism_chainspec::{BASE_MAINNET, OP_MAINNET}; + use reth_optimism_primitives::OpTransactionSigned; /// OP Mainnet transaction at index 0 in block 124665056. /// diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 30422316ad9..f8437c12623 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -1,36 +1,28 @@ //! Loads and formats OP transaction RPC response. 
-use crate::{ - eth::{OpEthApiInner, OpNodeCore}, - OpEthApi, OpEthApiError, SequencerClient, -}; +use crate::{OpEthApi, OpEthApiError, SequencerClient}; use alloy_primitives::{Bytes, B256}; use alloy_rpc_types_eth::TransactionInfo; -use op_alloy_consensus::{transaction::OpTransactionInfo, OpTxEnvelope}; -use reth_node_api::FullNodeComponents; +use op_alloy_consensus::{transaction::OpTransactionInfo, OpTransaction}; use reth_optimism_primitives::DepositReceipt; +use reth_primitives_traits::SignedTransaction; use reth_rpc_eth_api::{ - helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, - try_into_op_tx_info, EthApiTypes, FromEthApiError, FullEthApiTypes, RpcNodeCore, - RpcNodeCoreExt, TxInfoMapper, + helpers::{spec::SignersForRpc, EthTransactions, LoadTransaction}, + try_into_op_tx_info, FromEthApiError, RpcConvert, RpcNodeCore, TxInfoMapper, }; use reth_rpc_eth_types::utils::recover_raw_transaction; -use reth_storage_api::{ - errors::ProviderError, BlockReader, BlockReaderIdExt, ProviderTx, ReceiptProvider, - TransactionsProvider, -}; -use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; -use std::{ - fmt::{Debug, Formatter}, - sync::Arc, +use reth_storage_api::{errors::ProviderError, ReceiptProvider}; +use reth_transaction_pool::{ + AddedTransactionOutcome, PoolTransaction, TransactionOrigin, TransactionPool, }; +use std::fmt::{Debug, Formatter}; -impl EthTransactions for OpEthApi +impl EthTransactions for OpEthApi where - Self: LoadTransaction + EthApiTypes, - N: OpNodeCore>>, + N: RpcNodeCore, + Rpc: RpcConvert, { - fn signers(&self) -> &parking_lot::RwLock>>>> { + fn signers(&self) -> &SignersForRpc { self.inner.eth_api.signers() } @@ -39,6 +31,10 @@ where /// Returns the hash of the transaction. async fn send_raw_transaction(&self, tx: Bytes) -> Result { let recovered = recover_raw_transaction(&tx)?; + + // broadcast raw transaction to subscribers if there is any. + self.eth_api().broadcast_raw_transaction(tx.clone()); + let pool_transaction = ::Transaction::from_pooled(recovered); // On optimism, transactions are forwarded directly to the sequencer to be included in @@ -61,7 +57,7 @@ where } // submit the transaction to the pool with a `Local` origin - let hash = self + let AddedTransactionOutcome { hash, .. } = self .pool() .add_transaction(TransactionOrigin::Local, pool_transaction) .await @@ -71,17 +67,17 @@ where } } -impl LoadTransaction for OpEthApi +impl LoadTransaction for OpEthApi where - Self: SpawnBlocking + FullEthApiTypes + RpcNodeCoreExt, - N: OpNodeCore, - Self::Pool: TransactionPool, + N: RpcNodeCore, + Rpc: RpcConvert, { } -impl OpEthApi +impl OpEthApi where - N: OpNodeCore, + N: RpcNodeCore, + Rpc: RpcConvert, { /// Returns the [`SequencerClient`] if one is set. pub fn raw_tx_forwarder(&self) -> Option { @@ -93,35 +89,38 @@ where /// /// For deposits, receipt is fetched to extract `deposit_nonce` and `deposit_receipt_version`. /// Otherwise, it works like regular Ethereum implementation, i.e. uses [`TransactionInfo`]. 
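Condensing the `send_raw_transaction` flow earlier in this hunk: the raw bytes are first rebroadcast to any raw-transaction subscribers, then forwarded to the sequencer when one is configured, and finally inserted into the local pool, whose `add_transaction` now yields an `AddedTransactionOutcome` rather than a bare hash. A self-contained sketch of that ordering with stand-in types (error handling of the forwarding step is assumed non-fatal here):

    // Stand-ins, not reth's types; the point is the submission order on OP:
    // sequencer first (so it can include the tx promptly), local pool always
    // (so local RPC views stay consistent).
    struct AddedTransactionOutcome { hash: u64 }

    async fn forward_to_sequencer(_raw: &[u8]) -> Result<(), ()> { Ok(()) }
    async fn add_to_pool(_raw: &[u8]) -> AddedTransactionOutcome {
        AddedTransactionOutcome { hash: 1 }
    }

    async fn send_raw(raw: Vec<u8>, has_sequencer: bool) -> u64 {
        // (raw bytes would be broadcast to subscribers here)
        if has_sequencer {
            let _ = forward_to_sequencer(&raw).await;
        }
        let AddedTransactionOutcome { hash, .. } = add_to_pool(&raw).await;
        hash
    }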
-#[derive(Clone)] -pub struct OpTxInfoMapper(Arc>); +pub struct OpTxInfoMapper { + provider: Provider, +} + +impl Clone for OpTxInfoMapper { + fn clone(&self) -> Self { + Self { provider: self.provider.clone() } + } +} -impl Debug for OpTxInfoMapper { +impl Debug for OpTxInfoMapper { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { f.debug_struct("OpTxInfoMapper").finish() } } -impl OpTxInfoMapper { +impl OpTxInfoMapper { /// Creates [`OpTxInfoMapper`] that uses [`ReceiptProvider`] borrowed from given `eth_api`. - pub const fn new(eth_api: Arc>) -> Self { - Self(eth_api) + pub const fn new(provider: Provider) -> Self { + Self { provider } } } -impl TxInfoMapper<&OpTxEnvelope> for OpTxInfoMapper +impl TxInfoMapper<&T> for OpTxInfoMapper where - N: FullNodeComponents, - N::Provider: ReceiptProvider, + T: OpTransaction + SignedTransaction, + Provider: ReceiptProvider, { type Out = OpTransactionInfo; type Err = ProviderError; - fn try_map( - &self, - tx: &OpTxEnvelope, - tx_info: TransactionInfo, - ) -> Result { - try_into_op_tx_info(self.0.eth_api.provider(), tx, tx_info) + fn try_map(&self, tx: &T, tx_info: TransactionInfo) -> Result { + try_into_op_tx_info(&self.provider, tx, tx_info) } } diff --git a/crates/optimism/rpc/src/historical.rs b/crates/optimism/rpc/src/historical.rs index 0f8824882b3..07cbadf4619 100644 --- a/crates/optimism/rpc/src/historical.rs +++ b/crates/optimism/rpc/src/historical.rs @@ -136,16 +136,24 @@ where Box::pin(async move { let maybe_block_id = match req.method_name() { - "eth_getBlockByNumber" | "eth_getBlockByHash" => { - parse_block_id_from_params(&req.params(), 0) - } + "eth_getBlockByNumber" | + "eth_getBlockByHash" | + "debug_traceBlockByNumber" | + "debug_traceBlockByHash" => parse_block_id_from_params(&req.params(), 0), "eth_getBalance" | "eth_getCode" | "eth_getTransactionCount" | "eth_call" | "eth_estimateGas" | - "eth_createAccessList" => parse_block_id_from_params(&req.params(), 1), + "eth_createAccessList" | + "debug_traceCall" => parse_block_id_from_params(&req.params(), 1), "eth_getStorageAt" | "eth_getProof" => parse_block_id_from_params(&req.params(), 2), + "debug_traceTransaction" => { + // debug_traceTransaction takes a transaction hash as its first parameter, + // not a BlockId. 
We assume the op-reth instance is configured with a minimal + // bootstrap without block bodies, so we can't check whether this tx is pre-Bedrock + None + } _ => None, }; @@ -173,8 +181,7 @@ where .request::<_, serde_json::Value>(req.method_name(), params) .await { - let payload = - jsonrpsee_types::ResponsePayload::success(raw.to_string()).into(); + let payload = jsonrpsee_types::ResponsePayload::success(raw).into(); return MethodResponse::response(req.id, payload, usize::MAX); } } diff --git a/crates/optimism/rpc/src/witness.rs b/crates/optimism/rpc/src/witness.rs index bc86e93f91c..1858b4fd2f1 100644 --- a/crates/optimism/rpc/src/witness.rs +++ b/crates/optimism/rpc/src/witness.rs @@ -3,15 +3,13 @@ use alloy_primitives::B256; use alloy_rpc_types_debug::ExecutionWitness; use jsonrpsee_core::{async_trait, RpcResult}; -use op_alloy_rpc_types_engine::OpPayloadAttributes; use reth_chainspec::ChainSpecProvider; use reth_evm::ConfigureEvm; -use reth_node_api::NodePrimitives; -use reth_optimism_evm::OpNextBlockEnvAttributes; +use reth_node_api::{BuildNextEnv, NodePrimitives}; use reth_optimism_forks::OpHardforks; -use reth_optimism_payload_builder::{OpPayloadBuilder, OpPayloadPrimitives}; +use reth_optimism_payload_builder::{OpAttributes, OpPayloadBuilder, OpPayloadPrimitives}; use reth_optimism_txpool::OpPooledTx; -use reth_primitives_traits::SealedHeader; +use reth_primitives_traits::{SealedHeader, TxTy}; pub use reth_rpc_api::DebugExecutionWitnessApiServer; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use reth_storage_api::{ @@ -24,16 +22,16 @@ use std::{fmt::Debug, sync::Arc}; use tokio::sync::{oneshot, Semaphore}; /// An extension to the `debug_` namespace of the RPC API. -pub struct OpDebugWitnessApi { - inner: Arc>, +pub struct OpDebugWitnessApi { + inner: Arc>, } -impl OpDebugWitnessApi { +impl OpDebugWitnessApi { /// Creates a new instance of the `OpDebugWitnessApi`.
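The routing rule in the historical-forwarding hunk above is positional: each supported method carries its `BlockId` at a fixed parameter index. A self-contained recap of the mapping as extended by this diff:

    // Which positional parameter holds the BlockId, per method, as routed by
    // the middleware above; None means the request is never forwarded on the
    // basis of a BlockId (e.g. debug_traceTransaction takes a tx hash).
    fn block_id_param_index(method: &str) -> Option<usize> {
        match method {
            "eth_getBlockByNumber" | "eth_getBlockByHash" |
            "debug_traceBlockByNumber" | "debug_traceBlockByHash" => Some(0),
            "eth_getBalance" | "eth_getCode" | "eth_getTransactionCount" |
            "eth_call" | "eth_estimateGas" | "eth_createAccessList" |
            "debug_traceCall" => Some(1),
            "eth_getStorageAt" | "eth_getProof" => Some(2),
            _ => None,
        }
    }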
pub fn new( provider: Provider, task_spawner: Box, - builder: OpPayloadBuilder, + builder: OpPayloadBuilder, ) -> Self { let semaphore = Arc::new(Semaphore::new(3)); let inner = OpDebugWitnessApiInner { provider, builder, task_spawner, semaphore }; @@ -41,7 +39,7 @@ impl OpDebugWitnessApi { } } -impl OpDebugWitnessApi +impl OpDebugWitnessApi where EvmConfig: ConfigureEvm, Provider: NodePrimitivesProvider> @@ -60,8 +58,8 @@ where } #[async_trait] -impl DebugExecutionWitnessApiServer - for OpDebugWitnessApi +impl DebugExecutionWitnessApiServer + for OpDebugWitnessApi where Pool: TransactionPool< Transaction: OpPooledTx::SignedTx>, @@ -72,13 +70,16 @@ where + ChainSpecProvider + Clone + 'static, - EvmConfig: ConfigureEvm - + 'static, + EvmConfig: ConfigureEvm< + Primitives = Provider::Primitives, + NextBlockEnvCtx: BuildNextEnv, + > + 'static, + Attrs: OpAttributes>, { async fn execute_payload( &self, parent_block_hash: B256, - attributes: OpPayloadAttributes, + attributes: Attrs::RpcPayloadAttributes, ) -> RpcResult { let _permit = self.inner.semaphore.acquire().await; @@ -97,20 +98,24 @@ where } } -impl Clone for OpDebugWitnessApi { +impl Clone + for OpDebugWitnessApi +{ fn clone(&self) -> Self { Self { inner: Arc::clone(&self.inner) } } } -impl Debug for OpDebugWitnessApi { +impl Debug + for OpDebugWitnessApi +{ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("OpDebugWitnessApi").finish_non_exhaustive() } } -struct OpDebugWitnessApiInner { +struct OpDebugWitnessApiInner { provider: Provider, - builder: OpPayloadBuilder, + builder: OpPayloadBuilder, task_spawner: Box, semaphore: Arc, } diff --git a/crates/optimism/storage/Cargo.toml b/crates/optimism/storage/Cargo.toml index 56ced8d74e1..564d6e38cda 100644 --- a/crates/optimism/storage/Cargo.toml +++ b/crates/optimism/storage/Cargo.toml @@ -26,7 +26,6 @@ alloy-consensus.workspace = true [dev-dependencies] reth-codecs = { workspace = true, features = ["test-utils"] } -reth-db-api.workspace = true reth-prune-types.workspace = true reth-stages-types.workspace = true diff --git a/crates/payload/builder/Cargo.toml b/crates/payload/builder/Cargo.toml index 5ae0425f3e1..222af0a664d 100644 --- a/crates/payload/builder/Cargo.toml +++ b/crates/payload/builder/Cargo.toml @@ -38,7 +38,6 @@ tracing.workspace = true [dev-dependencies] alloy-primitives.workspace = true -alloy-consensus.workspace = true tokio = { workspace = true, features = ["sync", "rt"] } diff --git a/crates/payload/builder/src/noop.rs b/crates/payload/builder/src/noop.rs index 6047bffa8b1..c20dac0f2d5 100644 --- a/crates/payload/builder/src/noop.rs +++ b/crates/payload/builder/src/noop.rs @@ -64,3 +64,10 @@ impl Default for NoopPayloadBuilderService { service } } + +impl PayloadBuilderHandle { + /// Returns a new noop instance. + pub fn noop() -> Self { + Self::new(mpsc::unbounded_channel().0) + } +} diff --git a/crates/payload/primitives/src/lib.rs b/crates/payload/primitives/src/lib.rs index fb78cae16c7..811b9da7f19 100644 --- a/crates/payload/primitives/src/lib.rs +++ b/crates/payload/primitives/src/lib.rs @@ -26,7 +26,8 @@ pub use error::{ mod traits; pub use traits::{ - BuiltPayload, PayloadAttributes, PayloadAttributesBuilder, PayloadBuilderAttributes, + BuildNextEnv, BuiltPayload, PayloadAttributes, PayloadAttributesBuilder, + PayloadBuilderAttributes, }; mod payload; @@ -413,6 +414,17 @@ impl EngineApiMessageVersion { pub const fn is_v5(&self) -> bool { matches!(self, Self::V5) } + + /// Returns the method name for the given version. 
+ pub const fn method_name(&self) -> &'static str { + match self { + Self::V1 => "engine_newPayloadV1", + Self::V2 => "engine_newPayloadV2", + Self::V3 => "engine_newPayloadV3", + Self::V4 => "engine_newPayloadV4", + Self::V5 => "engine_newPayloadV5", + } + } } /// Determines how we should choose the payload to return. diff --git a/crates/payload/primitives/src/payload.rs b/crates/payload/primitives/src/payload.rs index 9648a5675c0..709a37768f4 100644 --- a/crates/payload/primitives/src/payload.rs +++ b/crates/payload/primitives/src/payload.rs @@ -2,7 +2,7 @@ use crate::{MessageValidationKind, PayloadAttributes}; use alloc::vec::Vec; -use alloy_eips::{eip4895::Withdrawal, eip7685::Requests}; +use alloy_eips::{eip1898::BlockWithParent, eip4895::Withdrawal, eip7685::Requests, BlockNumHash}; use alloy_primitives::B256; use alloy_rpc_types_engine::ExecutionData; use core::fmt::Debug; @@ -25,6 +25,16 @@ pub trait ExecutionPayload: /// Returns this block's number (height). fn block_number(&self) -> u64; + /// Returns this block's number hash. + fn num_hash(&self) -> BlockNumHash { + BlockNumHash::new(self.block_number(), self.block_hash()) + } + + /// Returns a [`BlockWithParent`] for this block. + fn block_with_parent(&self) -> BlockWithParent { + BlockWithParent::new(self.parent_hash(), self.num_hash()) + } + /// Returns the withdrawals included in this payload. /// /// Returns `None` for pre-Shanghai blocks. diff --git a/crates/payload/primitives/src/traits.rs b/crates/payload/primitives/src/traits.rs index 9d712acc827..4301fbe1961 100644 --- a/crates/payload/primitives/src/traits.rs +++ b/crates/payload/primitives/src/traits.rs @@ -9,7 +9,9 @@ use alloy_primitives::{Address, B256, U256}; use alloy_rpc_types_engine::{PayloadAttributes as EthPayloadAttributes, PayloadId}; use core::fmt; use reth_chain_state::ExecutedBlockWithTrieUpdates; -use reth_primitives_traits::{NodePrimitives, SealedBlock}; +use reth_primitives_traits::{NodePrimitives, SealedBlock, SealedHeader}; + +use crate::PayloadBuilderError; /// Represents a successfully built execution payload (block). /// @@ -44,11 +46,11 @@ pub trait BuiltPayload: Send + Sync + fmt::Debug { /// /// Extends basic payload attributes with additional context needed during the /// building process, tracking in-progress payload jobs and their parameters. -pub trait PayloadBuilderAttributes: Send + Sync + fmt::Debug { +pub trait PayloadBuilderAttributes: Send + Sync + Unpin + fmt::Debug + 'static { /// The external payload attributes format this type can be constructed from. - type RpcPayloadAttributes; + type RpcPayloadAttributes: Send + Sync + 'static; /// The error type used in [`PayloadBuilderAttributes::try_new`]. - type Error: core::error::Error; + type Error: core::error::Error + Send + Sync + 'static; /// Constructs new builder attributes from external payload attributes. /// @@ -159,3 +161,15 @@ pub trait PayloadAttributesBuilder: Send + Sync + 'static { /// Constructs new payload attributes for the given timestamp. fn build(&self, timestamp: u64) -> Attributes; } + +/// Trait to build the EVM environment for the next block from the given payload attributes. +/// +/// Accepts payload attributes from CL, parent header and additional payload builder context. +pub trait BuildNextEnv: Sized { + /// Builds the EVM environment for the next block from the given payload attributes. + fn build_next_env( + attributes: &Attributes, + parent: &SealedHeader
, + ctx: &Ctx, + ) -> Result; +} diff --git a/crates/payload/validator/src/cancun.rs b/crates/payload/validator/src/cancun.rs index 5a4deb139fd..cea8aca5144 100644 --- a/crates/payload/validator/src/cancun.rs +++ b/crates/payload/validator/src/cancun.rs @@ -11,14 +11,15 @@ use reth_primitives_traits::{AlloyBlockHeader, Block, SealedBlock}; /// - doesn't contain EIP-4844 transactions unless Cancun is active /// - checks blob versioned hashes in block and sidecar match #[inline] -pub fn ensure_well_formed_fields( +pub fn ensure_well_formed_fields( block: &SealedBlock, cancun_sidecar_fields: Option<&CancunPayloadFields>, is_cancun_active: bool, ) -> Result<(), PayloadError> where T: Transaction + Typed2718, - B: Block>, + H: AlloyBlockHeader, + B: Block
>, { ensure_well_formed_header_and_sidecar_fields(block, cancun_sidecar_fields, is_cancun_active)?; ensure_well_formed_transactions_field_with_sidecar( @@ -72,8 +73,8 @@ pub fn ensure_well_formed_header_and_sidecar_fields( /// - doesn't contain EIP-4844 transactions unless Cancun is active /// - checks blob versioned hashes in block and sidecar match #[inline] -pub fn ensure_well_formed_transactions_field_with_sidecar( - block_body: &BlockBody, +pub fn ensure_well_formed_transactions_field_with_sidecar( + block_body: &BlockBody, cancun_sidecar_fields: Option<&CancunPayloadFields>, is_cancun_active: bool, ) -> Result<(), PayloadError> { @@ -89,8 +90,8 @@ pub fn ensure_well_formed_transactions_field_with_sidecar( - block_body: &BlockBody, +pub fn ensure_matching_blob_versioned_hashes( + block_body: &BlockBody, cancun_sidecar_fields: Option<&CancunPayloadFields>, ) -> Result<(), PayloadError> { let num_blob_versioned_hashes = block_body.blob_versioned_hashes_iter().count(); diff --git a/crates/payload/validator/src/prague.rs b/crates/payload/validator/src/prague.rs index d663469a826..9dff206d74f 100644 --- a/crates/payload/validator/src/prague.rs +++ b/crates/payload/validator/src/prague.rs @@ -10,8 +10,8 @@ use alloy_rpc_types_engine::{PayloadError, PraguePayloadFields}; /// - Prague fields are not present unless Prague is active /// - does not contain EIP-7702 transactions if Prague is not active #[inline] -pub fn ensure_well_formed_fields( - block_body: &BlockBody, +pub fn ensure_well_formed_fields( + block_body: &BlockBody, prague_fields: Option<&PraguePayloadFields>, is_prague_active: bool, ) -> Result<(), PayloadError> { @@ -36,8 +36,8 @@ pub const fn ensure_well_formed_sidecar_fields( /// Checks that transactions field doesn't contain EIP-7702 transactions if Prague is not /// active. #[inline] -pub fn ensure_well_formed_transactions_field( - block_body: &BlockBody, +pub fn ensure_well_formed_transactions_field( + block_body: &BlockBody, is_prague_active: bool, ) -> Result<(), PayloadError> { if !is_prague_active && block_body.has_eip7702_transactions() { diff --git a/crates/primitives-traits/Cargo.toml b/crates/primitives-traits/Cargo.toml index ed4115b43df..2bf898ca063 100644 --- a/crates/primitives-traits/Cargo.toml +++ b/crates/primitives-traits/Cargo.toml @@ -133,6 +133,8 @@ serde-bincode-compat = [ "alloy-eips/serde-bincode-compat", "op-alloy-consensus?/serde", "op-alloy-consensus?/serde-bincode-compat", + "alloy-genesis/serde-bincode-compat", + "alloy-rpc-types-eth?/serde-bincode-compat", "scroll-alloy-consensus?/serde-bincode-compat", ] serde = [ diff --git a/crates/primitives-traits/src/block/recovered.rs b/crates/primitives-traits/src/block/recovered.rs index 7da2bcf3733..599aea1f8ac 100644 --- a/crates/primitives-traits/src/block/recovered.rs +++ b/crates/primitives-traits/src/block/recovered.rs @@ -560,7 +560,7 @@ impl RecoveredBlock { self.block.header_mut() } - /// Returns a mutable reference to the header. + /// Returns a mutable reference to the body. 
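Stepping back to the `BuildNextEnv` trait introduced in the payload-primitives hunk above: it factors "attributes + parent header + builder context → next EVM environment" into one reusable hook. A minimal self-contained sketch of its shape, with stand-in types and a `String` error in place of `PayloadBuilderError`:

    // Stand-in types sketching the BuildNextEnv contract from this diff.
    struct Attributes { timestamp: u64 }
    struct Header { gas_limit: u64 }
    struct Ctx;

    trait BuildNextEnv<A, H, C>: Sized {
        fn build_next_env(attributes: &A, parent: &H, ctx: &C) -> Result<Self, String>;
    }

    struct NextEnv { timestamp: u64, gas_limit: u64 }

    impl BuildNextEnv<Attributes, Header, Ctx> for NextEnv {
        fn build_next_env(a: &Attributes, parent: &Header, _ctx: &Ctx) -> Result<Self, String> {
            // Carry the parent's limits forward; the timestamp comes from the CL.
            Ok(Self { timestamp: a.timestamp, gas_limit: parent.gas_limit })
        }
    }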
pub const fn block_mut(&mut self) -> &mut B::Body { self.block.body_mut() } @@ -591,15 +591,12 @@ mod rpc_compat { use super::{ Block as BlockTrait, BlockBody as BlockBodyTrait, RecoveredBlock, SignedTransaction, }; - use crate::block::error::BlockRecoveryError; + use crate::{block::error::BlockRecoveryError, SealedHeader}; use alloc::vec::Vec; use alloy_consensus::{ transaction::Recovered, Block as CBlock, BlockBody, BlockHeader, Sealable, }; - use alloy_primitives::U256; - use alloy_rpc_types_eth::{ - Block, BlockTransactions, BlockTransactionsKind, Header, TransactionInfo, - }; + use alloy_rpc_types_eth::{Block, BlockTransactions, BlockTransactionsKind, TransactionInfo}; impl RecoveredBlock where @@ -609,11 +606,16 @@ mod rpc_compat { /// /// The `tx_resp_builder` closure transforms each transaction into the desired response /// type. - pub fn into_rpc_block( + /// + /// `header_builder` transforms the block header into RPC representation. It takes the + /// consensus header and RLP length of the block which is a common dependency of RPC + /// headers. + pub fn into_rpc_block( self, kind: BlockTransactionsKind, tx_resp_builder: F, - ) -> Result>, E> + header_builder: impl FnOnce(SealedHeader, usize) -> Result, + ) -> Result, E> where F: Fn( Recovered<<::Body as BlockBodyTrait>::Transaction>, @@ -621,8 +623,10 @@ mod rpc_compat { ) -> Result, { match kind { - BlockTransactionsKind::Hashes => Ok(self.into_rpc_block_with_tx_hashes()), - BlockTransactionsKind::Full => self.into_rpc_block_full(tx_resp_builder), + BlockTransactionsKind::Hashes => self.into_rpc_block_with_tx_hashes(header_builder), + BlockTransactionsKind::Full => { + self.into_rpc_block_full(tx_resp_builder, header_builder) + } } } @@ -633,11 +637,16 @@ mod rpc_compat { /// /// The `tx_resp_builder` closure transforms each transaction into the desired response /// type. - pub fn clone_into_rpc_block( + /// + /// `header_builder` transforms the block header into RPC representation. It takes the + /// consensus header and RLP length of the block which is a common dependency of RPC + /// headers. + pub fn clone_into_rpc_block( &self, kind: BlockTransactionsKind, tx_resp_builder: F, - ) -> Result>, E> + header_builder: impl FnOnce(SealedHeader, usize) -> Result, + ) -> Result, E> where F: Fn( Recovered<<::Body as BlockBodyTrait>::Transaction>, @@ -645,8 +654,10 @@ mod rpc_compat { ) -> Result, { match kind { - BlockTransactionsKind::Hashes => Ok(self.to_rpc_block_with_tx_hashes()), - BlockTransactionsKind::Full => self.clone().into_rpc_block_full(tx_resp_builder), + BlockTransactionsKind::Hashes => self.to_rpc_block_with_tx_hashes(header_builder), + BlockTransactionsKind::Full => { + self.clone().into_rpc_block_full(tx_resp_builder, header_builder) + } } } @@ -654,7 +665,10 @@ mod rpc_compat { /// /// Returns [`BlockTransactions::Hashes`] containing only transaction hashes. /// Efficiently clones only necessary parts, not the entire block. 
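Callers of these conversion helpers that want the old behavior can pass a `header_builder` that does exactly what the removed default did. A hedged sketch, assuming the alloy types referenced in this file (`SealedHeader` is already imported by this hunk) and ignoring trait bounds reth may additionally require:

    use alloy_primitives::U256;
    use alloy_rpc_types_eth::Header;

    // Mirrors the removed default: consensus header -> RPC header, with
    // total_difficulty left unset and the block's RLP size attached.
    // Infallible because this particular builder cannot error.
    fn default_header_builder<H>(
        header: SealedHeader<H>,
        rlp_length: usize,
    ) -> Result<Header<H>, core::convert::Infallible> {
        Ok(Header::from_consensus(header.into(), None, Some(U256::from(rlp_length))))
    }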
- pub fn to_rpc_block_with_tx_hashes(&self) -> Block> { + pub fn to_rpc_block_with_tx_hashes( + &self, + header_builder: impl FnOnce(SealedHeader, usize) -> Result, + ) -> Result, E> { let transactions = self.body().transaction_hashes_iter().copied().collect(); let rlp_length = self.rlp_length(); let header = self.clone_sealed_header(); @@ -663,16 +677,19 @@ mod rpc_compat { let transactions = BlockTransactions::Hashes(transactions); let uncles = self.body().ommers().unwrap_or(&[]).iter().map(|h| h.hash_slow()).collect(); - let header = Header::from_consensus(header.into(), None, Some(U256::from(rlp_length))); + let header = header_builder(header, rlp_length)?; - Block { header, uncles, transactions, withdrawals } + Ok(Block { header, uncles, transactions, withdrawals }) } /// Converts the block into an RPC [`Block`] with transaction hashes. /// /// Consumes self and returns [`BlockTransactions::Hashes`] containing only transaction /// hashes. - pub fn into_rpc_block_with_tx_hashes(self) -> Block> { + pub fn into_rpc_block_with_tx_hashes( + self, + f: impl FnOnce(SealedHeader, usize) -> Result, + ) -> Result, E> { let transactions = self.body().transaction_hashes_iter().copied().collect(); let rlp_length = self.rlp_length(); let (header, body) = self.into_sealed_block().split_sealed_header_body(); @@ -680,19 +697,20 @@ mod rpc_compat { let transactions = BlockTransactions::Hashes(transactions); let uncles = ommers.into_iter().map(|h| h.hash_slow()).collect(); - let header = Header::from_consensus(header.into(), None, Some(U256::from(rlp_length))); + let header = f(header, rlp_length)?; - Block { header, uncles, transactions, withdrawals } + Ok(Block { header, uncles, transactions, withdrawals }) } /// Converts the block into an RPC [`Block`] with full transaction objects. /// /// Returns [`BlockTransactions::Full`] with complete transaction data. /// The `tx_resp_builder` closure transforms each transaction with its metadata. - pub fn into_rpc_block_full( + pub fn into_rpc_block_full( self, tx_resp_builder: F, - ) -> Result>, E> + header_builder: impl FnOnce(SealedHeader, usize) -> Result, + ) -> Result, E> where F: Fn( Recovered<<::Body as BlockBodyTrait>::Transaction>, @@ -727,8 +745,7 @@ mod rpc_compat { let transactions = BlockTransactions::Full(transactions); let uncles = ommers.into_iter().map(|h| h.hash_slow()).collect(); - let header = - Header::from_consensus(header.into(), None, Some(U256::from(block_length))); + let header = header_builder(header, block_length)?; let block = Block { header, uncles, transactions, withdrawals }; diff --git a/crates/primitives-traits/src/block/sealed.rs b/crates/primitives-traits/src/block/sealed.rs index dd0bc0b6652..9e160728192 100644 --- a/crates/primitives-traits/src/block/sealed.rs +++ b/crates/primitives-traits/src/block/sealed.rs @@ -349,7 +349,7 @@ impl SealedBlock { self.header.set_hash(hash) } - /// Returns a mutable reference to the header. + /// Returns a mutable reference to the body. pub const fn body_mut(&mut self) -> &mut B::Body { &mut self.body } diff --git a/crates/primitives-traits/src/extended.rs b/crates/primitives-traits/src/extended.rs index e235f47033e..b2731aa5a96 100644 --- a/crates/primitives-traits/src/extended.rs +++ b/crates/primitives-traits/src/extended.rs @@ -25,7 +25,7 @@ macro_rules! delegate { /// An enum that combines two different transaction types. 
/// -/// This is intended to be used to extend existing presets, for example the ethereum or optstack +/// This is intended to be used to extend existing presets, for example the ethereum or opstack /// transaction types and receipts /// /// Note: The [`Extended::Other`] variants must not overlap with the builtin one, transaction @@ -149,6 +149,10 @@ where fn recover_signer_unchecked(&self) -> Result { delegate!(self => tx.recover_signer_unchecked()) } + + fn recover_unchecked_with_buf(&self, buf: &mut Vec) -> Result { + delegate!(self => tx.recover_unchecked_with_buf(buf)) + } } impl SignedTransaction for Extended @@ -162,13 +166,6 @@ where Self::Other(tx) => tx.tx_hash(), } } - - fn recover_signer_unchecked_with_buf( - &self, - buf: &mut Vec, - ) -> Result { - delegate!(self => tx.recover_signer_unchecked_with_buf(buf)) - } } impl Typed2718 for Extended diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 60d265d2be6..60f83532dfc 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -50,7 +50,7 @@ //! #### Naming //! //! The types in this crate support multiple recovery functions, e.g. -//! [`SealedBlock::try_recover_unchecked`] and [`SealedBlock::try_recover_unchecked`]. The `_unchecked` suffix indicates that this function recovers the signer _without ensuring that the signature has a low `s` value_, in other words this rule introduced in [EIP-2](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2.md) is ignored. +//! [`SealedBlock::try_recover`] and [`SealedBlock::try_recover_unchecked`]. The `_unchecked` suffix indicates that this function recovers the signer _without ensuring that the signature has a low `s` value_, in other words this rule introduced in [EIP-2](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2.md) is ignored. //! Hence this function is necessary when dealing with pre EIP-2 transactions on the ethereum //! mainnet. Newer transactions must always be recovered with the regular `recover` functions, see //! also [`recover_signer`](crypto::secp256k1::recover_signer). diff --git a/crates/primitives-traits/src/transaction/signed.rs b/crates/primitives-traits/src/transaction/signed.rs index 56ce917a33b..d45edc3031b 100644 --- a/crates/primitives-traits/src/transaction/signed.rs +++ b/crates/primitives-traits/src/transaction/signed.rs @@ -1,10 +1,7 @@ //! API of a signed transaction. -use crate::{ - crypto::secp256k1::recover_signer_unchecked, InMemorySize, MaybeCompact, MaybeSerde, - MaybeSerdeBincodeCompat, -}; -use alloc::{fmt, vec::Vec}; +use crate::{InMemorySize, MaybeCompact, MaybeSerde, MaybeSerdeBincodeCompat}; +use alloc::fmt; use alloy_consensus::{ transaction::{Recovered, RlpEcdsaEncodableTx, SignerRecoverable}, EthereumTxEnvelope, SignableTransaction, @@ -77,14 +74,6 @@ pub trait SignedTransaction: self.recover_signer_unchecked() } - /// Same as [`SignerRecoverable::recover_signer_unchecked`] but receives a buffer to operate on. - /// This is used during batch recovery to avoid allocating a new buffer for each - /// transaction. - fn recover_signer_unchecked_with_buf( - &self, - buf: &mut Vec, - ) -> Result; - /// Calculate transaction hash, eip2728 transaction does not contain rlp header and start with /// tx type. fn recalculate_hash(&self) -> B256 { @@ -97,6 +86,12 @@ pub trait SignedTransaction: self.recover_signer().map(|signer| Recovered::new_unchecked(self.clone(), signer)) } + /// Tries to recover signer and return [`Recovered`] by cloning the type. 
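This diff moves buffer-reusing recovery down to `SignerRecoverable::recover_unchecked_with_buf` (delegated by `Extended` above) and deletes the bespoke `recover_signer_unchecked_with_buf` bodies from every envelope in the removals that follow. The buffer parameter exists for batch recovery, where one scratch allocation is reused across transactions; a hedged sketch of that pattern, assuming the alloy trait paths:

    use alloy_consensus::{crypto::RecoveryError, transaction::SignerRecoverable};
    use alloy_primitives::Address;

    // Recover a batch with a single scratch buffer instead of one fresh
    // allocation per signature preimage.
    fn recover_all<T: SignerRecoverable>(txs: &[T]) -> Result<Vec<Address>, RecoveryError> {
        let mut buf = Vec::new();
        let mut signers = Vec::with_capacity(txs.len());
        for tx in txs {
            buf.clear();
            signers.push(tx.recover_unchecked_with_buf(&mut buf)?);
        }
        Ok(signers)
    }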
+ #[auto_impl(keep_default_for(&, Arc))] + fn try_clone_into_recovered_unchecked(&self) -> Result, RecoveryError> { + self.recover_signer_unchecked().map(|signer| Recovered::new_unchecked(self.clone(), signer)) + } + /// Tries to recover signer and return [`Recovered`]. /// /// Returns `Err(Self)` if the transaction's signature is invalid, see also @@ -150,21 +145,6 @@ where Self::Eip4844(tx) => tx.hash(), } } - - fn recover_signer_unchecked_with_buf( - &self, - buf: &mut Vec, - ) -> Result { - match self { - Self::Legacy(tx) => tx.tx().encode_for_signing(buf), - Self::Eip2930(tx) => tx.tx().encode_for_signing(buf), - Self::Eip1559(tx) => tx.tx().encode_for_signing(buf), - Self::Eip7702(tx) => tx.tx().encode_for_signing(buf), - Self::Eip4844(tx) => tx.tx().encode_for_signing(buf), - } - let signature_hash = keccak256(buf); - recover_signer_unchecked(self.signature(), signature_hash) - } } #[cfg(feature = "op")] @@ -181,20 +161,6 @@ mod op { Self::Eip7702(tx) => tx.hash(), } } - - fn recover_signer_unchecked_with_buf( - &self, - buf: &mut Vec, - ) -> Result { - match self { - Self::Legacy(tx) => tx.tx().encode_for_signing(buf), - Self::Eip2930(tx) => tx.tx().encode_for_signing(buf), - Self::Eip1559(tx) => tx.tx().encode_for_signing(buf), - Self::Eip7702(tx) => tx.tx().encode_for_signing(buf), - } - let signature_hash = keccak256(buf); - recover_signer_unchecked(self.signature(), signature_hash) - } } impl SignedTransaction for OpTxEnvelope { @@ -207,28 +173,6 @@ mod op { Self::Deposit(tx) => tx.hash_ref(), } } - - fn recover_signer_unchecked_with_buf( - &self, - buf: &mut Vec, - ) -> Result { - match self { - Self::Deposit(tx) => return Ok(tx.from), - Self::Legacy(tx) => tx.tx().encode_for_signing(buf), - Self::Eip2930(tx) => tx.tx().encode_for_signing(buf), - Self::Eip1559(tx) => tx.tx().encode_for_signing(buf), - Self::Eip7702(tx) => tx.tx().encode_for_signing(buf), - } - let signature_hash = keccak256(buf); - let signature = match self { - Self::Legacy(tx) => tx.signature(), - Self::Eip2930(tx) => tx.signature(), - Self::Eip1559(tx) => tx.signature(), - Self::Eip7702(tx) => tx.signature(), - Self::Deposit(_) => unreachable!("Deposit transactions should not be handled here"), - }; - recover_signer_unchecked(signature, signature_hash) - } } } @@ -246,20 +190,6 @@ mod scroll { Self::Eip7702(tx) => tx.hash(), } } - - fn recover_signer_unchecked_with_buf( - &self, - buf: &mut Vec, - ) -> Result { - match self { - Self::Legacy(tx) => tx.tx().encode_for_signing(buf), - Self::Eip2930(tx) => tx.tx().encode_for_signing(buf), - Self::Eip1559(tx) => tx.tx().encode_for_signing(buf), - Self::Eip7702(tx) => tx.tx().encode_for_signing(buf), - } - let signature_hash = keccak256(buf); - recover_signer_unchecked(self.signature(), signature_hash) - } } impl SignedTransaction for ScrollTxEnvelope { @@ -272,21 +202,5 @@ mod scroll { Self::L1Message(tx) => tx.hash_ref(), } } - - fn recover_signer_unchecked_with_buf( - &self, - buf: &mut Vec, - ) -> Result { - match self { - Self::Legacy(tx) => tx.tx().encode_for_signing(buf), - Self::Eip2930(tx) => tx.tx().encode_for_signing(buf), - Self::Eip1559(tx) => tx.tx().encode_for_signing(buf), - Self::Eip7702(tx) => tx.tx().encode_for_signing(buf), - Self::L1Message(tx) => return Ok(tx.sender), - } - let signature_hash = keccak256(buf); - let signature = self.signature().expect("handled L1 message in previous match"); - recover_signer_unchecked(&signature, signature_hash) - } } } diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 
4f1aa64b30c..67fae820d93 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -101,6 +101,7 @@ serde-bincode-compat = [ "alloy-consensus/serde-bincode-compat", "reth-primitives-traits/serde-bincode-compat", "reth-ethereum-primitives/serde-bincode-compat", + "alloy-genesis/serde-bincode-compat", ] [[bench]] diff --git a/crates/prune/prune/src/builder.rs b/crates/prune/prune/src/builder.rs index f5bb95df3f5..509ef6a5be8 100644 --- a/crates/prune/prune/src/builder.rs +++ b/crates/prune/prune/src/builder.rs @@ -82,7 +82,7 @@ impl PrunerBuilder { ProviderRW: PruneCheckpointWriter + BlockReader + StaticFileProviderFactory< - Primitives: NodePrimitives, + Primitives: NodePrimitives, >, > + StaticFileProviderFactory< Primitives = ::Primitives, @@ -107,8 +107,9 @@ impl PrunerBuilder { static_file_provider: StaticFileProvider, ) -> Pruner where - Provider: StaticFileProviderFactory> - + DBProvider + Provider: StaticFileProviderFactory< + Primitives: NodePrimitives, + > + DBProvider + BlockReader + PruneCheckpointWriter, { diff --git a/crates/prune/prune/src/segments/set.rs b/crates/prune/prune/src/segments/set.rs index 52e6ee75442..7d5db03714b 100644 --- a/crates/prune/prune/src/segments/set.rs +++ b/crates/prune/prune/src/segments/set.rs @@ -47,8 +47,9 @@ impl SegmentSet { impl SegmentSet where - Provider: StaticFileProviderFactory> - + DBProvider + Provider: StaticFileProviderFactory< + Primitives: NodePrimitives, + > + DBProvider + PruneCheckpointWriter + BlockReader, { diff --git a/crates/prune/prune/src/segments/static_file/headers.rs b/crates/prune/prune/src/segments/static_file/headers.rs index be4e50fe48b..d8b7e6a5398 100644 --- a/crates/prune/prune/src/segments/static_file/headers.rs +++ b/crates/prune/prune/src/segments/static_file/headers.rs @@ -7,9 +7,11 @@ use alloy_primitives::BlockNumber; use itertools::Itertools; use reth_db_api::{ cursor::{DbCursorRO, RangeWalker}, + table::Value, tables, transaction::DbTxMut, }; +use reth_primitives_traits::NodePrimitives; use reth_provider::{providers::StaticFileProvider, DBProvider, StaticFileProviderFactory}; use reth_prune_types::{ PruneMode, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, @@ -32,8 +34,10 @@ impl Headers { } } -impl> Segment - for Headers +impl Segment for Headers +where + Provider: StaticFileProviderFactory> + + DBProvider, { fn segment(&self) -> PruneSegment { PruneSegment::Headers @@ -63,7 +67,12 @@ impl> Segment()?; + // let mut headers_cursor = provider.tx_ref().cursor_write::()?; + let mut headers_cursor = provider + .tx_ref() + .cursor_write::::BlockHeader>>( + )?; + let mut header_tds_cursor = provider.tx_ref().cursor_write::()?; let mut canonical_headers_cursor = @@ -108,11 +117,16 @@ type Walker<'a, Provider, T> = #[allow(missing_debug_implementations)] struct HeaderTablesIter<'a, Provider> where - Provider: DBProvider, + Provider: StaticFileProviderFactory> + + DBProvider, { provider: &'a Provider, limiter: &'a mut PruneLimiter, - headers_walker: Walker<'a, Provider, tables::Headers>, + headers_walker: Walker< + 'a, + Provider, + tables::Headers<::BlockHeader>, + >, header_tds_walker: Walker<'a, Provider, tables::HeaderTerminalDifficulties>, canonical_headers_walker: Walker<'a, Provider, tables::CanonicalHeaders>, } @@ -124,12 +138,17 @@ struct HeaderTablesIterItem { impl<'a, Provider> HeaderTablesIter<'a, Provider> where - Provider: DBProvider, + Provider: StaticFileProviderFactory> + + DBProvider, { const fn new( provider: &'a Provider, limiter: &'a mut PruneLimiter, - 
headers_walker: Walker<'a, Provider, tables::Headers>, + headers_walker: Walker< + 'a, + Provider, + tables::Headers<::BlockHeader>, + >, header_tds_walker: Walker<'a, Provider, tables::HeaderTerminalDifficulties>, canonical_headers_walker: Walker<'a, Provider, tables::CanonicalHeaders>, ) -> Self { @@ -139,7 +158,8 @@ where impl Iterator for HeaderTablesIter<'_, Provider> where - Provider: DBProvider, + Provider: StaticFileProviderFactory> + + DBProvider, { type Item = Result; fn next(&mut self) -> Option { diff --git a/crates/ress/provider/src/recorder.rs b/crates/ress/provider/src/recorder.rs index b692dd9a4d1..ec5afacbf0c 100644 --- a/crates/ress/provider/src/recorder.rs +++ b/crates/ress/provider/src/recorder.rs @@ -8,6 +8,7 @@ use reth_trie::{HashedPostState, HashedStorage}; /// The state witness recorder that records all state accesses during execution. /// It does so by implementing the [`reth_revm::Database`] and recording accesses of accounts and /// slots. +#[derive(Debug)] pub(crate) struct StateWitnessRecorderDatabase { database: D, state: HashedPostState, diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 95ffe22f05a..629b5faf00d 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -27,7 +27,6 @@ revm.workspace = true [dev-dependencies] reth-trie.workspace = true reth-ethereum-forks.workspace = true -alloy-primitives.workspace = true alloy-consensus.workspace = true [features] diff --git a/crates/revm/src/database.rs b/crates/revm/src/database.rs index 50415815759..6b829c3d734 100644 --- a/crates/revm/src/database.rs +++ b/crates/revm/src/database.rs @@ -61,7 +61,7 @@ impl EvmStateProvider for T { /// A [Database] and [`DatabaseRef`] implementation that uses [`EvmStateProvider`] as the underlying /// data source. -#[derive(Debug, Clone)] +#[derive(Clone)] pub struct StateProviderDatabase(pub DB); impl StateProviderDatabase { @@ -76,6 +76,12 @@ impl StateProviderDatabase { } } +impl core::fmt::Debug for StateProviderDatabase { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("StateProviderDatabase").finish_non_exhaustive() + } +} + impl AsRef for StateProviderDatabase { fn as_ref(&self) -> &DB { self diff --git a/crates/revm/src/either.rs b/crates/revm/src/either.rs deleted file mode 100644 index e26d2ccb721..00000000000 --- a/crates/revm/src/either.rs +++ /dev/null @@ -1,49 +0,0 @@ -use alloy_primitives::{Address, B256, U256}; -use revm::{bytecode::Bytecode, state::AccountInfo, Database}; - -/// An enum type that can hold either of two different [`Database`] implementations. -/// -/// This allows flexible usage of different [`Database`] types in the same context. -#[derive(Debug, Clone)] -pub enum Either { - /// A value of type `L`. - Left(L), - /// A value of type `R`. 
- Right(R), -} - -impl Database for Either -where - L: Database, - R: Database, -{ - type Error = L::Error; - - fn basic(&mut self, address: Address) -> Result, Self::Error> { - match self { - Self::Left(db) => db.basic(address), - Self::Right(db) => db.basic(address), - } - } - - fn code_by_hash(&mut self, code_hash: B256) -> Result { - match self { - Self::Left(db) => db.code_by_hash(code_hash), - Self::Right(db) => db.code_by_hash(code_hash), - } - } - - fn storage(&mut self, address: Address, index: U256) -> Result { - match self { - Self::Left(db) => db.storage(address, index), - Self::Right(db) => db.storage(address, index), - } - } - - fn block_hash(&mut self, number: u64) -> Result { - match self { - Self::Left(db) => db.block_hash(number), - Self::Right(db) => db.block_hash(number), - } - } -} diff --git a/crates/revm/src/lib.rs b/crates/revm/src/lib.rs index caaae237c8a..ecc5b576a84 100644 --- a/crates/revm/src/lib.rs +++ b/crates/revm/src/lib.rs @@ -30,9 +30,6 @@ pub mod test_utils; // Convenience re-exports. pub use revm::{self, database::State, *}; -/// Either type for flexible usage of different database types in the same context. -pub mod either; - /// Helper types for execution witness generation. #[cfg(feature = "witness")] pub mod witness; diff --git a/crates/rpc/ipc/src/server/connection.rs b/crates/rpc/ipc/src/server/connection.rs index 0734296b98e..e8b827078ba 100644 --- a/crates/rpc/ipc/src/server/connection.rs +++ b/crates/rpc/ipc/src/server/connection.rs @@ -1,4 +1,4 @@ -//! A IPC connection. +//! An IPC connection. use crate::stream_codec::StreamCodec; use futures::{stream::FuturesUnordered, FutureExt, Sink, Stream}; diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index e9e00a7f6c0..ece2eef7803 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -139,7 +139,20 @@ where .to_fs_name::() .and_then(|name| ListenerOptions::new().name(name).create_tokio()) { - Ok(listener) => listener, + Ok(listener) => { + #[cfg(unix)] + { + // set permissions only on unix + use std::os::unix::fs::PermissionsExt; + if let Some(perms_str) = &self.cfg.ipc_socket_permissions { + if let Ok(mode) = u32::from_str_radix(&perms_str.replace("0o", ""), 8) { + let perms = std::fs::Permissions::from_mode(mode); + let _ = std::fs::set_permissions(&self.endpoint, perms); + } + } + } + listener + } Err(err) => { on_ready .send(Err(IpcServerStartError { endpoint: self.endpoint.clone(), source: err })) @@ -550,6 +563,8 @@ pub struct Settings { message_buffer_capacity: u32, /// Custom tokio runtime to run the server on. tokio_runtime: Option, + /// The permissions to create the IPC socket with. + ipc_socket_permissions: Option, } impl Default for Settings { @@ -562,6 +577,7 @@ impl Default for Settings { max_subscriptions_per_connection: 1024, message_buffer_capacity: 1024, tokio_runtime: None, + ipc_socket_permissions: None, } } } @@ -648,6 +664,12 @@ impl Builder { self } + /// Sets the permissions for the IPC socket file. + pub fn set_ipc_socket_permissions(mut self, permissions: Option) -> Self { + self.settings.ipc_socket_permissions = permissions; + self + } + /// Configure custom `subscription ID` provider for the server to use /// to when getting new subscription calls. 
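The IPC permission string handled above is parsed leniently: an optional `0o` prefix is stripped and the remainder is read as octal. A self-contained sketch of that step:

    // Mirrors the parsing in the hunk above: "0o777" and "777" both work.
    fn parse_mode(perms_str: &str) -> Option<u32> {
        u32::from_str_radix(&perms_str.replace("0o", ""), 8).ok()
    }

    fn main() {
        assert_eq!(parse_mode("0o755"), Some(0o755));
        assert_eq!(parse_mode("777"), Some(0o777));
    }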
/// @@ -768,6 +790,24 @@ mod tests { use tokio::sync::broadcast; use tokio_stream::wrappers::BroadcastStream; + #[tokio::test] + #[cfg(unix)] + async fn test_ipc_socket_permissions() { + use std::os::unix::fs::PermissionsExt; + let endpoint = &dummy_name(); + let perms = "0777"; + let server = Builder::default() + .set_ipc_socket_permissions(Some(perms.to_string())) + .build(endpoint.clone()); + let module = RpcModule::new(()); + let handle = server.start(module).await.unwrap(); + tokio::spawn(handle.stopped()); + + let meta = std::fs::metadata(endpoint).unwrap(); + let perms = meta.permissions(); + assert_eq!(perms.mode() & 0o777, 0o777); + } + async fn pipe_from_stream_with_bounded_buffer( pending: PendingSubscriptionSink, stream: BroadcastStream, diff --git a/crates/rpc/ipc/src/stream_codec.rs b/crates/rpc/ipc/src/stream_codec.rs index 4205081e3de..aa5cda16b7f 100644 --- a/crates/rpc/ipc/src/stream_codec.rs +++ b/crates/rpc/ipc/src/stream_codec.rs @@ -209,7 +209,7 @@ mod tests { let request2 = codec .decode(&mut buf) .expect("There should be no error in first 2nd test") - .expect("There should be aa request in 2nd whitespace test"); + .expect("There should be a request in 2nd whitespace test"); // TODO: maybe actually trim it out assert_eq!(request2, "\n\n\n\n{ test: 2 }"); diff --git a/crates/rpc/rpc-api/src/debug.rs b/crates/rpc/rpc-api/src/debug.rs index 8aefda4767b..5dd7401782f 100644 --- a/crates/rpc/rpc-api/src/debug.rs +++ b/crates/rpc/rpc-api/src/debug.rs @@ -1,8 +1,9 @@ use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_genesis::ChainConfig; +use alloy_json_rpc::RpcObject; use alloy_primitives::{Address, Bytes, B256}; use alloy_rpc_types_debug::ExecutionWitness; -use alloy_rpc_types_eth::{transaction::TransactionRequest, Block, Bundle, StateContext}; +use alloy_rpc_types_eth::{Block, Bundle, StateContext}; use alloy_rpc_types_trace::geth::{ BlockTraceResult, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, TraceResult, }; @@ -12,7 +13,7 @@ use reth_trie_common::{updates::TrieUpdates, HashedPostState}; /// Debug rpc interface. #[cfg_attr(not(feature = "client"), rpc(server, namespace = "debug"))] #[cfg_attr(feature = "client", rpc(server, client, namespace = "debug"))] -pub trait DebugApi { +pub trait DebugApi { /// Returns an RLP-encoded header. 
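In the `debug` API above, `debug_traceCall` and `debug_traceCallMany` now take a generic `TxReq` request object in place of the hard-coded Ethereum `TransactionRequest` (note the new `RpcObject` import). Keeping the old wire format then becomes a type choice; a hedged one-liner, assuming the jsonrpsee macro generates a request-generic server trait as usual:

    use alloy_rpc_types_eth::transaction::TransactionRequest;

    // Ethereum nodes instantiate the request-generic debug API with the
    // standard request type, staying wire-compatible with the previous,
    // non-generic definition; other stacks substitute their own RpcObject
    // request struct.
    type EthereumDebugRequest = TransactionRequest;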
#[method(name = "getRawHeader")] async fn raw_header(&self, block_id: BlockId) -> RpcResult; @@ -105,7 +106,7 @@ pub trait DebugApi { #[method(name = "traceCall")] async fn debug_trace_call( &self, - request: TransactionRequest, + request: TxReq, block_id: Option, opts: Option, ) -> RpcResult; @@ -128,7 +129,7 @@ pub trait DebugApi { #[method(name = "traceCallMany")] async fn debug_trace_call_many( &self, - bundles: Vec, + bundles: Vec>, state_context: Option, opts: Option, ) -> RpcResult>>; diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index 6d9ba5211b6..088d18b9bf4 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -15,8 +15,7 @@ use alloy_rpc_types_engine::{ ExecutionPayloadV3, ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, }; use alloy_rpc_types_eth::{ - state::StateOverride, transaction::TransactionRequest, BlockOverrides, - EIP1186AccountProofResponse, Filter, Log, SyncStatus, + state::StateOverride, BlockOverrides, EIP1186AccountProofResponse, Filter, Log, SyncStatus, }; use alloy_serde::JsonStorageKey; use jsonrpsee::{core::RpcResult, proc_macros::rpc, RpcModule}; @@ -250,7 +249,7 @@ pub trait EngineApi { /// Specifically for the engine auth server: #[cfg_attr(not(feature = "client"), rpc(server, namespace = "eth"))] #[cfg_attr(feature = "client", rpc(server, client, namespace = "eth"))] -pub trait EngineEthApi { +pub trait EngineEthApi { /// Returns an object with data about the sync status or false. #[method(name = "syncing")] fn syncing(&self) -> RpcResult; @@ -267,7 +266,7 @@ pub trait EngineEthApi { #[method(name = "call")] async fn call( &self, - request: TransactionRequest, + request: TxReq, block_id: Option, state_overrides: Option, block_overrides: Option>, diff --git a/crates/rpc/rpc-api/src/mev.rs b/crates/rpc/rpc-api/src/mev.rs index 76de76a079b..274fcbf9316 100644 --- a/crates/rpc/rpc-api/src/mev.rs +++ b/crates/rpc/rpc-api/src/mev.rs @@ -1,6 +1,4 @@ -use alloy_rpc_types_mev::{ - EthBundleHash, SendBundleRequest, SimBundleOverrides, SimBundleResponse, -}; +use alloy_rpc_types_mev::{EthBundleHash, MevSendBundle, SimBundleOverrides, SimBundleResponse}; use jsonrpsee::proc_macros::rpc; /// Mev rpc interface. 
@@ -12,7 +10,7 @@ pub trait MevSimApi { #[method(name = "simBundle")] async fn sim_bundle( &self, - bundle: SendBundleRequest, + bundle: MevSendBundle, sim_overrides: SimBundleOverrides, ) -> jsonrpsee::core::RpcResult<SimBundleResponse>; } @@ -26,7 +24,7 @@ pub trait MevFullApi { #[method(name = "sendBundle")] async fn send_bundle( &self, - request: SendBundleRequest, + request: MevSendBundle, ) -> jsonrpsee::core::RpcResult<EthBundleHash>; /// Similar to `mev_sendBundle` but instead of submitting a bundle to the relay, it returns @@ -34,7 +32,7 @@ pub trait MevFullApi { #[method(name = "simBundle")] async fn sim_bundle( &self, - bundle: SendBundleRequest, + bundle: MevSendBundle, sim_overrides: SimBundleOverrides, ) -> jsonrpsee::core::RpcResult<SimBundleResponse>; } diff --git a/crates/rpc/rpc-api/src/trace.rs b/crates/rpc/rpc-api/src/trace.rs index 425fe1bb63e..1c4b148a098 100644 --- a/crates/rpc/rpc-api/src/trace.rs +++ b/crates/rpc/rpc-api/src/trace.rs @@ -1,8 +1,6 @@ use alloy_eips::BlockId; use alloy_primitives::{map::HashSet, Bytes, B256}; -use alloy_rpc_types_eth::{ - state::StateOverride, transaction::TransactionRequest, BlockOverrides, Index, -}; +use alloy_rpc_types_eth::{state::StateOverride, BlockOverrides, Index}; use alloy_rpc_types_trace::{ filter::TraceFilter, opcode::{BlockOpcodeGas, TransactionOpcodeGas}, @@ -13,12 +11,12 @@ use jsonrpsee::{core::RpcResult, proc_macros::rpc}; /// Ethereum trace API #[cfg_attr(not(feature = "client"), rpc(server, namespace = "trace"))] #[cfg_attr(feature = "client", rpc(server, client, namespace = "trace"))] -pub trait TraceApi { +pub trait TraceApi<TxReq: RpcObject> { /// Executes the given call and returns a number of possible traces for it. #[method(name = "call")] async fn trace_call( &self, - call: TransactionRequest, + call: TxReq, trace_types: HashSet<TraceType>, block_id: Option<BlockId>, state_overrides: Option<StateOverride>, @@ -31,7 +29,7 @@ pub trait TraceApi { #[method(name = "callMany")] async fn trace_call_many( &self, - calls: Vec<(TransactionRequest, HashSet<TraceType>)>, + calls: Vec<(TxReq, HashSet<TraceType>)>, block_id: Option<BlockId>, ) -> RpcResult<Vec<TraceResults>>; diff --git a/crates/rpc/rpc-api/src/validation.rs b/crates/rpc/rpc-api/src/validation.rs index 5e4f2e26143..9ff47b5eaf2 100644 --- a/crates/rpc/rpc-api/src/validation.rs +++ b/crates/rpc/rpc-api/src/validation.rs @@ -3,6 +3,7 @@ use alloy_rpc_types_beacon::relay::{ BuilderBlockValidationRequest, BuilderBlockValidationRequestV2, BuilderBlockValidationRequestV3, BuilderBlockValidationRequestV4, + BuilderBlockValidationRequestV5, }; use jsonrpsee::proc_macros::rpc; @@ -37,4 +38,11 @@ pub trait BlockSubmissionValidationApi { &self, request: BuilderBlockValidationRequestV4, ) -> jsonrpsee::core::RpcResult<()>; + + /// A request to validate a block submission.
+ #[method(name = "validateBuilderSubmissionV5")] + async fn validate_builder_submission_v5( + &self, + request: BuilderBlockValidationRequestV5, + ) -> jsonrpsee::core::RpcResult<()>; } diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index 281b32ef568..12da375f143 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -52,10 +52,7 @@ alloy-provider = { workspace = true, features = ["ws", "ipc"] } alloy-network.workspace = true [dev-dependencies] -reth-primitives-traits.workspace = true reth-ethereum-primitives.workspace = true -reth-chainspec.workspace = true -reth-network-api.workspace = true reth-network-peers.workspace = true reth-evm-ethereum.workspace = true reth-ethereum-engine-primitives.workspace = true @@ -67,6 +64,7 @@ reth-tracing.workspace = true reth-transaction-pool = { workspace = true, features = ["test-utils"] } reth-rpc-convert.workspace = true reth-engine-primitives.workspace = true +reth-engine-tree.workspace = true reth-node-ethereum.workspace = true alloy-primitives.workspace = true @@ -75,6 +73,5 @@ alloy-rpc-types-trace.workspace = true alloy-eips.workspace = true alloy-rpc-types-engine.workspace = true -tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } serde_json.workspace = true clap = { workspace = true, features = ["derive"] } diff --git a/crates/rpc/rpc-builder/src/auth.rs b/crates/rpc/rpc-builder/src/auth.rs index b1a4f4166bd..777081a7e6f 100644 --- a/crates/rpc/rpc-builder/src/auth.rs +++ b/crates/rpc/rpc-builder/src/auth.rs @@ -1,9 +1,13 @@ -use crate::error::{RpcError, ServerKind}; +use crate::{ + error::{RpcError, ServerKind}, + middleware::RethRpcMiddleware, +}; use http::header::AUTHORIZATION; use jsonrpsee::{ core::{client::SubscriptionClientT, RegisterMethodError}, http_client::HeaderMap, server::{AlreadyStoppedError, RpcModule}, + ws_client::RpcServiceBuilder, Methods, }; use reth_rpc_api::servers::*; @@ -21,7 +25,7 @@ pub use reth_ipc::server::Builder as IpcServerBuilder; /// Server configuration for the auth server. #[derive(Debug)] -pub struct AuthServerConfig { +pub struct AuthServerConfig { /// Where the server should listen. pub(crate) socket_addr: SocketAddr, /// The secret for the auth layer of the server. @@ -32,6 +36,8 @@ pub struct AuthServerConfig { pub(crate) ipc_server_config: Option>, /// IPC endpoint pub(crate) ipc_endpoint: Option, + /// Configurable RPC middleware + pub(crate) rpc_middleware: RpcMiddleware, } // === impl AuthServerConfig === @@ -41,24 +47,51 @@ impl AuthServerConfig { pub const fn builder(secret: JwtSecret) -> AuthServerConfigBuilder { AuthServerConfigBuilder::new(secret) } - +} +impl AuthServerConfig { /// Returns the address the server will listen on. pub const fn address(&self) -> SocketAddr { self.socket_addr } + /// Configures the rpc middleware. + pub fn with_rpc_middleware(self, rpc_middleware: T) -> AuthServerConfig { + let Self { socket_addr, secret, server_config, ipc_server_config, ipc_endpoint, .. } = self; + AuthServerConfig { + socket_addr, + secret, + server_config, + ipc_server_config, + ipc_endpoint, + rpc_middleware, + } + } + /// Convenience function to start a server in one step. 
- pub async fn start(self, module: AuthRpcModule) -> Result { - let Self { socket_addr, secret, server_config, ipc_server_config, ipc_endpoint } = self; + pub async fn start(self, module: AuthRpcModule) -> Result + where + RpcMiddleware: RethRpcMiddleware, + { + let Self { + socket_addr, + secret, + server_config, + ipc_server_config, + ipc_endpoint, + rpc_middleware, + } = self; // Create auth middleware. let middleware = tower::ServiceBuilder::new().layer(AuthLayer::new(JwtAuthValidator::new(secret))); + let rpc_middleware = RpcServiceBuilder::default().layer(rpc_middleware); + // By default, both http and ws are enabled. let server = ServerBuilder::new() .set_config(server_config.build()) .set_http_middleware(middleware) + .set_rpc_middleware(rpc_middleware) .build(socket_addr) .await .map_err(|err| RpcError::server_error(err, ServerKind::Auth(socket_addr)))?; @@ -86,12 +119,13 @@ impl AuthServerConfig { /// Builder type for configuring an `AuthServerConfig`. #[derive(Debug)] -pub struct AuthServerConfigBuilder { +pub struct AuthServerConfigBuilder { socket_addr: Option, secret: JwtSecret, server_config: Option, ipc_server_config: Option>, ipc_endpoint: Option, + rpc_middleware: RpcMiddleware, } // === impl AuthServerConfigBuilder === @@ -105,6 +139,22 @@ impl AuthServerConfigBuilder { server_config: None, ipc_server_config: None, ipc_endpoint: None, + rpc_middleware: Identity::new(), + } + } +} + +impl AuthServerConfigBuilder { + /// Configures the rpc middleware. + pub fn with_rpc_middleware(self, rpc_middleware: T) -> AuthServerConfigBuilder { + let Self { socket_addr, secret, server_config, ipc_server_config, ipc_endpoint, .. } = self; + AuthServerConfigBuilder { + socket_addr, + secret, + server_config, + ipc_server_config, + ipc_endpoint, + rpc_middleware, } } @@ -150,7 +200,7 @@ impl AuthServerConfigBuilder { } /// Build the `AuthServerConfig`. 
- pub fn build(self) -> AuthServerConfig { + pub fn build(self) -> AuthServerConfig { AuthServerConfig { socket_addr: self.socket_addr.unwrap_or_else(|| { SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), constants::DEFAULT_AUTH_PORT) @@ -182,6 +232,7 @@ impl AuthServerConfigBuilder { .set_id_provider(EthSubscriptionIdProvider::default()) }), ipc_endpoint: self.ipc_endpoint, + rpc_middleware: self.rpc_middleware, } } } diff --git a/crates/rpc/rpc-builder/src/config.rs b/crates/rpc/rpc-builder/src/config.rs index e2ae09e71ce..602f4e275e5 100644 --- a/crates/rpc/rpc-builder/src/config.rs +++ b/crates/rpc/rpc-builder/src/config.rs @@ -174,6 +174,7 @@ impl RethRpcServerConfig for RpcServerArgs { .max_request_body_size(self.rpc_max_request_size_bytes()) .max_response_body_size(self.rpc_max_response_size_bytes()) .max_connections(self.rpc_max_connections.get()) + .set_ipc_socket_permissions(self.ipc_socket_permissions.clone()) } fn rpc_server_config(&self) -> RpcServerConfig { diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 4dcce346c0d..6c1866836e5 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -20,6 +20,7 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use crate::{auth::AuthRpcModule, error::WsHttpSamePortError, metrics::RpcRequestMetrics}; +use alloy_network::Ethereum; use alloy_provider::{fillers::RecommendedFillers, Provider, ProviderBuilder}; use core::marker::PhantomData; use error::{ConflictingModules, RpcError, ServerKind}; @@ -40,14 +41,18 @@ use reth_rpc::{ }; use reth_rpc_api::servers::*; use reth_rpc_eth_api::{ - helpers::{Call, EthApiSpec, EthTransactions, LoadPendingBlock, TraceExt}, - EthApiServer, EthApiTypes, FullEthApiServer, RpcBlock, RpcHeader, RpcReceipt, RpcTransaction, - RpcTxReq, + helpers::{ + pending_block::PendingEnvBuilder, Call, EthApiSpec, EthTransactions, LoadPendingBlock, + TraceExt, + }, + node::RpcNodeCoreAdapter, + EthApiServer, EthApiTypes, FullEthApiServer, RpcBlock, RpcConvert, RpcConverter, RpcHeader, + RpcNodeCore, RpcReceipt, RpcTransaction, RpcTxReq, }; -use reth_rpc_eth_types::{EthConfig, EthSubscriptionIdProvider}; +use reth_rpc_eth_types::{receipt::EthReceiptConverter, EthConfig, EthSubscriptionIdProvider}; use reth_rpc_layer::{AuthLayer, Claims, CompressionLayer, JwtAuthValidator, JwtSecret}; use reth_storage_api::{ - AccountReader, BlockReader, BlockReaderIdExt, ChangeSetReader, FullRpcProvider, ProviderBlock, + AccountReader, BlockReader, ChangeSetReader, FullRpcProvider, ProviderBlock, StateProviderFactory, }; use reth_tasks::{pool::BlockingTaskGuard, TaskSpawner, TokioTaskExecutor}; @@ -248,12 +253,20 @@ impl } /// Instantiates a new [`EthApiBuilder`] from the configured components. - pub fn eth_api_builder(&self) -> EthApiBuilder + #[expect(clippy::type_complexity)] + pub fn eth_api_builder( + &self, + ) -> EthApiBuilder< + RpcNodeCoreAdapter, + RpcConverter>, + > where - Provider: BlockReaderIdExt + Clone, + Provider: Clone, Pool: Clone, Network: Clone, EvmConfig: Clone, + RpcNodeCoreAdapter: + RpcNodeCore, Evm = EvmConfig>, { EthApiBuilder::new( self.provider.clone(), @@ -268,19 +281,22 @@ impl /// Note: This spawns all necessary tasks. /// /// See also [`EthApiBuilder`]. 
- pub fn bootstrap_eth_api(&self) -> EthApi + #[expect(clippy::type_complexity)] + pub fn bootstrap_eth_api( + &self, + ) -> EthApi< + RpcNodeCoreAdapter, + RpcConverter>, + > where - N: NodePrimitives, - Provider: BlockReaderIdExt - + StateProviderFactory - + CanonStateSubscriptions - + ChainSpecProvider - + Clone - + Unpin - + 'static, + Provider: Clone, Pool: Clone, - EvmConfig: Clone, Network: Clone, + EvmConfig: ConfigureEvm + Clone, + RpcNodeCoreAdapter: + RpcNodeCore, Evm = EvmConfig>, + RpcConverter>: RpcConvert, + (): PendingEnvBuilder, { self.eth_api_builder().build() } @@ -769,10 +785,7 @@ where /// # Panics /// /// If called outside of the tokio runtime. See also [`Self::eth_api`] - pub fn trace_api(&self) -> TraceApi - where - EthApi: TraceExt, - { + pub fn trace_api(&self) -> TraceApi { TraceApi::new(self.eth_api().clone(), self.blocking_pool_guard.clone(), self.eth_config) } @@ -794,16 +807,8 @@ where /// # Panics /// /// If called outside of the tokio runtime. See also [`Self::eth_api`] - pub fn debug_api(&self) -> DebugApi - where - EthApi: EthApiSpec + EthTransactions + TraceExt, - EvmConfig::Primitives: NodePrimitives>, - { - DebugApi::new( - self.eth_api().clone(), - self.blocking_pool_guard.clone(), - self.evm_config.clone(), - ) + pub fn debug_api(&self) -> DebugApi { + DebugApi::new(self.eth_api().clone(), self.blocking_pool_guard.clone()) } /// Instantiates `NetApi` @@ -835,7 +840,7 @@ where + ChangeSetReader, Pool: TransactionPool + 'static, Network: NetworkInfo + Peers + Clone + 'static, - EthApi: FullEthApiServer, + EthApi: FullEthApiServer, EvmConfig: ConfigureEvm + 'static, Consensus: FullConsensus + Clone + 'static, { @@ -920,13 +925,11 @@ where .into_rpc() .into() } - RethRpcModule::Debug => DebugApi::new( - eth_api.clone(), - self.blocking_pool_guard.clone(), - self.evm_config.clone(), - ) - .into_rpc() - .into(), + RethRpcModule::Debug => { + DebugApi::new(eth_api.clone(), self.blocking_pool_guard.clone()) + .into_rpc() + .into() + } RethRpcModule::Eth => { // merge all eth handlers let mut module = eth_api.clone().into_rpc(); @@ -1188,6 +1191,22 @@ impl RpcServerConfig { self } + /// Configures a custom tokio runtime for the rpc server. + pub fn with_tokio_runtime(mut self, tokio_runtime: tokio::runtime::Handle) -> Self { + if let Some(http_server_config) = self.http_server_config { + self.http_server_config = + Some(http_server_config.custom_tokio_runtime(tokio_runtime.clone())); + } + if let Some(ws_server_config) = self.ws_server_config { + self.ws_server_config = + Some(ws_server_config.custom_tokio_runtime(tokio_runtime.clone())); + } + if let Some(ipc_server_config) = self.ipc_server_config { + self.ipc_server_config = Some(ipc_server_config.custom_tokio_runtime(tokio_runtime)); + } + self + } + /// Returns true if any server is configured. /// /// If no server is configured, no server will be launched on [`RpcServerConfig::start`]. 
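The `with_tokio_runtime` addition above forwards a runtime handle to each configured transport (HTTP, WS, IPC). A minimal sketch of how a caller might pin the RPC servers to a dedicated runtime; only `with_tokio_runtime` itself comes from this change, the surrounding setup is illustrative:

```rust
use reth_rpc_builder::RpcServerConfig;

/// Sketch: drive all configured RPC transports from their own multi-threaded
/// runtime. The runtime must outlive the servers, so it is returned alongside
/// the updated config rather than being dropped at the end of the function.
fn with_dedicated_runtime(
    config: RpcServerConfig,
) -> std::io::Result<(RpcServerConfig, tokio::runtime::Runtime)> {
    let runtime = tokio::runtime::Builder::new_multi_thread()
        .worker_threads(4)
        .enable_all()
        .build()?;
    // `with_tokio_runtime` clones the handle into the HTTP, WS and IPC
    // server configs, for whichever of them are set.
    let config = config.with_tokio_runtime(runtime.handle().clone());
    Ok((config, runtime))
}
```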
diff --git a/crates/rpc/rpc-builder/tests/it/http.rs b/crates/rpc/rpc-builder/tests/it/http.rs index d21d6f915a9..a790253d266 100644 --- a/crates/rpc/rpc-builder/tests/it/http.rs +++ b/crates/rpc/rpc-builder/tests/it/http.rs @@ -410,11 +410,11 @@ where { let block_id = BlockId::Number(BlockNumberOrTag::default()); - DebugApiClient::raw_header(client, block_id).await.unwrap(); - DebugApiClient::raw_block(client, block_id).await.unwrap_err(); - DebugApiClient::raw_transaction(client, B256::default()).await.unwrap(); - DebugApiClient::raw_receipts(client, block_id).await.unwrap(); - DebugApiClient::bad_blocks(client).await.unwrap(); + DebugApiClient::<TransactionRequest>::raw_header(client, block_id).await.unwrap(); + DebugApiClient::<TransactionRequest>::raw_block(client, block_id).await.unwrap_err(); + DebugApiClient::<TransactionRequest>::raw_transaction(client, B256::default()).await.unwrap(); + DebugApiClient::<TransactionRequest>::raw_receipts(client, block_id).await.unwrap(); + DebugApiClient::<TransactionRequest>::bad_blocks(client).await.unwrap(); } async fn test_basic_net_calls<C>(client: &C) @@ -441,22 +441,39 @@ where count: None, }; - TraceApiClient::trace_raw_transaction(client, Bytes::default(), HashSet::default(), None) - .await - .unwrap_err(); - TraceApiClient::trace_call_many(client, vec![], Some(BlockNumberOrTag::Latest.into())) - .await - .unwrap_err(); - TraceApiClient::replay_transaction(client, B256::default(), HashSet::default()) - .await - .err() - .unwrap(); - TraceApiClient::trace_block(client, block_id).await.unwrap_err(); - TraceApiClient::replay_block_transactions(client, block_id, HashSet::default()) - .await - .unwrap_err(); + TraceApiClient::<TransactionRequest>::trace_raw_transaction( + client, + Bytes::default(), + HashSet::default(), + None, + ) + .await + .unwrap_err(); + TraceApiClient::<TransactionRequest>::trace_call_many( + client, + vec![], + Some(BlockNumberOrTag::Latest.into()), + ) + .await + .unwrap_err(); + TraceApiClient::<TransactionRequest>::replay_transaction( + client, + B256::default(), + HashSet::default(), + ) + .await + .err() + .unwrap(); + TraceApiClient::<TransactionRequest>::trace_block(client, block_id).await.unwrap_err(); + TraceApiClient::<TransactionRequest>::replay_block_transactions( + client, + block_id, + HashSet::default(), + ) + .await + .unwrap_err(); - TraceApiClient::trace_filter(client, trace_filter).await.unwrap(); + TraceApiClient::<TransactionRequest>::trace_filter(client, trace_filter).await.unwrap(); } async fn test_basic_web3_calls<C>(client: &C) diff --git a/crates/rpc/rpc-builder/tests/it/utils.rs b/crates/rpc/rpc-builder/tests/it/utils.rs index 5c95dbc7ad5..293dd4e1937 100644 --- a/crates/rpc/rpc-builder/tests/it/utils.rs +++ b/crates/rpc/rpc-builder/tests/it/utils.rs @@ -1,11 +1,10 @@ -use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; - use alloy_rpc_types_engine::{ClientCode, ClientVersionV1}; use reth_chainspec::MAINNET; use reth_consensus::noop::NoopConsensus; use reth_engine_primitives::BeaconConsensusEngineHandle; use reth_ethereum_engine_primitives::EthEngineTypes; use reth_ethereum_primitives::EthPrimitives; +use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use reth_evm_ethereum::EthEvmConfig; use reth_network_api::noop::NoopNetwork; diff --git a/crates/rpc/rpc-convert/Cargo.toml b/crates/rpc/rpc-convert/Cargo.toml index 45721742310..7f87e9e721b 100644 --- a/crates/rpc/rpc-convert/Cargo.toml +++ b/crates/rpc/rpc-convert/Cargo.toml @@ -16,10 +16,12 @@ workspace = true reth-primitives-traits.workspace = true reth-storage-api = { workspace = true, optional = true } reth-evm.workspace = true +reth-ethereum-primitives.workspace = true # ethereum alloy-primitives.workspace = true alloy-rpc-types-eth = { workspace = true, 
features = ["serde"] } +alloy-signer.workspace = true alloy-consensus.workspace = true alloy-network.workspace = true alloy-json-rpc.workspace = true @@ -27,6 +29,7 @@ alloy-json-rpc.workspace = true # optimism op-alloy-consensus = { workspace = true, optional = true } op-alloy-rpc-types = { workspace = true, optional = true } +op-alloy-network = { workspace = true, optional = true } reth-optimism-primitives = { workspace = true, optional = true } op-revm = { workspace = true, optional = true } @@ -51,6 +54,7 @@ default = [] op = [ "dep:op-alloy-consensus", "dep:op-alloy-rpc-types", + "dep:op-alloy-network", "dep:reth-optimism-primitives", "dep:reth-storage-api", "dep:op-revm", diff --git a/crates/rpc/rpc-convert/src/lib.rs b/crates/rpc/rpc-convert/src/lib.rs index db1d7b86fc7..9fb9c40cd8e 100644 --- a/crates/rpc/rpc-convert/src/lib.rs +++ b/crates/rpc/rpc-convert/src/lib.rs @@ -12,15 +12,17 @@ pub mod block; mod fees; +pub mod receipt; mod rpc; pub mod transaction; pub use block::TryFromBlockResponse; pub use fees::{CallFees, CallFeesError}; +pub use receipt::TryFromReceiptResponse; pub use rpc::*; pub use transaction::{ - EthTxEnvError, IntoRpcTx, RpcConvert, RpcConverter, TransactionConversionError, TryIntoSimTx, - TxInfoMapper, + EthTxEnvError, IntoRpcTx, RpcConvert, RpcConverter, TransactionConversionError, + TryFromTransactionResponse, TryIntoSimTx, TxInfoMapper, }; #[cfg(feature = "op")] diff --git a/crates/rpc/rpc-convert/src/receipt.rs b/crates/rpc/rpc-convert/src/receipt.rs new file mode 100644 index 00000000000..5f37c1cad5e --- /dev/null +++ b/crates/rpc/rpc-convert/src/receipt.rs @@ -0,0 +1,99 @@ +//! Conversion traits for receipt responses to primitive receipt types. + +use alloy_network::Network; +use std::convert::Infallible; + +/// Trait for converting network receipt responses to primitive receipt types. +pub trait TryFromReceiptResponse { + /// The error type returned if the conversion fails. + type Error: core::error::Error + Send + Sync + Unpin; + + /// Converts a network receipt response to a primitive receipt type. + /// + /// # Returns + /// + /// Returns `Ok(Self)` on successful conversion, or `Err(Self::Error)` if the conversion fails. 
+ fn from_receipt_response(receipt_response: N::ReceiptResponse) -> Result + where + Self: Sized; +} + +impl TryFromReceiptResponse for reth_ethereum_primitives::Receipt { + type Error = Infallible; + + fn from_receipt_response( + receipt_response: alloy_rpc_types_eth::TransactionReceipt, + ) -> Result { + Ok(receipt_response.into_inner().into()) + } +} + +#[cfg(feature = "op")] +impl TryFromReceiptResponse for reth_optimism_primitives::OpReceipt { + type Error = Infallible; + + fn from_receipt_response( + receipt_response: op_alloy_rpc_types::OpTransactionReceipt, + ) -> Result { + Ok(receipt_response.inner.inner.map_logs(Into::into).into()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_consensus::ReceiptEnvelope; + use alloy_network::Ethereum; + use reth_ethereum_primitives::Receipt; + + #[test] + fn test_try_from_receipt_response() { + let rpc_receipt = alloy_rpc_types_eth::TransactionReceipt { + inner: ReceiptEnvelope::Eip1559(Default::default()), + transaction_hash: Default::default(), + transaction_index: None, + block_hash: None, + block_number: None, + gas_used: 0, + effective_gas_price: 0, + blob_gas_used: None, + blob_gas_price: None, + from: Default::default(), + to: None, + contract_address: None, + }; + let result = + >::from_receipt_response(rpc_receipt); + assert!(result.is_ok()); + } + + #[cfg(feature = "op")] + #[test] + fn test_try_from_receipt_response_optimism() { + use op_alloy_consensus::OpReceiptEnvelope; + use op_alloy_network::Optimism; + use op_alloy_rpc_types::OpTransactionReceipt; + use reth_optimism_primitives::OpReceipt; + + let op_receipt = OpTransactionReceipt { + inner: alloy_rpc_types_eth::TransactionReceipt { + inner: OpReceiptEnvelope::Eip1559(Default::default()), + transaction_hash: Default::default(), + transaction_index: None, + block_hash: None, + block_number: None, + gas_used: 0, + effective_gas_price: 0, + blob_gas_used: None, + blob_gas_price: None, + from: Default::default(), + to: None, + contract_address: None, + }, + l1_block_info: Default::default(), + }; + let result = + >::from_receipt_response(op_receipt); + assert!(result.is_ok()); + } +} diff --git a/crates/rpc/rpc-convert/src/rpc.rs b/crates/rpc/rpc-convert/src/rpc.rs index 7b5c457419c..180f5150b6e 100644 --- a/crates/rpc/rpc-convert/src/rpc.rs +++ b/crates/rpc/rpc-convert/src/rpc.rs @@ -1,23 +1,32 @@ +use std::{fmt::Debug, future::Future}; + +use alloy_consensus::{ + EthereumTxEnvelope, EthereumTypedTransaction, SignableTransaction, TxEip4844, +}; use alloy_json_rpc::RpcObject; -use alloy_network::{Network, ReceiptResponse, TransactionResponse}; +use alloy_network::{ + primitives::HeaderResponse, Network, ReceiptResponse, TransactionResponse, TxSigner, +}; +use alloy_primitives::Signature; +use alloy_rpc_types_eth::TransactionRequest; /// RPC types used by the `eth_` RPC API. /// /// This is a subset of [`Network`] trait with only RPC response types kept. -pub trait RpcTypes { +pub trait RpcTypes: Send + Sync + Clone + Unpin + Debug + 'static { /// Header response type. - type Header: RpcObject; + type Header: RpcObject + HeaderResponse; /// Receipt response type. type Receipt: RpcObject + ReceiptResponse; /// Transaction response type. type TransactionResponse: RpcObject + TransactionResponse; /// Transaction response type. 
- type TransactionRequest: RpcObject; + type TransactionRequest: RpcObject + AsRef<TransactionRequest> + AsMut<TransactionRequest>; } impl<T> RpcTypes for T where - T: Network, + T: Network<TransactionRequest: AsRef<TransactionRequest> + AsMut<TransactionRequest>> + Unpin, { type Header = T::HeaderResponse; type Receipt = T::ReceiptResponse; @@ -28,5 +37,128 @@ where /// Adapter for network specific transaction response. pub type RpcTransaction<T> = <T as RpcTypes>::TransactionResponse; +/// Adapter for network specific receipt response. +pub type RpcReceipt<T> = <T as RpcTypes>::Receipt; + +/// Adapter for network specific header response. +pub type RpcHeader<T> = <T as RpcTypes>::Header; + +/// Adapter for network specific block type. +pub type RpcBlock<T> = alloy_rpc_types_eth::Block<RpcTransaction<T>, RpcHeader<T>>; + /// Adapter for network specific transaction request. pub type RpcTxReq<T> = <T as RpcTypes>::TransactionRequest; + +/// Error for [`SignableTxRequest`] trait. +#[derive(Debug, thiserror::Error)] +pub enum SignTxRequestError { + /// The transaction request is invalid. + #[error("invalid transaction request")] + InvalidTransactionRequest, + + /// The signer is not supported. + #[error(transparent)] + SignerNotSupported(#[from] alloy_signer::Error), +} + +/// An abstraction over transaction requests that can be signed. +pub trait SignableTxRequest<T>: Send + Sync + 'static { + /// Attempts to build a transaction request and sign it with the given signer. + fn try_build_and_sign( + self, + signer: impl TxSigner<Signature> + Send, + ) -> impl Future<Output = Result<T, SignTxRequestError>> + Send; } + +impl SignableTxRequest<EthereumTxEnvelope<TxEip4844>> for TransactionRequest { + async fn try_build_and_sign( + self, + signer: impl TxSigner<Signature> + Send, + ) -> Result<EthereumTxEnvelope<TxEip4844>, SignTxRequestError> { + let mut tx = + self.build_typed_tx().map_err(|_| SignTxRequestError::InvalidTransactionRequest)?; + let signature = signer.sign_transaction(&mut tx).await?; + let signed = match tx { + EthereumTypedTransaction::Legacy(tx) => { + EthereumTxEnvelope::Legacy(tx.into_signed(signature)) + } + EthereumTypedTransaction::Eip2930(tx) => { + EthereumTxEnvelope::Eip2930(tx.into_signed(signature)) + } + EthereumTypedTransaction::Eip1559(tx) => { + EthereumTxEnvelope::Eip1559(tx.into_signed(signature)) + } + EthereumTypedTransaction::Eip4844(tx) => { + EthereumTxEnvelope::Eip4844(TxEip4844::from(tx).into_signed(signature)) + } + EthereumTypedTransaction::Eip7702(tx) => { + EthereumTxEnvelope::Eip7702(tx.into_signed(signature)) + } + }; + Ok(signed) + } +} + +#[cfg(feature = "op")] +impl SignableTxRequest<op_alloy_consensus::OpTxEnvelope> + for op_alloy_rpc_types::OpTransactionRequest +{ + async fn try_build_and_sign( + self, + signer: impl TxSigner<Signature> + Send, + ) -> Result<op_alloy_consensus::OpTxEnvelope, SignTxRequestError> { + let mut tx = + self.build_typed_tx().map_err(|_| SignTxRequestError::InvalidTransactionRequest)?; + let signature = signer.sign_transaction(&mut tx).await?; + let signed = match tx { + op_alloy_consensus::OpTypedTransaction::Legacy(tx) => { + op_alloy_consensus::OpTxEnvelope::Legacy(tx.into_signed(signature)) + } + op_alloy_consensus::OpTypedTransaction::Eip2930(tx) => { + op_alloy_consensus::OpTxEnvelope::Eip2930(tx.into_signed(signature)) + } + op_alloy_consensus::OpTypedTransaction::Eip1559(tx) => { + op_alloy_consensus::OpTxEnvelope::Eip1559(tx.into_signed(signature)) + } + op_alloy_consensus::OpTypedTransaction::Eip7702(tx) => { + op_alloy_consensus::OpTxEnvelope::Eip7702(tx.into_signed(signature)) + } + op_alloy_consensus::OpTypedTransaction::Deposit(_) => { + return Err(SignTxRequestError::InvalidTransactionRequest); + } + }; + Ok(signed) + } +} + +#[cfg(feature = "scroll")] +impl SignableTxRequest<scroll_alloy_consensus::ScrollTxEnvelope> + for scroll_alloy_rpc_types::ScrollTransactionRequest +{ + async fn try_build_and_sign( + self, + signer: impl TxSigner<Signature> + Send, + ) -> Result<scroll_alloy_consensus::ScrollTxEnvelope, SignTxRequestError> { + 
let mut tx = + self.build_typed_tx().map_err(|_| SignTxRequestError::InvalidTransactionRequest)?; + let signature = signer.sign_transaction(&mut tx).await?; + let signed = match tx { + scroll_alloy_consensus::ScrollTypedTransaction::Legacy(tx) => { + scroll_alloy_consensus::ScrollTxEnvelope::Legacy(tx.into_signed(signature)) + } + scroll_alloy_consensus::ScrollTypedTransaction::Eip2930(tx) => { + scroll_alloy_consensus::ScrollTxEnvelope::Eip2930(tx.into_signed(signature)) + } + scroll_alloy_consensus::ScrollTypedTransaction::Eip1559(tx) => { + scroll_alloy_consensus::ScrollTxEnvelope::Eip1559(tx.into_signed(signature)) + } + scroll_alloy_consensus::ScrollTypedTransaction::Eip7702(tx) => { + scroll_alloy_consensus::ScrollTxEnvelope::Eip7702(tx.into_signed(signature)) + } + scroll_alloy_consensus::ScrollTypedTransaction::L1Message(_) => { + return Err(SignTxRequestError::InvalidTransactionRequest); + } + }; + Ok(signed) + } +} diff --git a/crates/rpc/rpc-convert/src/transaction.rs b/crates/rpc/rpc-convert/src/transaction.rs index 68dc1a2974e..835ef19dedb 100644 --- a/crates/rpc/rpc-convert/src/transaction.rs +++ b/crates/rpc/rpc-convert/src/transaction.rs @@ -2,9 +2,12 @@ use crate::{ fees::{CallFees, CallFeesError}, - RpcTransaction, RpcTxReq, RpcTypes, + RpcHeader, RpcReceipt, RpcTransaction, RpcTxReq, RpcTypes, }; -use alloy_consensus::{error::ValueError, transaction::Recovered, EthereumTxEnvelope, TxEip4844}; +use alloy_consensus::{ + error::ValueError, transaction::Recovered, EthereumTxEnvelope, Sealable, TxEip4844, +}; +use alloy_network::Network; use alloy_primitives::{Address, TxKind, U256}; use alloy_rpc_types_eth::{ request::{TransactionInputError, TransactionRequest}, @@ -15,11 +18,73 @@ use reth_evm::{ revm::context_interface::{either::Either, Block}, ConfigureEvm, TxEnvFor, }; -use reth_primitives_traits::{NodePrimitives, TxTy}; +use reth_primitives_traits::{ + HeaderTy, NodePrimitives, SealedHeader, SealedHeaderFor, TransactionMeta, TxTy, +}; use revm_context::{BlockEnv, CfgEnv, TxEnv}; -use std::{convert::Infallible, error::Error, fmt::Debug, marker::PhantomData}; +use std::{borrow::Cow, convert::Infallible, error::Error, fmt::Debug, marker::PhantomData}; use thiserror::Error; +/// Input for [`RpcConvert::convert_receipts`]. +#[derive(Debug, Clone)] +pub struct ConvertReceiptInput<'a, N: NodePrimitives> { + /// Primitive receipt. + pub receipt: Cow<'a, N::Receipt>, + /// Transaction the receipt corresponds to. + pub tx: Recovered<&'a N::SignedTx>, + /// Gas used by the transaction. + pub gas_used: u64, + /// Number of logs emitted before this transaction. + pub next_log_index: usize, + /// Metadata for the transaction. + pub meta: TransactionMeta, +} + +/// A type that knows how to convert primitive receipts to RPC representations. +pub trait ReceiptConverter<N: NodePrimitives>: Debug + 'static { + /// RPC representation. + type RpcReceipt; + + /// Error that may occur during conversion. + type Error; + + /// Converts a set of primitive receipts to RPC representations. It is guaranteed that all + /// receipts are from the same block. + fn convert_receipts( + &self, + receipts: Vec<ConvertReceiptInput<'_, N>>, + ) -> Result<Vec<Self::RpcReceipt>, Self::Error>; +} + +/// A type that knows how to convert a consensus header into an RPC header. +pub trait HeaderConverter<Consensus, Rpc>: Debug + Send + Sync + Unpin + Clone + 'static { + /// Converts a consensus header into an RPC header.
+ fn convert_header(&self, header: SealedHeader, block_size: usize) -> Rpc; +} + +/// Default implementation of [`HeaderConverter`] that uses [`FromConsensusHeader`] to convert +/// headers. +impl HeaderConverter for () +where + Rpc: FromConsensusHeader, +{ + fn convert_header(&self, header: SealedHeader, block_size: usize) -> Rpc { + Rpc::from_consensus_header(header, block_size) + } +} + +/// Conversion trait for obtaining RPC header from a consensus header. +pub trait FromConsensusHeader { + /// Takes a consensus header and converts it into `self`. + fn from_consensus_header(header: SealedHeader, block_size: usize) -> Self; +} + +impl FromConsensusHeader for alloy_rpc_types_eth::Header { + fn from_consensus_header(header: SealedHeader, block_size: usize) -> Self { + Self::from_consensus(header.into(), None, Some(U256::from(block_size))) + } +} + /// Responsible for the conversions from and into RPC requests and responses. /// /// The JSON-RPC schema and the Node primitives are configurable using the [`RpcConvert::Network`] @@ -28,7 +93,7 @@ use thiserror::Error; /// A generic implementation [`RpcConverter`] should be preferred over a manual implementation. As /// long as its trait bound requirements are met, the implementation is created automatically and /// can be used in RPC method handlers for all the conversions. -pub trait RpcConvert: Send + Sync + Unpin + Clone + Debug { +pub trait RpcConvert: Send + Sync + Unpin + Clone + Debug + 'static { /// Associated lower layer consensus types to convert from and into types of [`Self::Network`]. type Primitives: NodePrimitives; @@ -78,6 +143,20 @@ pub trait RpcConvert: Send + Sync + Unpin + Clone + Debug { cfg_env: &CfgEnv, block_env: &BlockEnv, ) -> Result; + + /// Converts a set of primitive receipts to RPC representations. It is guaranteed that all + /// receipts are from the same block. + fn convert_receipts( + &self, + receipts: Vec>, + ) -> Result>, Self::Error>; + + /// Converts a primitive header to an RPC header. + fn convert_header( + &self, + header: SealedHeaderFor, + block_size: usize, + ) -> Result, Self::Error>; } /// Converts `self` into `T`. The opposite of [`FromConsensusTx`]. @@ -308,7 +387,7 @@ impl TryIntoTxEnv for TransactionRequest { #[error("Failed to convert transaction into RPC response: {0}")] pub struct TransactionConversionError(String); -/// Generic RPC response object converter for `Evm` and network `E`. +/// Generic RPC response object converter for `Evm` and network `Network`. /// /// The main purpose of this struct is to provide an implementation of [`RpcConvert`] for generic /// associated types. This struct can then be used for conversions in RPC method handlers. @@ -323,107 +402,172 @@ pub struct TransactionConversionError(String); /// is [`TransactionInfo`] then `()` can be used as `Map` which trivially passes over the input /// object. #[derive(Debug)] -pub struct RpcConverter { - phantom: PhantomData<(E, Evm, Err)>, +pub struct RpcConverter { + network: PhantomData, + evm: PhantomData, + receipt_converter: Receipt, + header_converter: Header, mapper: Map, } -impl RpcConverter { - /// Creates a new [`RpcConverter`] with the default mapper. - pub const fn new() -> Self { - Self::with_mapper(()) +impl RpcConverter { + /// Creates a new [`RpcConverter`] with `receipt_converter` and `mapper`. 
+ pub const fn new(receipt_converter: Receipt) -> Self { + Self { + network: PhantomData, + evm: PhantomData, + receipt_converter, + header_converter: (), + mapper: (), + } } } -impl RpcConverter { - /// Creates a new [`RpcConverter`] with `mapper`. - pub const fn with_mapper(mapper: Map) -> Self { - Self { phantom: PhantomData, mapper } - } - - /// Converts the generic types. - pub fn convert(self) -> RpcConverter { - RpcConverter::with_mapper(self.mapper) +impl RpcConverter { + /// Converts the network type + pub fn with_network(self) -> RpcConverter { + let Self { receipt_converter, header_converter, mapper, evm, .. } = self; + RpcConverter { + receipt_converter, + header_converter, + mapper, + network: Default::default(), + evm, + } } - /// Swaps the inner `mapper`. - pub fn map(self, mapper: Map2) -> RpcConverter { - RpcConverter::with_mapper(mapper) + /// Configures the header converter. + pub fn with_header_converter( + self, + header_converter: HeaderNew, + ) -> RpcConverter { + let Self { receipt_converter, header_converter: _, mapper, network, evm } = self; + RpcConverter { receipt_converter, header_converter, mapper, network, evm } } - /// Converts the generic types and swaps the inner `mapper`. - pub fn convert_map( + /// Configures the mapper. + pub fn with_mapper( self, - mapper: Map2, - ) -> RpcConverter { - self.convert().map(mapper) + mapper: MapNew, + ) -> RpcConverter { + let Self { receipt_converter, header_converter, mapper: _, network, evm } = self; + RpcConverter { receipt_converter, header_converter, mapper, network, evm } } } -impl Clone for RpcConverter { - fn clone(&self) -> Self { - Self::with_mapper(self.mapper.clone()) +impl Default + for RpcConverter +where + Receipt: Default, + Header: Default, + Map: Default, +{ + fn default() -> Self { + Self { + network: Default::default(), + evm: Default::default(), + receipt_converter: Default::default(), + header_converter: Default::default(), + mapper: Default::default(), + } } } -impl Default for RpcConverter { - fn default() -> Self { - Self::new() +impl Clone + for RpcConverter +{ + fn clone(&self) -> Self { + Self { + network: Default::default(), + evm: Default::default(), + receipt_converter: self.receipt_converter.clone(), + header_converter: self.header_converter.clone(), + mapper: self.mapper.clone(), + } } } -impl RpcConvert for RpcConverter +impl RpcConvert + for RpcConverter where N: NodePrimitives, - E: RpcTypes + Send + Sync + Unpin + Clone + Debug, - Evm: ConfigureEvm, - TxTy: IntoRpcTx + Clone + Debug, - RpcTxReq: TryIntoSimTx> + TryIntoTxEnv>, - Err: From - + From< as TryIntoTxEnv>>::Err> - + for<'a> From<>>::Err> - + Error - + Unpin + Network: RpcTypes + Send + Sync + Unpin + Clone + Debug, + Evm: ConfigureEvm + 'static, + TxTy: IntoRpcTx + Clone + Debug, + RpcTxReq: TryIntoSimTx> + TryIntoTxEnv>, + Receipt: ReceiptConverter< + N, + RpcReceipt = RpcReceipt, + Error: From + + From< as TryIntoTxEnv>>::Err> + + for<'a> From<>>::Err> + + Error + + Unpin + + Sync + + Send + + Into>, + > + Send + Sync - + Send - + Into>, + + Unpin + + Clone + + Debug, + Header: HeaderConverter, RpcHeader>, Map: for<'a> TxInfoMapper< &'a TxTy, - Out = as IntoRpcTx>::TxInfo, + Out = as IntoRpcTx>::TxInfo, > + Clone + Debug + Unpin + Send - + Sync, + + Sync + + 'static, { type Primitives = N; - type Network = E; + type Network = Network; type TxEnv = TxEnvFor; - type Error = Err; + type Error = Receipt::Error; fn fill( &self, tx: Recovered>, tx_info: TransactionInfo, - ) -> Result { + ) -> Result { let (tx, signer) = 
tx.into_parts(); let tx_info = self.mapper.try_map(&tx, tx_info)?; Ok(tx.into_rpc_tx(signer, tx_info)) } - fn build_simulate_v1_transaction(&self, request: RpcTxReq) -> Result, Self::Error> { + fn build_simulate_v1_transaction( + &self, + request: RpcTxReq, + ) -> Result, Self::Error> { Ok(request.try_into_sim_tx().map_err(|e| TransactionConversionError(e.to_string()))?) } fn tx_env( &self, - request: RpcTxReq, + request: RpcTxReq, cfg_env: &CfgEnv, block_env: &BlockEnv, ) -> Result { Ok(request.try_into_tx_env(cfg_env, block_env)?) } + + fn convert_receipts( + &self, + receipts: Vec>, + ) -> Result>, Self::Error> { + self.receipt_converter.convert_receipts(receipts) + } + + fn convert_header( + &self, + header: SealedHeaderFor, + block_size: usize, + ) -> Result, Self::Error> { + Ok(self.header_converter.convert_header(header, block_size)) + } } /// Scroll specific RPC transaction compatibility implementations. @@ -511,17 +655,22 @@ pub mod op { use op_alloy_rpc_types::OpTransactionRequest; use op_revm::OpTransaction; use reth_optimism_primitives::DepositReceipt; + use reth_primitives_traits::SignedTransaction; use reth_storage_api::{errors::ProviderError, ReceiptProvider}; /// Creates [`OpTransactionInfo`] by adding [`OpDepositInfo`] to [`TransactionInfo`] if `tx` is /// a deposit. - pub fn try_into_op_tx_info>( + pub fn try_into_op_tx_info( provider: &T, - tx: &OpTxEnvelope, + tx: &Tx, tx_info: TransactionInfo, - ) -> Result { + ) -> Result + where + Tx: op_alloy_consensus::OpTransaction + SignedTransaction, + T: ReceiptProvider, + { let deposit_meta = if tx.is_deposit() { - provider.receipt_by_hash(tx.tx_hash())?.and_then(|receipt| { + provider.receipt_by_hash(*tx.tx_hash())?.and_then(|receipt| { receipt.as_deposit_receipt().map(|receipt| OpDepositInfo { deposit_receipt_version: receipt.deposit_receipt_version, deposit_nonce: receipt.deposit_nonce, @@ -574,3 +723,108 @@ pub mod op { } } } + +/// Trait for converting network transaction responses to primitive transaction types. +pub trait TryFromTransactionResponse { + /// The error type returned if the conversion fails. + type Error: core::error::Error + Send + Sync + Unpin; + + /// Converts a network transaction response to a primitive transaction type. + /// + /// # Returns + /// + /// Returns `Ok(Self)` on successful conversion, or `Err(Self::Error)` if the conversion fails. 
+ fn from_transaction_response( + transaction_response: N::TransactionResponse, + ) -> Result<Self, Self::Error> + where + Self: Sized; +} + +impl TryFromTransactionResponse<alloy_network::Ethereum> + for reth_ethereum_primitives::TransactionSigned +{ + type Error = Infallible; + + fn from_transaction_response(transaction_response: Transaction) -> Result<Self, Self::Error> { + Ok(transaction_response.into_inner().into()) + } +} + +#[cfg(feature = "op")] +impl TryFromTransactionResponse<op_alloy_network::Optimism> + for reth_optimism_primitives::OpTransactionSigned +{ + type Error = Infallible; + + fn from_transaction_response( + transaction_response: op_alloy_rpc_types::Transaction, + ) -> Result<Self, Self::Error> { + Ok(transaction_response.inner.into_inner()) + } +} + +#[cfg(test)] +mod transaction_response_tests { + use super::*; + use alloy_consensus::{transaction::Recovered, EthereumTxEnvelope, Signed, TxLegacy}; + use alloy_network::Ethereum; + use alloy_primitives::{Address, Signature, B256, U256}; + use alloy_rpc_types_eth::Transaction; + + #[test] + fn test_ethereum_transaction_conversion() { + let signed_tx = Signed::new_unchecked( + TxLegacy::default(), + Signature::new(U256::ONE, U256::ONE, false), + B256::ZERO, + ); + let envelope = EthereumTxEnvelope::Legacy(signed_tx); + + let tx_response = Transaction { + inner: Recovered::new_unchecked(envelope, Address::ZERO), + block_hash: None, + block_number: None, + transaction_index: None, + effective_gas_price: None, + }; + + let result = <reth_ethereum_primitives::TransactionSigned as TryFromTransactionResponse<Ethereum>>::from_transaction_response(tx_response); + assert!(result.is_ok()); + } + + #[cfg(feature = "op")] + #[test] + fn test_optimism_transaction_conversion() { + use op_alloy_consensus::OpTxEnvelope; + use op_alloy_network::Optimism; + use reth_optimism_primitives::OpTransactionSigned; + + let signed_tx = Signed::new_unchecked( + TxLegacy::default(), + Signature::new(U256::ONE, U256::ONE, false), + B256::ZERO, + ); + let envelope = OpTxEnvelope::Legacy(signed_tx); + + let inner_tx = Transaction { + inner: Recovered::new_unchecked(envelope, Address::ZERO), + block_hash: None, + block_number: None, + transaction_index: None, + effective_gas_price: None, + }; + + let tx_response = op_alloy_rpc_types::Transaction { + inner: inner_tx, + deposit_nonce: None, + deposit_receipt_version: None, + }; + + let result = <OpTransactionSigned as TryFromTransactionResponse<Optimism>>::from_transaction_response(tx_response); + + assert!(result.is_ok()); + } +} diff --git a/crates/rpc/rpc-e2e-tests/Cargo.toml b/crates/rpc/rpc-e2e-tests/Cargo.toml new file mode 100644 index 00000000000..78c04740497 --- /dev/null +++ b/crates/rpc/rpc-e2e-tests/Cargo.toml @@ -0,0 +1,43 @@ +[package] +name = "reth-rpc-e2e-tests" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "RPC end-to-end tests including execution-apis compatibility testing" + +[lints] +workspace = true + +[dependencies] +# reth
reth-e2e-test-utils.workspace = true +reth-rpc-api = { workspace = true, features = ["client"] } + +# ethereum
alloy-rpc-types-engine.workspace = true + +# async
tokio.workspace = true +futures-util.workspace = true + +# misc
eyre.workspace = true +serde_json.workspace = true +tracing.workspace = true +jsonrpsee.workspace = true + +# required for the Action trait
reth-node-api.workspace = true + +[dev-dependencies] +reth-tracing.workspace = true +reth-chainspec.workspace = true +reth-node-ethereum.workspace = true +alloy-genesis.workspace = true + +[[test]] +name = "e2e_testsuite" +path = "tests/e2e-testsuite/main.rs" diff --git a/crates/rpc/rpc-e2e-tests/README.md 
b/crates/rpc/rpc-e2e-tests/README.md new file mode 100644 index 00000000000..44e9806f05d --- /dev/null +++ b/crates/rpc/rpc-e2e-tests/README.md @@ -0,0 +1,170 @@ +# Reth RPC E2E Tests + +This crate contains end-to-end tests for Reth's RPC implementation, including compatibility testing against the official execution-apis test suite. + +## Overview + +The RPC compatibility testing framework enables: +1. Importing pre-built blockchain data from RLP files +2. Initializing nodes with specific forkchoice states +3. Running standardized RPC test cases from the execution-apis repository +4. Comparing responses against expected results + +## Architecture + +### Key Components + +1. **`RunRpcCompatTests` Action**: Executes RPC test cases from .io files +2. **`InitializeFromExecutionApis` Action**: Applies forkchoice state from JSON files with automatic retry for syncing nodes +3. **Test Data Format**: Uses execution-apis .io file format for test cases + +### Test Data Structure + +Expected directory structure: +``` +test_data_path/ +├── chain.rlp # Pre-built blockchain data +├── headfcu.json # Initial forkchoice state +├── genesis.json # Genesis configuration (optional) +└── eth_getLogs/ # Test cases for eth_getLogs + ├── contract-addr.io + ├── no-topics.io + ├── topic-exact-match.io + └── topic-wildcard.io +``` + +### .io File Format + +Test files use a simple request-response format: +``` +// Optional comment describing the test +// speconly: marks test as specification-only +>> {"jsonrpc":"2.0","id":1,"method":"eth_getLogs","params":[...]} +<< {"jsonrpc":"2.0","id":1,"result":[...]} +``` + +## Usage + +### Basic Example + +```rust +use alloy_genesis::Genesis; +use reth_chainspec::ChainSpec; +use reth_e2e_test_utils::testsuite::{ + actions::{MakeCanonical, UpdateBlockInfo}, + setup::{NetworkSetup, Setup}, + TestBuilder, +}; +use reth_rpc_e2e_tests::rpc_compat::{InitializeFromExecutionApis, RunRpcCompatTests}; + +#[tokio::test] +async fn test_eth_get_logs_compat() -> Result<()> { + let test_data_path = "../execution-apis/tests"; + let chain_rlp_path = PathBuf::from(&test_data_path).join("chain.rlp"); + let fcu_json_path = PathBuf::from(&test_data_path).join("headfcu.json"); + let genesis_path = PathBuf::from(&test_data_path).join("genesis.json"); + + // Parse genesis.json to get chain spec with all hardfork configuration + let genesis_json = std::fs::read_to_string(&genesis_path)?; + let genesis: Genesis = serde_json::from_str(&genesis_json)?; + let chain_spec: ChainSpec = genesis.into(); + let chain_spec = Arc::new(chain_spec); + + let setup = Setup::<EthEngineTypes>::default() + .with_chain_spec(chain_spec) + .with_network(NetworkSetup::single_node()); + + let test = TestBuilder::new() + .with_setup_and_import(setup, chain_rlp_path) + .with_action(UpdateBlockInfo::default()) + .with_action( + InitializeFromExecutionApis::new() + .with_fcu_json(fcu_json_path.to_string_lossy()), + ) + .with_action(MakeCanonical::new()) + .with_action(RunRpcCompatTests::new( + vec!["eth_getLogs".to_string()], + test_data_path.to_string_lossy(), + )); + + test.run::<EthereumNode>().await?; + Ok(()) +} +``` + +### Running Tests + +1. Clone the execution-apis repository: + ```bash + git clone https://github.com/ethereum/execution-apis.git + ``` + +2. Set the test data path: + ```bash + export EXECUTION_APIS_TEST_PATH=../execution-apis/tests + ``` + +3. Run the test: + ```bash + cargo test --test rpc_compat test_eth_get_logs_compat -- --nocapture + ``` + +### Custom Test Data + +You can create custom test cases following the same format: + +1. 
Create a directory structure matching the execution-apis format +2. Write .io files with request-response pairs +3. Use the same testing framework with your custom path + +### Test Multiple RPC Methods + +```rust +let methods_to_test = vec![ + "eth_blockNumber".to_string(), + "eth_call".to_string(), + "eth_getLogs".to_string(), + "eth_getTransactionReceipt".to_string(), +]; + +RunRpcCompatTests::new(methods_to_test, test_data_path) + .with_fail_fast(true) // Stop on first failure +``` + +## Implementation Details + +### JSON-RPC Request Handling + +The framework handles various parameter formats: +- Empty parameters: `[]` +- Array parameters: `[param1, param2, ...]` +- Object parameters: Wrapped in array `[{...}]` + +### Response Comparison + +- **Numbers**: Compared with floating-point tolerance +- **Arrays**: Element-by-element comparison +- **Objects**: Key-by-key comparison (extra fields in actual response are allowed) +- **Errors**: Only presence is checked, not exact message + +### Error Handling + +- Parse errors are reported with context +- RPC errors are captured and compared +- Test failures include detailed diffs + +## Benefits + +1. **Standardization**: Uses official execution-apis test format +2. **Flexibility**: Works with custom test data +3. **Integration**: Seamlessly integrates with e2e test framework +4. **Extensibility**: Easy to add new RPC methods +5. **Debugging**: Detailed error reporting with fail-fast option + +## Future Enhancements + +- Support for batch requests +- WebSocket testing +- Performance benchmarking +- Automatic test discovery +- Parallel test execution \ No newline at end of file diff --git a/crates/rpc/rpc-e2e-tests/src/lib.rs b/crates/rpc/rpc-e2e-tests/src/lib.rs new file mode 100644 index 00000000000..c8c6dfe280e --- /dev/null +++ b/crates/rpc/rpc-e2e-tests/src/lib.rs @@ -0,0 +1,12 @@ +//! RPC end-to-end tests including execution-apis compatibility testing. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +/// RPC compatibility test actions for the e2e test framework +pub mod rpc_compat; diff --git a/crates/rpc/rpc-e2e-tests/src/rpc_compat.rs b/crates/rpc/rpc-e2e-tests/src/rpc_compat.rs new file mode 100644 index 00000000000..436ace0eeb0 --- /dev/null +++ b/crates/rpc/rpc-e2e-tests/src/rpc_compat.rs @@ -0,0 +1,514 @@ +//! RPC compatibility test actions for testing RPC methods against execution-apis test data. 
+ +use eyre::{eyre, Result}; +use futures_util::future::BoxFuture; +use jsonrpsee::core::client::ClientT; +use reth_e2e_test_utils::testsuite::{actions::Action, BlockInfo, Environment}; +use reth_node_api::EngineTypes; +use serde_json::Value; +use std::path::Path; +use tracing::{debug, info}; + +/// Test case from execution-apis .io file format +#[derive(Debug, Clone)] +pub struct RpcTestCase { + /// The test name (filename without .io extension) + pub name: String, + /// Request to send (as JSON value) + pub request: Value, + /// Expected response (as JSON value) + pub expected_response: Value, + /// Whether this test is spec-only + pub spec_only: bool, +} + +/// Action that runs RPC compatibility tests from execution-apis test data +#[derive(Debug)] +pub struct RunRpcCompatTests { + /// RPC methods to test (e.g., ["`eth_getLogs`"]) + pub methods: Vec, + /// Path to the execution-apis tests directory + pub test_data_path: String, + /// Whether to stop on first failure + pub fail_fast: bool, +} + +impl RunRpcCompatTests { + /// Create a new RPC compatibility test runner + pub fn new(methods: Vec, test_data_path: impl Into) -> Self { + Self { methods, test_data_path: test_data_path.into(), fail_fast: false } + } + + /// Set whether to stop on first failure + pub const fn with_fail_fast(mut self, fail_fast: bool) -> Self { + self.fail_fast = fail_fast; + self + } + + /// Parse a .io test file + fn parse_io_file(content: &str) -> Result { + let mut lines = content.lines(); + let mut spec_only = false; + let mut request_line = None; + let mut response_line = None; + + // Skip comments and look for spec_only marker + for line in lines.by_ref() { + let line = line.trim(); + if line.starts_with("//") { + if line.contains("speconly:") { + spec_only = true; + } + } else if let Some(stripped) = line.strip_prefix(">>") { + request_line = Some(stripped.trim()); + break; + } + } + + // Look for response + for line in lines { + let line = line.trim(); + if let Some(stripped) = line.strip_prefix("<<") { + response_line = Some(stripped.trim()); + break; + } + } + + let request_str = + request_line.ok_or_else(|| eyre!("No request found in test file (>> marker)"))?; + let response_str = + response_line.ok_or_else(|| eyre!("No response found in test file (<< marker)"))?; + + // Parse request + let request: Value = serde_json::from_str(request_str) + .map_err(|e| eyre!("Failed to parse request: {}", e))?; + + // Parse response + let expected_response: Value = serde_json::from_str(response_str) + .map_err(|e| eyre!("Failed to parse response: {}", e))?; + + Ok(RpcTestCase { name: String::new(), request, expected_response, spec_only }) + } + + /// Compare JSON values with special handling for numbers and errors + /// Uses iterative approach to avoid stack overflow with deeply nested structures + fn compare_json_values(actual: &Value, expected: &Value, path: &str) -> Result<()> { + // Stack to hold work items: (actual, expected, path) + let mut work_stack = vec![(actual, expected, path.to_string())]; + + while let Some((actual, expected, current_path)) = work_stack.pop() { + match (actual, expected) { + // Number comparison: handle different representations + (Value::Number(a), Value::Number(b)) => { + let a_f64 = a.as_f64().ok_or_else(|| eyre!("Invalid number"))?; + let b_f64 = b.as_f64().ok_or_else(|| eyre!("Invalid number"))?; + // Use a reasonable epsilon for floating point comparison + const EPSILON: f64 = 1e-10; + if (a_f64 - b_f64).abs() > EPSILON { + return Err(eyre!("Number mismatch at {}: {} != {}", 
current_path, a, b)); + } + } + // Array comparison + (Value::Array(a), Value::Array(b)) => { + if a.len() != b.len() { + return Err(eyre!( + "Array length mismatch at {}: {} != {}", + current_path, + a.len(), + b.len() + )); + } + // Add array elements to work stack in reverse order + // so they are processed in correct order + for (i, (av, bv)) in a.iter().zip(b.iter()).enumerate().rev() { + work_stack.push((av, bv, format!("{current_path}[{i}]"))); + } + } + // Object comparison + (Value::Object(a), Value::Object(b)) => { + // Check all keys in expected are present in actual + for (key, expected_val) in b { + if let Some(actual_val) = a.get(key) { + work_stack.push(( + actual_val, + expected_val, + format!("{current_path}.{key}"), + )); + } else { + return Err(eyre!("Missing key at {}.{}", current_path, key)); + } + } + } + // Direct value comparison + (a, b) => { + if a != b { + return Err(eyre!("Value mismatch at {}: {:?} != {:?}", current_path, a, b)); + } + } + } + } + Ok(()) + } + + /// Execute a single test case + async fn execute_test_case( + &self, + test_case: &RpcTestCase, + env: &Environment, + ) -> Result<()> { + let node_client = &env.node_clients[env.active_node_idx]; + + // Extract method and params from request + let method = test_case + .request + .get("method") + .and_then(|v| v.as_str()) + .ok_or_else(|| eyre!("Request missing method field"))?; + + let params = test_case.request.get("params").cloned().unwrap_or(Value::Array(vec![])); + + // Make the RPC request using jsonrpsee + // We need to handle the case where the RPC might return an error + use jsonrpsee::core::params::ArrayParams; + + let response_result: Result = match params { + Value::Array(ref arr) => { + // Use ArrayParams for array parameters + let mut array_params = ArrayParams::new(); + for param in arr { + array_params + .insert(param.clone()) + .map_err(|e| eyre!("Failed to insert param: {}", e))?; + } + node_client.rpc.request(method, array_params).await + } + _ => { + // For non-array params, wrap in an array + let mut array_params = ArrayParams::new(); + array_params.insert(params).map_err(|e| eyre!("Failed to insert param: {}", e))?; + node_client.rpc.request(method, array_params).await + } + }; + + // Build actual response object to match execution-apis format + let actual_response = match response_result { + Ok(response) => { + serde_json::json!({ + "jsonrpc": "2.0", + "id": test_case.request.get("id").cloned().unwrap_or(Value::Null), + "result": response + }) + } + Err(err) => { + // RPC error - build error response + serde_json::json!({ + "jsonrpc": "2.0", + "id": test_case.request.get("id").cloned().unwrap_or(Value::Null), + "error": { + "code": -32000, // Generic error code + "message": err.to_string() + } + }) + } + }; + + // Compare responses + let expected_result = test_case.expected_response.get("result"); + let expected_error = test_case.expected_response.get("error"); + let actual_result = actual_response.get("result"); + let actual_error = actual_response.get("error"); + + match (expected_result, expected_error) { + (Some(expected), None) => { + // Expected success response + if let Some(actual) = actual_result { + Self::compare_json_values(actual, expected, "result")?; + } else if let Some(error) = actual_error { + return Err(eyre!("Expected success response but got error: {}", error)); + } else { + return Err(eyre!("Expected success response but got neither result nor error")); + } + } + (None, Some(_)) => { + // Expected error response - just check that we got an error + if 
actual_error.is_none() { + return Err(eyre!("Expected error response but got success")); + } + debug!("Both responses are errors (expected behavior)"); + } + _ => { + return Err(eyre!("Invalid expected response format")); + } + } + + Ok(()) + } +} + +impl Action for RunRpcCompatTests +where + Engine: EngineTypes, +{ + fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { + Box::pin(async move { + let mut total_tests = 0; + let mut passed_tests = 0; + + for method in &self.methods { + info!("Running RPC compatibility tests for {}", method); + + let method_dir = Path::new(&self.test_data_path).join(method); + if !method_dir.exists() { + return Err(eyre!("Test directory does not exist: {}", method_dir.display())); + } + + // Read all .io files in the method directory + let entries = std::fs::read_dir(&method_dir) + .map_err(|e| eyre!("Failed to read directory: {}", e))?; + + for entry in entries { + let entry = entry?; + let path = entry.path(); + + if path.extension().and_then(|s| s.to_str()) == Some("io") { + let test_name = path + .file_stem() + .and_then(|s| s.to_str()) + .unwrap_or("unknown") + .to_string(); + + let content = std::fs::read_to_string(&path) + .map_err(|e| eyre!("Failed to read test file: {}", e))?; + + match Self::parse_io_file(&content) { + Ok(mut test_case) => { + test_case.name = test_name.clone(); + total_tests += 1; + + match self.execute_test_case(&test_case, env).await { + Ok(_) => { + info!("✓ {}/{}: PASS", method, test_name); + passed_tests += 1; + } + Err(e) => { + info!("✗ {}/{}: FAIL - {}", method, test_name, e); + + if self.fail_fast { + return Err(eyre!("Test failed (fail-fast enabled)")); + } + } + } + } + Err(e) => { + info!("✗ {}/{}: PARSE ERROR - {}", method, test_name, e); + if self.fail_fast { + return Err(e); + } + } + } + } + } + } + + info!("RPC compatibility test results: {}/{} passed", passed_tests, total_tests); + + if passed_tests < total_tests { + return Err(eyre!("Some tests failed: {}/{} passed", passed_tests, total_tests)); + } + + Ok(()) + }) + } +} + +/// Action to initialize the chain from execution-apis test data +#[derive(Debug)] +pub struct InitializeFromExecutionApis { + /// Path to the base.rlp file (if different from default) + pub chain_rlp_path: Option, + /// Path to the headfcu.json file (if different from default) + pub fcu_json_path: Option, +} + +impl Default for InitializeFromExecutionApis { + fn default() -> Self { + Self::new() + } +} + +impl InitializeFromExecutionApis { + /// Create with default paths (assumes execution-apis/tests structure) + pub const fn new() -> Self { + Self { chain_rlp_path: None, fcu_json_path: None } + } + + /// Set custom chain RLP path + pub fn with_chain_rlp(mut self, path: impl Into) -> Self { + self.chain_rlp_path = Some(path.into()); + self + } + + /// Set custom FCU JSON path + pub fn with_fcu_json(mut self, path: impl Into) -> Self { + self.fcu_json_path = Some(path.into()); + self + } +} + +impl Action for InitializeFromExecutionApis +where + Engine: EngineTypes, +{ + fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { + Box::pin(async move { + // Load forkchoice state + let fcu_path = self + .fcu_json_path + .as_ref() + .map(Path::new) + .ok_or_else(|| eyre!("FCU JSON path is required"))?; + + let fcu_state = reth_e2e_test_utils::setup_import::load_forkchoice_state(fcu_path)?; + + info!( + "Applying forkchoice state - head: {}, safe: {}, finalized: {}", + fcu_state.head_block_hash, + fcu_state.safe_block_hash, + 
+
+impl<Engine> Action<Engine> for RunRpcCompatTests
+where
+    Engine: EngineTypes,
+{
+    fn execute<'a>(&'a mut self, env: &'a mut Environment<Engine>) -> BoxFuture<'a, Result<()>> {
+        Box::pin(async move {
+            let mut total_tests = 0;
+            let mut passed_tests = 0;
+
+            for method in &self.methods {
+                info!("Running RPC compatibility tests for {}", method);
+
+                let method_dir = Path::new(&self.test_data_path).join(method);
+                if !method_dir.exists() {
+                    return Err(eyre!("Test directory does not exist: {}", method_dir.display()));
+                }
+
+                // Read all .io files in the method directory
+                let entries = std::fs::read_dir(&method_dir)
+                    .map_err(|e| eyre!("Failed to read directory: {}", e))?;
+
+                for entry in entries {
+                    let entry = entry?;
+                    let path = entry.path();
+
+                    if path.extension().and_then(|s| s.to_str()) == Some("io") {
+                        let test_name = path
+                            .file_stem()
+                            .and_then(|s| s.to_str())
+                            .unwrap_or("unknown")
+                            .to_string();
+
+                        let content = std::fs::read_to_string(&path)
+                            .map_err(|e| eyre!("Failed to read test file: {}", e))?;
+
+                        match Self::parse_io_file(&content) {
+                            Ok(mut test_case) => {
+                                test_case.name = test_name.clone();
+                                total_tests += 1;
+
+                                match self.execute_test_case(&test_case, env).await {
+                                    Ok(_) => {
+                                        info!("✓ {}/{}: PASS", method, test_name);
+                                        passed_tests += 1;
+                                    }
+                                    Err(e) => {
+                                        info!("✗ {}/{}: FAIL - {}", method, test_name, e);
+
+                                        if self.fail_fast {
+                                            return Err(eyre!("Test failed (fail-fast enabled)"));
+                                        }
+                                    }
+                                }
+                            }
+                            Err(e) => {
+                                info!("✗ {}/{}: PARSE ERROR - {}", method, test_name, e);
+                                if self.fail_fast {
+                                    return Err(e);
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+
+            info!("RPC compatibility test results: {}/{} passed", passed_tests, total_tests);
+
+            if passed_tests < total_tests {
+                return Err(eyre!("Some tests failed: {}/{} passed", passed_tests, total_tests));
+            }
+
+            Ok(())
+        })
+    }
+}
+
+/// Action to initialize the chain from execution-apis test data
+#[derive(Debug)]
+pub struct InitializeFromExecutionApis {
+    /// Path to the base.rlp file (if different from default)
+    pub chain_rlp_path: Option<String>,
+    /// Path to the headfcu.json file (if different from default)
+    pub fcu_json_path: Option<String>,
+}
+
+impl Default for InitializeFromExecutionApis {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl InitializeFromExecutionApis {
+    /// Create with default paths (assumes execution-apis/tests structure)
+    pub const fn new() -> Self {
+        Self { chain_rlp_path: None, fcu_json_path: None }
+    }
+
+    /// Set custom chain RLP path
+    pub fn with_chain_rlp(mut self, path: impl Into<String>) -> Self {
+        self.chain_rlp_path = Some(path.into());
+        self
+    }
+
+    /// Set custom FCU JSON path
+    pub fn with_fcu_json(mut self, path: impl Into<String>) -> Self {
+        self.fcu_json_path = Some(path.into());
+        self
+    }
+}
+
+impl<Engine> Action<Engine> for InitializeFromExecutionApis
+where
+    Engine: EngineTypes,
+{
+    fn execute<'a>(&'a mut self, env: &'a mut Environment<Engine>) -> BoxFuture<'a, Result<()>> {
+        Box::pin(async move {
+            // Load forkchoice state
+            let fcu_path = self
+                .fcu_json_path
+                .as_ref()
+                .map(Path::new)
+                .ok_or_else(|| eyre!("FCU JSON path is required"))?;
+
+            let fcu_state = reth_e2e_test_utils::setup_import::load_forkchoice_state(fcu_path)?;
+
+            info!(
+                "Applying forkchoice state - head: {}, safe: {}, finalized: {}",
+                fcu_state.head_block_hash,
+                fcu_state.safe_block_hash,
+                fcu_state.finalized_block_hash
+            );
+
+            // Apply forkchoice update to each node
+            for (idx, client) in env.node_clients.iter().enumerate() {
+                debug!("Applying forkchoice update to node {}", idx);
+
+                // Wait for the node to finish syncing imported blocks
+                let mut retries = 0;
+                const MAX_RETRIES: u32 = 10;
+                const RETRY_DELAY_MS: u64 = 500;
+
+                loop {
+                    let response =
+                        reth_rpc_api::clients::EngineApiClient::<Engine>::fork_choice_updated_v3(
+                            &client.engine.http_client(),
+                            fcu_state,
+                            None,
+                        )
+                        .await
+                        .map_err(|e| eyre!("Failed to update forkchoice on node {}: {}", idx, e))?;
+
+                    match response.payload_status.status {
+                        alloy_rpc_types_engine::PayloadStatusEnum::Valid => {
+                            debug!("Forkchoice update successful on node {}", idx);
+                            break;
+                        }
+                        alloy_rpc_types_engine::PayloadStatusEnum::Syncing => {
+                            if retries >= MAX_RETRIES {
+                                return Err(eyre!(
+                                    "Node {} still syncing after {} retries",
+                                    idx,
+                                    MAX_RETRIES
+                                ));
+                            }
+                            debug!("Node {} is syncing, retrying in {}ms...", idx, RETRY_DELAY_MS);
+                            tokio::time::sleep(std::time::Duration::from_millis(RETRY_DELAY_MS))
+                                .await;
+                            retries += 1;
+                        }
+                        _ => {
+                            return Err(eyre!(
+                                "Invalid forkchoice state on node {}: {:?}",
+                                idx,
+                                response.payload_status
+                            ));
+                        }
+                    }
+                }
+            }
+
+            // Update environment state
+            env.active_node_state_mut()?.current_block_info = Some(BlockInfo {
+                hash: fcu_state.head_block_hash,
+                number: 0, // Will be updated when we fetch the actual block
+                timestamp: 0,
+            });
+
+            info!("Successfully initialized chain from execution-apis test data");
+            Ok(())
+        })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use serde_json::json;
+
+    #[test]
+    fn test_compare_json_values_deeply_nested() {
+        // Test that the iterative comparison handles deeply nested structures
+        // without stack overflow
+        let mut nested = json!({"value": 0});
+        let mut expected = json!({"value": 0});
+
+        // Create a deeply nested structure
+        for i in 1..1000 {
+            nested = json!({"level": i, "nested": nested});
+            expected = json!({"level": i, "nested": expected});
+        }
+
+        // Should not panic with stack overflow
+        RunRpcCompatTests::compare_json_values(&nested, &expected, "root").unwrap();
+    }
+
+    #[test]
+    fn test_compare_json_values_arrays() {
+        // Test array comparison
+        let actual = json!([1, 2, 3, 4, 5]);
+        let expected = json!([1, 2, 3, 4, 5]);
+
+        RunRpcCompatTests::compare_json_values(&actual, &expected, "root").unwrap();
+
+        // Test array length mismatch
+        let actual = json!([1, 2, 3]);
+        let expected = json!([1, 2, 3, 4, 5]);
+
+        let result = RunRpcCompatTests::compare_json_values(&actual, &expected, "root");
+        assert!(result.is_err());
+        assert!(result.unwrap_err().to_string().contains("Array length mismatch"));
+    }
+
+    #[test]
+    fn test_compare_json_values_objects() {
+        // Test object comparison
+        let actual = json!({"a": 1, "b": 2, "c": 3});
+        let expected = json!({"a": 1, "b": 2, "c": 3});
+
+        RunRpcCompatTests::compare_json_values(&actual, &expected, "root").unwrap();
+
+        // Test missing key
+        let actual = json!({"a": 1, "b": 2});
+        let expected = json!({"a": 1, "b": 2, "c": 3});
+
+        let result = RunRpcCompatTests::compare_json_values(&actual, &expected, "root");
+        assert!(result.is_err());
+        assert!(result.unwrap_err().to_string().contains("Missing key"));
+    }
+
+    #[test]
+    fn test_compare_json_values_numbers() {
+        // Test number comparison with floating point
+        let actual = json!({"value": 1.00000000001});
+        let expected = json!({"value": 1.0});
+
+        // Should be equal within epsilon (1e-10)
+        RunRpcCompatTests::compare_json_values(&actual, &expected, "root").unwrap();
+
+        // Test significant difference
+        let actual = json!({"value": 1.1});
+        let expected = json!({"value": 1.0});
+
+        let result = RunRpcCompatTests::compare_json_values(&actual, &expected, "root");
+        assert!(result.is_err());
+        assert!(result.unwrap_err().to_string().contains("Number mismatch"));
+    }
+}
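Aside: the runner above resolves cases as `{test_data_path}/{method}/*.io`, so the fixtures added below are laid out one directory per RPC method next to the chain data; roughly:

```text
testdata/rpc-compat/
├── chain.rlp            # RLP-encoded chain imported at node startup (binary)
├── forkenv.json         # hive-style fork configuration for the test chain
├── genesis.json         # genesis/chain spec the nodes boot with
├── headfcu.json         # engine_forkchoiceUpdatedV3 request pointing at the chain head
└── eth_getLogs/         # one directory per tested RPC method
    ├── contract-addr.io
    ├── no-topics.io
    ├── topic-exact-match.io
    └── topic-wildcard.io
```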
diff --git a/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/chain.rlp b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/chain.rlp
new file mode 100644
index 00000000000..ae681adf9f0
Binary files /dev/null and b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/chain.rlp differ
diff --git a/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_getLogs/contract-addr.io b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_getLogs/contract-addr.io
new file mode 100644
index 00000000000..674a7eb4f81
--- /dev/null
+++ b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_getLogs/contract-addr.io
@@ -0,0 +1,3 @@
+// queries for logs from a specific contract across a range of blocks
+>> {"jsonrpc":"2.0","id":1,"method":"eth_getLogs","params":[{"address":["0x7dcd17433742f4c0ca53122ab541d0ba67fc27df"],"fromBlock":"0x1","toBlock":"0x4","topics":null}]}
+<< {"jsonrpc":"2.0","id":1,"result":[{"address":"0x7dcd17433742f4c0ca53122ab541d0ba67fc27df","topics":["0x00000000000000000000000000000000000000000000000000000000656d6974","0xf4da19d6c17928e683661a52829cf391d3dc26d581152b81ce595a1207944f09"],"data":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x2","transactionHash":"0x5bc704d4eb4ce7fe319705d2f888516961426a177f2799c9f934b5df7466dd33","transactionIndex":"0x2","blockHash":"0x28a64e8d846382eb270941251be3a3e1547809e7eb70939c3530faa8f4599570","logIndex":"0xa","removed":false},{"address":"0x7dcd17433742f4c0ca53122ab541d0ba67fc27df","topics":["0x00000000000000000000000000000000000000000000000000000000656d6974","0x4238ace0bf7e66fd40fea01bdf43f4f30423f48432efd0da3af5fcb17a977fd4"],"data":"0x0000000000000000000000000000000000000000000000000000000000000001","blockNumber":"0x4","transactionHash":"0xf047c5133c96c405a79d01038b4ccf8208c03e296dd9f6bea083727c9513f805","transactionIndex":"0x0","blockHash":"0x94540b21748e45497c41518ed68b2a0c16d728e917b665ae50d51f6895242e53","logIndex":"0x0","removed":false}]}
diff --git a/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_getLogs/no-topics.io b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_getLogs/no-topics.io
new file mode 100644
index 00000000000..89ec5bcd058
--- /dev/null
+++ b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_getLogs/no-topics.io
@@ -0,0 +1,3 @@
+// queries for all logs across a range of blocks
+>> {"jsonrpc":"2.0","id":1,"method":"eth_getLogs","params":[{"address":null,"fromBlock":"0x1","toBlock":"0x3","topics":null}]}
+<< 
{"jsonrpc":"2.0","id":1,"result":[{"address":"0x882e7e5d12617c267a72948e716f231fa79e6d51","topics":["0xabbb5caa7dda850e60932de0934eb1f9d0f59695050f761dc64e443e5030a569"],"data":"0x0000000000000000000000000000000000000000000000000000000000000001","blockNumber":"0x2","transactionHash":"0x25d8b4a27c4578e5de6441f98881cf050ab2d9f28ceb28559ece0b65f555e9d8","transactionIndex":"0x0","blockHash":"0x28a64e8d846382eb270941251be3a3e1547809e7eb70939c3530faa8f4599570","logIndex":"0x0","removed":false},{"address":"0x882e7e5d12617c267a72948e716f231fa79e6d51","topics":["0xd9d16d34ffb15ba3a3d852f0d403e2ce1d691fb54de27ac87cd2f993f3ec330f"],"data":"0x0000000000000000000000000000000000000000000000000000000000000002","blockNumber":"0x2","transactionHash":"0x25d8b4a27c4578e5de6441f98881cf050ab2d9f28ceb28559ece0b65f555e9d8","transactionIndex":"0x0","blockHash":"0x28a64e8d846382eb270941251be3a3e1547809e7eb70939c3530faa8f4599570","logIndex":"0x1","removed":false},{"address":"0x882e7e5d12617c267a72948e716f231fa79e6d51","topics":["0x679795a0195a1b76cdebb7c51d74e058aee92919b8c3389af86ef24535e8a28c"],"data":"0x0000000000000000000000000000000000000000000000000000000000000003","blockNumber":"0x2","transactionHash":"0x25d8b4a27c4578e5de6441f98881cf050ab2d9f28ceb28559ece0b65f555e9d8","transactionIndex":"0x0","blockHash":"0x28a64e8d846382eb270941251be3a3e1547809e7eb70939c3530faa8f4599570","logIndex":"0x2","removed":false},{"address":"0x882e7e5d12617c267a72948e716f231fa79e6d51","topics":["0xc3a24b0501bd2c13a7e57f2db4369ec4c223447539fc0724a9d55ac4a06ebd4d"],"data":"0x0000000000000000000000000000000000000000000000000000000000000004","blockNumber":"0x2","transactionHash":"0x25d8b4a27c4578e5de6441f98881cf050ab2d9f28ceb28559ece0b65f555e9d8","transactionIndex":"0x0","blockHash":"0x28a64e8d846382eb270941251be3a3e1547809e7eb70939c3530faa8f4599570","logIndex":"0x3","removed":false},{"address":"0x882e7e5d12617c267a72948e716f231fa79e6d51","topics":["0x91da3fd0782e51c6b3986e9e672fd566868e71f3dbc2d6c2cd6fbb3e361af2a7"],"data":"0x0000000000000000000000000000000000000000000000000000000000000005","blockNumber":"0x2","transactionHash":"0x25d8b4a27c4578e5de6441f98881cf050ab2d9f28ceb28559ece0b65f555e9d8","transactionIndex":"0x0","blockHash":"0x28a64e8d846382eb270941251be3a3e1547809e7eb70939c3530faa8f4599570","logIndex":"0x4","removed":false},{"address":"0x882e7e5d12617c267a72948e716f231fa79e6d51","topics":["0x89832631fb3c3307a103ba2c84ab569c64d6182a18893dcd163f0f1c2090733a"],"data":"0x0000000000000000000000000000000000000000000000000000000000000006","blockNumber":"0x2","transactionHash":"0x25d8b4a27c4578e5de6441f98881cf050ab2d9f28ceb28559ece0b65f555e9d8","transactionIndex":"0x0","blockHash":"0x28a64e8d846382eb270941251be3a3e1547809e7eb70939c3530faa8f4599570","logIndex":"0x5","removed":false},{"address":"0x882e7e5d12617c267a72948e716f231fa79e6d51","topics":["0x8819ef417987f8ae7a81f42cdfb18815282fe989326fbff903d13cf0e03ace29"],"data":"0x0000000000000000000000000000000000000000000000000000000000000007","blockNumber":"0x2","transactionHash":"0x25d8b4a27c4578e5de6441f98881cf050ab2d9f28ceb28559ece0b65f555e9d8","transactionIndex":"0x0","blockHash":"0x28a64e8d846382eb270941251be3a3e1547809e7eb70939c3530faa8f4599570","logIndex":"0x6","removed":false},{"address":"0x882e7e5d12617c267a72948e716f231fa79e6d51","topics":["0xb7c774451310d1be4108bc180d1b52823cb0ee0274a6c0081bcaf94f115fb96d"],"data":"0x0000000000000000000000000000000000000000000000000000000000000008","blockNumber":"0x2","transactionHash":"0x25d8b4a27c4578e5de6441f98881cf050ab2d9f28ceb28559ece0b6
5f555e9d8","transactionIndex":"0x0","blockHash":"0x28a64e8d846382eb270941251be3a3e1547809e7eb70939c3530faa8f4599570","logIndex":"0x7","removed":false},{"address":"0x882e7e5d12617c267a72948e716f231fa79e6d51","topics":["0x6add646517a5b0f6793cd5891b7937d28a5b2981a5d88ebc7cd776088fea9041"],"data":"0x0000000000000000000000000000000000000000000000000000000000000009","blockNumber":"0x2","transactionHash":"0x25d8b4a27c4578e5de6441f98881cf050ab2d9f28ceb28559ece0b65f555e9d8","transactionIndex":"0x0","blockHash":"0x28a64e8d846382eb270941251be3a3e1547809e7eb70939c3530faa8f4599570","logIndex":"0x8","removed":false},{"address":"0x882e7e5d12617c267a72948e716f231fa79e6d51","topics":["0x6cde3cea4b3a3fb2488b2808bae7556f4a405e50f65e1794383bc026131b13c3"],"data":"0x000000000000000000000000000000000000000000000000000000000000000a","blockNumber":"0x2","transactionHash":"0x25d8b4a27c4578e5de6441f98881cf050ab2d9f28ceb28559ece0b65f555e9d8","transactionIndex":"0x0","blockHash":"0x28a64e8d846382eb270941251be3a3e1547809e7eb70939c3530faa8f4599570","logIndex":"0x9","removed":false},{"address":"0x7dcd17433742f4c0ca53122ab541d0ba67fc27df","topics":["0x00000000000000000000000000000000000000000000000000000000656d6974","0xf4da19d6c17928e683661a52829cf391d3dc26d581152b81ce595a1207944f09"],"data":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x2","transactionHash":"0x5bc704d4eb4ce7fe319705d2f888516961426a177f2799c9f934b5df7466dd33","transactionIndex":"0x2","blockHash":"0x28a64e8d846382eb270941251be3a3e1547809e7eb70939c3530faa8f4599570","logIndex":"0xa","removed":false},{"address":"0xa788ca96a910bac854f95b794776c1ad847dcdd5","topics":["0x101e368776582e57ab3d116ffe2517c0a585cd5b23174b01e275c2d8329c3d83"],"data":"0x0000000000000000000000000000000000000000000000000000000000000001","blockNumber":"0x3","transactionHash":"0xfecc4d4439d77962b8c27e7e652294717fcd75379cab400bbefb2975a960344c","transactionIndex":"0x1","blockHash":"0x30adf30837c967524cbcf881c024c194eee010b3750feef2e45a674979b2cd36","logIndex":"0x0","removed":false},{"address":"0xa788ca96a910bac854f95b794776c1ad847dcdd5","topics":["0x7dfe757ecd65cbd7922a9c0161e935dd7fdbcc0e999689c7d31633896b1fc60b"],"data":"0x0000000000000000000000000000000000000000000000000000000000000002","blockNumber":"0x3","transactionHash":"0xfecc4d4439d77962b8c27e7e652294717fcd75379cab400bbefb2975a960344c","transactionIndex":"0x1","blockHash":"0x30adf30837c967524cbcf881c024c194eee010b3750feef2e45a674979b2cd36","logIndex":"0x1","removed":false},{"address":"0xa788ca96a910bac854f95b794776c1ad847dcdd5","topics":["0x88601476d11616a71c5be67555bd1dff4b1cbf21533d2669b768b61518cfe1c3"],"data":"0x0000000000000000000000000000000000000000000000000000000000000003","blockNumber":"0x3","transactionHash":"0xfecc4d4439d77962b8c27e7e652294717fcd75379cab400bbefb2975a960344c","transactionIndex":"0x1","blockHash":"0x30adf30837c967524cbcf881c024c194eee010b3750feef2e45a674979b2cd36","logIndex":"0x2","removed":false},{"address":"0xa788ca96a910bac854f95b794776c1ad847dcdd5","topics":["0xcbc4e5fb02c3d1de23a9f1e014b4d2ee5aeaea9505df5e855c9210bf472495af"],"data":"0x0000000000000000000000000000000000000000000000000000000000000004","blockNumber":"0x3","transactionHash":"0xfecc4d4439d77962b8c27e7e652294717fcd75379cab400bbefb2975a960344c","transactionIndex":"0x1","blockHash":"0x30adf30837c967524cbcf881c024c194eee010b3750feef2e45a674979b2cd36","logIndex":"0x3","removed":false},{"address":"0xa788ca96a910bac854f95b794776c1ad847dcdd5","topics":["0x2e174c10e159ea99b867ce3205125c24a42d128804e4070ed6fcc
8cc98166aa0"],"data":"0x0000000000000000000000000000000000000000000000000000000000000005","blockNumber":"0x3","transactionHash":"0xfecc4d4439d77962b8c27e7e652294717fcd75379cab400bbefb2975a960344c","transactionIndex":"0x1","blockHash":"0x30adf30837c967524cbcf881c024c194eee010b3750feef2e45a674979b2cd36","logIndex":"0x4","removed":false},{"address":"0xa788ca96a910bac854f95b794776c1ad847dcdd5","topics":["0xa9bc9a3a348c357ba16b37005d7e6b3236198c0e939f4af8c5f19b8deeb8ebc0"],"data":"0x0000000000000000000000000000000000000000000000000000000000000006","blockNumber":"0x3","transactionHash":"0xfecc4d4439d77962b8c27e7e652294717fcd75379cab400bbefb2975a960344c","transactionIndex":"0x1","blockHash":"0x30adf30837c967524cbcf881c024c194eee010b3750feef2e45a674979b2cd36","logIndex":"0x5","removed":false},{"address":"0xa788ca96a910bac854f95b794776c1ad847dcdd5","topics":["0x75f96ab15d697e93042dc45b5c896c4b27e89bb6eaf39475c5c371cb2513f7d2"],"data":"0x0000000000000000000000000000000000000000000000000000000000000007","blockNumber":"0x3","transactionHash":"0xfecc4d4439d77962b8c27e7e652294717fcd75379cab400bbefb2975a960344c","transactionIndex":"0x1","blockHash":"0x30adf30837c967524cbcf881c024c194eee010b3750feef2e45a674979b2cd36","logIndex":"0x6","removed":false},{"address":"0xa788ca96a910bac854f95b794776c1ad847dcdd5","topics":["0x3be6fd20d5acfde5b873b48692cd31f4d3c7e8ee8a813af4696af8859e5ca6c6"],"data":"0x0000000000000000000000000000000000000000000000000000000000000008","blockNumber":"0x3","transactionHash":"0xfecc4d4439d77962b8c27e7e652294717fcd75379cab400bbefb2975a960344c","transactionIndex":"0x1","blockHash":"0x30adf30837c967524cbcf881c024c194eee010b3750feef2e45a674979b2cd36","logIndex":"0x7","removed":false},{"address":"0xa788ca96a910bac854f95b794776c1ad847dcdd5","topics":["0x625b35f5e76f098dd7c3a05b10e2e5e78a4a01228d60c3b143426cdf36d26455"],"data":"0x0000000000000000000000000000000000000000000000000000000000000009","blockNumber":"0x3","transactionHash":"0xfecc4d4439d77962b8c27e7e652294717fcd75379cab400bbefb2975a960344c","transactionIndex":"0x1","blockHash":"0x30adf30837c967524cbcf881c024c194eee010b3750feef2e45a674979b2cd36","logIndex":"0x8","removed":false},{"address":"0xa788ca96a910bac854f95b794776c1ad847dcdd5","topics":["0xc575c31fea594a6eb97c8e9d3f9caee4c16218c6ef37e923234c0fe9014a61e7"],"data":"0x000000000000000000000000000000000000000000000000000000000000000a","blockNumber":"0x3","transactionHash":"0xfecc4d4439d77962b8c27e7e652294717fcd75379cab400bbefb2975a960344c","transactionIndex":"0x1","blockHash":"0x30adf30837c967524cbcf881c024c194eee010b3750feef2e45a674979b2cd36","logIndex":"0x9","removed":false}]} diff --git a/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_getLogs/topic-exact-match.io b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_getLogs/topic-exact-match.io new file mode 100644 index 00000000000..30366e8005e --- /dev/null +++ b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_getLogs/topic-exact-match.io @@ -0,0 +1,3 @@ +// queries for logs with two topics, with both topics set explicitly +>> {"jsonrpc":"2.0","id":1,"method":"eth_getLogs","params":[{"address":null,"fromBlock":"0x3","toBlock":"0x6","topics":[["0x00000000000000000000000000000000000000000000000000000000656d6974"],["0x4238ace0bf7e66fd40fea01bdf43f4f30423f48432efd0da3af5fcb17a977fd4"]]}]} +<< 
{"jsonrpc":"2.0","id":1,"result":[{"address":"0x7dcd17433742f4c0ca53122ab541d0ba67fc27df","topics":["0x00000000000000000000000000000000000000000000000000000000656d6974","0x4238ace0bf7e66fd40fea01bdf43f4f30423f48432efd0da3af5fcb17a977fd4"],"data":"0x0000000000000000000000000000000000000000000000000000000000000001","blockNumber":"0x4","transactionHash":"0xf047c5133c96c405a79d01038b4ccf8208c03e296dd9f6bea083727c9513f805","transactionIndex":"0x0","blockHash":"0x94540b21748e45497c41518ed68b2a0c16d728e917b665ae50d51f6895242e53","logIndex":"0x0","removed":false}]} diff --git a/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_getLogs/topic-wildcard.io b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_getLogs/topic-wildcard.io new file mode 100644 index 00000000000..9a798698c25 --- /dev/null +++ b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_getLogs/topic-wildcard.io @@ -0,0 +1,3 @@ +// queries for logs with two topics, performing a wildcard match in topic position zero +>> {"jsonrpc":"2.0","id":1,"method":"eth_getLogs","params":[{"address":null,"fromBlock":"0x3","toBlock":"0x6","topics":[[],["0x4238ace0bf7e66fd40fea01bdf43f4f30423f48432efd0da3af5fcb17a977fd4"]]}]} +<< {"jsonrpc":"2.0","id":1,"result":[{"address":"0x7dcd17433742f4c0ca53122ab541d0ba67fc27df","topics":["0x00000000000000000000000000000000000000000000000000000000656d6974","0x4238ace0bf7e66fd40fea01bdf43f4f30423f48432efd0da3af5fcb17a977fd4"],"data":"0x0000000000000000000000000000000000000000000000000000000000000001","blockNumber":"0x4","transactionHash":"0xf047c5133c96c405a79d01038b4ccf8208c03e296dd9f6bea083727c9513f805","transactionIndex":"0x0","blockHash":"0x94540b21748e45497c41518ed68b2a0c16d728e917b665ae50d51f6895242e53","logIndex":"0x0","removed":false}]} diff --git a/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/forkenv.json b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/forkenv.json new file mode 100644 index 00000000000..3da23534337 --- /dev/null +++ b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/forkenv.json @@ -0,0 +1,27 @@ +{ + "HIVE_CANCUN_BLOB_BASE_FEE_UPDATE_FRACTION": "3338477", + "HIVE_CANCUN_BLOB_MAX": "6", + "HIVE_CANCUN_BLOB_TARGET": "3", + "HIVE_CANCUN_TIMESTAMP": "420", + "HIVE_CHAIN_ID": "3503995874084926", + "HIVE_FORK_ARROW_GLACIER": "30", + "HIVE_FORK_BERLIN": "24", + "HIVE_FORK_BYZANTIUM": "9", + "HIVE_FORK_CONSTANTINOPLE": "12", + "HIVE_FORK_GRAY_GLACIER": "33", + "HIVE_FORK_HOMESTEAD": "0", + "HIVE_FORK_ISTANBUL": "18", + "HIVE_FORK_LONDON": "27", + "HIVE_FORK_MUIR_GLACIER": "21", + "HIVE_FORK_PETERSBURG": "15", + "HIVE_FORK_SPURIOUS": "6", + "HIVE_FORK_TANGERINE": "3", + "HIVE_MERGE_BLOCK_ID": "36", + "HIVE_NETWORK_ID": "3503995874084926", + "HIVE_PRAGUE_BLOB_BASE_FEE_UPDATE_FRACTION": "5007716", + "HIVE_PRAGUE_BLOB_MAX": "9", + "HIVE_PRAGUE_BLOB_TARGET": "6", + "HIVE_PRAGUE_TIMESTAMP": "450", + "HIVE_SHANGHAI_TIMESTAMP": "390", + "HIVE_TERMINAL_TOTAL_DIFFICULTY": "4732736" +} \ No newline at end of file diff --git a/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/genesis.json b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/genesis.json new file mode 100644 index 00000000000..0c29edcb252 --- /dev/null +++ b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/genesis.json @@ -0,0 +1,141 @@ +{ + "config": { + "chainId": 3503995874084926, + "homesteadBlock": 0, + "eip150Block": 3, + "eip155Block": 6, + "eip158Block": 6, + "byzantiumBlock": 9, + "constantinopleBlock": 12, + "petersburgBlock": 15, + "istanbulBlock": 18, + "muirGlacierBlock": 21, + "berlinBlock": 24, + "londonBlock": 27, + "arrowGlacierBlock": 30, + 
"grayGlacierBlock": 33, + "mergeNetsplitBlock": 36, + "shanghaiTime": 390, + "cancunTime": 420, + "pragueTime": 450, + "terminalTotalDifficulty": 4732736, + "depositContractAddress": "0x0000000000000000000000000000000000000000", + "ethash": {}, + "blobSchedule": { + "cancun": { + "target": 3, + "max": 6, + "baseFeeUpdateFraction": 3338477 + }, + "prague": { + "target": 6, + "max": 9, + "baseFeeUpdateFraction": 5007716 + } + } + }, + "nonce": "0x0", + "timestamp": "0x0", + "extraData": "0x68697665636861696e", + "gasLimit": "0x23f3e20", + "difficulty": "0x20000", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "coinbase": "0x0000000000000000000000000000000000000000", + "alloc": { + "00000961ef480eb55e80d19ad83579a64c007002": { + "code": "0x3373fffffffffffffffffffffffffffffffffffffffe1460cb5760115f54807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff146101f457600182026001905f5b5f82111560685781019083028483029004916001019190604d565b909390049250505036603814608857366101f457346101f4575f5260205ff35b34106101f457600154600101600155600354806003026004013381556001015f35815560010160203590553360601b5f5260385f601437604c5fa0600101600355005b6003546002548082038060101160df575060105b5f5b8181146101835782810160030260040181604c02815460601b8152601401816001015481526020019060020154807fffffffffffffffffffffffffffffffff00000000000000000000000000000000168252906010019060401c908160381c81600701538160301c81600601538160281c81600501538160201c81600401538160181c81600301538160101c81600201538160081c81600101535360010160e1565b910180921461019557906002556101a0565b90505f6002555f6003555b5f54807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff14156101cd57505f5b6001546002828201116101e25750505f6101e8565b01600290035b5f555f600155604c025ff35b5f5ffd", + "balance": "0x1" + }, + "0000bbddc7ce488642fb579f8b00f3a590007251": { + "code": "0x3373fffffffffffffffffffffffffffffffffffffffe1460d35760115f54807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1461019a57600182026001905f5b5f82111560685781019083028483029004916001019190604d565b9093900492505050366060146088573661019a573461019a575f5260205ff35b341061019a57600154600101600155600354806004026004013381556001015f358155600101602035815560010160403590553360601b5f5260605f60143760745fa0600101600355005b6003546002548082038060021160e7575060025b5f5b8181146101295782810160040260040181607402815460601b815260140181600101548152602001816002015481526020019060030154905260010160e9565b910180921461013b5790600255610146565b90505f6002555f6003555b5f54807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff141561017357505f5b6001546001828201116101885750505f61018e565b01600190035b5f555f6001556074025ff35b5f5ffd", + "balance": "0x1" + }, + "0000f90827f1c53a10cb7a02335b175320002935": { + "code": "0x3373fffffffffffffffffffffffffffffffffffffffe14604657602036036042575f35600143038111604257611fff81430311604257611fff9006545f5260205ff35b5f5ffd5b5f35611fff60014303065500", + "balance": "0x1" + }, + "000f3df6d732807ef1319fb7b8bb8522d0beac02": { + "code": "0x3373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500", + "balance": "0x2a" + }, + "0c2c51a0990aee1d73c1228de158688341557508": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "14e46043e63d0e3cdcf2530519f4cfaf35058cb2": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "16c57edf7fa9d9525378b0b81bf8a3ced0620c1c": { + "balance": 
"0xc097ce7bc90715b34b9f1000000000" + }, + "1f4924b14f34e24159387c0a4cdbaa32f3ddb0cf": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "1f5bde34b4afc686f136c7a3cb6ec376f7357759": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "2d389075be5be9f2246ad654ce152cf05990b209": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "3ae75c08b4c907eb63a8960c45b86e1e9ab6123c": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "4340ee1b812acb40a1eb561c019c327b243b92df": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "4a0f1452281bcec5bd90c3dce6162a5995bfe9df": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "4dde844b71bcdf95512fb4dc94e84fb67b512ed8": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "5f552da00dfb4d3749d9e62dcee3c918855a86a0": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "654aa64f5fbefb84c270ec74211b81ca8c44a72e": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "717f8aa2b982bee0e29f573d31df288663e1ce16": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "7435ed30a8b4aeb0877cef0c6e8cffe834eb865f": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "7dcd17433742f4c0ca53122ab541d0ba67fc27df": { + "code": "0x3680600080376000206000548082558060010160005560005263656d697460206000a2", + "balance": "0x0" + }, + "83c7e323d189f18725ac510004fdc2941f8c4a78": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "84e75c28348fb86acea1a93a39426d7d60f4cc46": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "8bebc8ba651aee624937e7d897853ac30c95a067": { + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x0000000000000000000000000000000000000000000000000000000000000002", + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + "balance": "0x1", + "nonce": "0x1" + }, + "c7b99a164efd027a93f147376cc7da7c67c6bbe0": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "d803681e487e6ac18053afc5a6cd813c86ec3e4d": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "e7d13f7aa2a838d24c59b40186a0aca1e21cffcc": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "eda8645ba6948855e3b3cd596bbb07596d59c603": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + } + }, + "number": "0x0", + "gasUsed": "0x0", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "baseFeePerGas": null, + "excessBlobGas": null, + "blobGasUsed": null +} \ No newline at end of file diff --git a/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/headfcu.json b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/headfcu.json new file mode 100644 index 00000000000..cc39610b4f1 --- /dev/null +++ b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/headfcu.json @@ -0,0 +1,13 @@ +{ + "jsonrpc": "2.0", + "id": "fcu45", + "method": "engine_forkchoiceUpdatedV3", + "params": [ + { + "headBlockHash": "0xaf51811799f22260e5b4e1f95504dae760505f102dcb2e9ca7d897d8a40124a1", + "safeBlockHash": "0xaf51811799f22260e5b4e1f95504dae760505f102dcb2e9ca7d897d8a40124a1", + "finalizedBlockHash": "0xaf51811799f22260e5b4e1f95504dae760505f102dcb2e9ca7d897d8a40124a1" + }, + null + ] +} \ No newline at end of file diff --git a/crates/rpc/rpc-e2e-tests/tests/e2e-testsuite/main.rs b/crates/rpc/rpc-e2e-tests/tests/e2e-testsuite/main.rs new file mode 100644 index 00000000000..994cd714405 --- 
/dev/null
+++ b/crates/rpc/rpc-e2e-tests/tests/e2e-testsuite/main.rs
@@ -0,0 +1,79 @@
+//! RPC compatibility tests using execution-apis test data
+
+use alloy_genesis::Genesis;
+use eyre::Result;
+use reth_chainspec::ChainSpec;
+use reth_e2e_test_utils::testsuite::{
+    actions::{MakeCanonical, UpdateBlockInfo},
+    setup::{NetworkSetup, Setup},
+    TestBuilder,
+};
+use reth_node_ethereum::{EthEngineTypes, EthereumNode};
+use reth_rpc_e2e_tests::rpc_compat::{InitializeFromExecutionApis, RunRpcCompatTests};
+use std::{path::PathBuf, sync::Arc};
+use tracing::info;
+
+/// Test `eth_getLogs` RPC method compatibility with execution-apis test data
+///
+/// This test:
+/// 1. Initializes a node with chain data from testdata (chain.rlp)
+/// 2. Applies the forkchoice state from headfcu.json
+/// 3. Runs all `eth_getLogs` test cases from the execution-apis test suite
+#[tokio::test(flavor = "multi_thread")]
+async fn test_eth_get_logs_compat() -> Result<()> {
+    reth_tracing::init_test_tracing();
+
+    // Use local test data
+    let test_data_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("testdata/rpc-compat");
+
+    assert!(test_data_path.exists(), "Test data path does not exist: {}", test_data_path.display());
+
+    info!("Using test data from: {}", test_data_path.display());
+
+    // Paths to test files
+    let chain_rlp_path = test_data_path.join("chain.rlp");
+    let fcu_json_path = test_data_path.join("headfcu.json");
+    let genesis_path = test_data_path.join("genesis.json");
+
+    // Verify required files exist
+    if !chain_rlp_path.exists() {
+        return Err(eyre::eyre!("chain.rlp not found at {}", chain_rlp_path.display()));
+    }
+    if !fcu_json_path.exists() {
+        return Err(eyre::eyre!("headfcu.json not found at {}", fcu_json_path.display()));
+    }
+    if !genesis_path.exists() {
+        return Err(eyre::eyre!("genesis.json not found at {}", genesis_path.display()));
+    }
+
+    // Load genesis from test data
+    let genesis_json = std::fs::read_to_string(&genesis_path)?;
+
+    // Parse the Genesis struct from JSON and convert it to ChainSpec
+    // This properly handles all the hardfork configuration from the config section
+    let genesis: Genesis = serde_json::from_str(&genesis_json)?;
+    let chain_spec: ChainSpec = genesis.into();
+    let chain_spec = Arc::new(chain_spec);
+
+    // Create test setup with imported chain
+    let setup = Setup::<EthEngineTypes>::default()
+        .with_chain_spec(chain_spec)
+        .with_network(NetworkSetup::single_node());
+
+    // Build and run the test
+    let test = TestBuilder::new()
+        .with_setup_and_import(setup, chain_rlp_path)
+        .with_action(UpdateBlockInfo::default())
+        .with_action(
+            InitializeFromExecutionApis::new().with_fcu_json(fcu_json_path.to_string_lossy()),
+        )
+        .with_action(MakeCanonical::new())
+        .with_action(RunRpcCompatTests::new(
+            vec!["eth_getLogs".to_string()],
+            test_data_path.to_string_lossy(),
+        ));
+
+    test.run::<EthereumNode>().await?;
+
+    Ok(())
+}
diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml
index 4304f17f707..825eb485fc2 100644
--- a/crates/rpc/rpc-engine-api/Cargo.toml
+++ b/crates/rpc/rpc-engine-api/Cargo.toml
@@ -49,7 +49,6 @@ parking_lot.workspace = true
 reth-ethereum-engine-primitives.workspace = true
 reth-provider = { workspace = true, features = ["test-utils"] }
 reth-ethereum-primitives.workspace = true
-reth-primitives-traits.workspace = true
 reth-payload-builder = { workspace = true, features = ["test-utils"] }
 reth-testing-utils.workspace = true
 alloy-rlp.workspace = true
diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs
b/crates/rpc/rpc-engine-api/src/engine_api.rs index 3066b440a45..8738e94abe9 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -61,6 +61,15 @@ pub struct EngineApi>, } +impl + EngineApi +{ + /// Returns the configured chainspec. + pub fn chain_spec(&self) -> &Arc { + &self.inner.chain_spec + } +} + impl EngineApi where @@ -147,7 +156,7 @@ where } /// Metered version of `new_payload_v1`. - async fn new_payload_v1_metered( + pub async fn new_payload_v1_metered( &self, payload: PayloadT::ExecutionData, ) -> EngineApiResult { @@ -271,6 +280,11 @@ where self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); Ok(res?) } + + /// Returns whether the engine accepts execution requests hash. + pub fn accept_execution_requests_hash(&self) -> bool { + self.inner.accept_execution_requests_hash + } } impl @@ -754,7 +768,8 @@ where .map_err(|err| EngineApiError::Internal(Box::new(err))) } - fn get_blobs_v1_metered( + /// Metered version of `get_blobs_v1`. + pub fn get_blobs_v1_metered( &self, versioned_hashes: Vec, ) -> EngineApiResult>> { @@ -788,7 +803,8 @@ where .map_err(|err| EngineApiError::Internal(Box::new(err))) } - fn get_blobs_v2_metered( + /// Metered version of `get_blobs_v2`. + pub fn get_blobs_v2_metered( &self, versioned_hashes: Vec, ) -> EngineApiResult>> { diff --git a/crates/rpc/rpc-eth-api/Cargo.toml b/crates/rpc/rpc-eth-api/Cargo.toml index af8bcb90def..a2293b46309 100644 --- a/crates/rpc/rpc-eth-api/Cargo.toml +++ b/crates/rpc/rpc-eth-api/Cargo.toml @@ -30,7 +30,9 @@ reth-rpc-server-types.workspace = true reth-network-api.workspace = true reth-node-api.workspace = true reth-trie-common = { workspace = true, features = ["eip1186"] } -reth-payload-builder.workspace = true + +# scroll +reth-scroll-evm = { workspace = true, optional = true } # ethereum alloy-evm = { workspace = true, features = ["overrides", "call-util"] } @@ -69,3 +71,4 @@ op = [ "reth-rpc-convert/op", "alloy-evm/op", ] +scroll = ["reth-scroll-evm"] diff --git a/crates/rpc/rpc-eth-api/src/bundle.rs b/crates/rpc/rpc-eth-api/src/bundle.rs index 1197d6afe50..b47ef1b3bb3 100644 --- a/crates/rpc/rpc-eth-api/src/bundle.rs +++ b/crates/rpc/rpc-eth-api/src/bundle.rs @@ -4,8 +4,8 @@ use alloy_primitives::{Bytes, B256}; use alloy_rpc_types_mev::{ - CancelBundleRequest, CancelPrivateTransactionRequest, EthBundleHash, EthCallBundle, - EthCallBundleResponse, EthSendBundle, PrivateTransactionRequest, + EthBundleHash, EthCallBundle, EthCallBundleResponse, EthCancelBundle, + EthCancelPrivateTransaction, EthSendBundle, EthSendPrivateTransaction, }; use jsonrpsee::proc_macros::rpc; @@ -43,13 +43,13 @@ pub trait EthBundleApi { /// `eth_cancelBundle` is used to prevent a submitted bundle from being included on-chain. See [bundle cancellations](https://docs.flashbots.net/flashbots-auction/advanced/bundle-cancellations) for more information. #[method(name = "cancelBundle")] - async fn cancel_bundle(&self, request: CancelBundleRequest) -> jsonrpsee::core::RpcResult<()>; + async fn cancel_bundle(&self, request: EthCancelBundle) -> jsonrpsee::core::RpcResult<()>; /// `eth_sendPrivateTransaction` is used to send a single transaction to Flashbots. Flashbots will attempt to build a block including the transaction for the next 25 blocks. See [Private Transactions](https://docs.flashbots.net/flashbots-protect/additional-documentation/eth-sendPrivateTransaction) for more info. 
#[method(name = "sendPrivateTransaction")] async fn send_private_transaction( &self, - request: PrivateTransactionRequest, + request: EthSendPrivateTransaction, ) -> jsonrpsee::core::RpcResult; /// The `eth_sendPrivateRawTransaction` method can be used to send private transactions to @@ -67,6 +67,6 @@ pub trait EthBundleApi { #[method(name = "cancelPrivateTransaction")] async fn cancel_private_transaction( &self, - request: CancelPrivateTransactionRequest, + request: EthCancelPrivateTransaction, ) -> jsonrpsee::core::RpcResult; } diff --git a/crates/rpc/rpc-eth-api/src/core.rs b/crates/rpc/rpc-eth-api/src/core.rs index 0f2b9eb3896..3e6b85bdee9 100644 --- a/crates/rpc/rpc-eth-api/src/core.rs +++ b/crates/rpc/rpc-eth-api/src/core.rs @@ -12,7 +12,7 @@ use alloy_rpc_types_eth::{ simulate::{SimulatePayload, SimulatedBlock}, state::{EvmOverrides, StateOverride}, BlockOverrides, Bundle, EIP1186AccountProofResponse, EthCallResponse, FeeHistory, Index, - StateContext, SyncStatus, TransactionRequest, Work, + StateContext, SyncStatus, Work, }; use alloy_serde::JsonStorageKey; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; @@ -214,7 +214,7 @@ pub trait EthApi, block_number: Option, ) -> RpcResult>>; @@ -222,7 +222,7 @@ pub trait EthApi, state_overrides: Option, block_overrides: Option>, @@ -233,7 +233,7 @@ pub trait EthApi, + bundles: Vec>, state_context: Option, state_override: Option, ) -> RpcResult>>; @@ -255,7 +255,7 @@ pub trait EthApi, state_override: Option, ) -> RpcResult; @@ -265,7 +265,7 @@ pub trait EthApi, state_override: Option, ) -> RpcResult; @@ -333,7 +333,7 @@ pub trait EthApi RpcResult; + async fn send_transaction(&self, request: TxReq) -> RpcResult; /// Sends signed transaction, returning its hash. #[method(name = "sendRawTransaction")] @@ -353,7 +353,7 @@ pub trait EthApi RpcResult; + async fn sign_transaction(&self, transaction: TxReq) -> RpcResult; /// Signs data via [EIP-712](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-712.md). #[method(name = "signTypedData")] @@ -656,7 +656,7 @@ where /// Handler for: `eth_simulateV1` async fn simulate_v1( &self, - payload: SimulatePayload, + payload: SimulatePayload>, block_number: Option, ) -> RpcResult>>> { trace!(target: "rpc::eth", ?block_number, "Serving eth_simulateV1"); @@ -667,7 +667,7 @@ where /// Handler for: `eth_call` async fn call( &self, - request: TransactionRequest, + request: RpcTxReq, block_number: Option, state_overrides: Option, block_overrides: Option>, @@ -685,7 +685,7 @@ where /// Handler for: `eth_callMany` async fn call_many( &self, - bundles: Vec, + bundles: Vec>>, state_context: Option, state_override: Option, ) -> RpcResult>> { @@ -696,7 +696,7 @@ where /// Handler for: `eth_createAccessList` async fn create_access_list( &self, - request: TransactionRequest, + request: RpcTxReq, block_number: Option, state_override: Option, ) -> RpcResult { @@ -707,7 +707,7 @@ where /// Handler for: `eth_estimateGas` async fn estimate_gas( &self, - request: TransactionRequest, + request: RpcTxReq, block_number: Option, state_override: Option, ) -> RpcResult { @@ -799,7 +799,7 @@ where } /// Handler for: `eth_sendTransaction` - async fn send_transaction(&self, request: TransactionRequest) -> RpcResult { + async fn send_transaction(&self, request: RpcTxReq) -> RpcResult { trace!(target: "rpc::eth", ?request, "Serving eth_sendTransaction"); Ok(EthTransactions::send_transaction(self, request).await?) 
} @@ -823,7 +823,7 @@ where } /// Handler for: `eth_signTransaction` - async fn sign_transaction(&self, request: TransactionRequest) -> RpcResult { + async fn sign_transaction(&self, request: RpcTxReq) -> RpcResult { trace!(target: "rpc::eth", ?request, "Serving eth_signTransaction"); Ok(EthTransactions::sign_transaction(self, request).await?) } diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index 91a6739b8b3..badffeda7b8 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -5,25 +5,26 @@ use crate::{ node::RpcNodeCoreExt, EthApiTypes, FromEthApiError, FullEthApiTypes, RpcBlock, RpcNodeCore, RpcReceipt, }; +use alloy_consensus::TxReceipt; use alloy_eips::BlockId; -use alloy_primitives::{Sealable, U256}; use alloy_rlp::Encodable; -use alloy_rpc_types_eth::{Block, BlockTransactions, Header, Index}; +use alloy_rpc_types_eth::{Block, BlockTransactions, Index}; use futures::Future; -use reth_evm::ConfigureEvm; use reth_node_api::BlockBody; -use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SealedBlock}; -use reth_rpc_convert::RpcConvert; +use reth_primitives_traits::{ + AlloyBlockHeader, RecoveredBlock, SealedHeader, SignedTransaction, TransactionMeta, +}; +use reth_rpc_convert::{transaction::ConvertReceiptInput, RpcConvert, RpcHeader}; use reth_storage_api::{BlockIdReader, BlockReader, ProviderHeader, ProviderReceipt, ProviderTx}; use reth_transaction_pool::{PoolTransaction, TransactionPool}; -use std::sync::Arc; +use std::{borrow::Cow, sync::Arc}; /// Result type of the fetched block receipts. pub type BlockReceiptsResult = Result>>, E>; /// Result type of the fetched block and its receipts. pub type BlockAndReceiptsResult = Result< Option<( - SealedBlock<<::Provider as BlockReader>::Block>, + Arc::Provider as BlockReader>::Block>>, Arc::Provider>>>, )>, ::Error, @@ -31,13 +32,14 @@ pub type BlockAndReceiptsResult = Result< /// Block related functions for the [`EthApiServer`](crate::EthApiServer) trait in the /// `eth_` namespace. -pub trait EthBlocks: LoadBlock { +pub trait EthBlocks: + LoadBlock> +{ /// Returns the block header for the given block id. - #[expect(clippy::type_complexity)] fn rpc_block_header( &self, block_id: BlockId, - ) -> impl Future>>, Self::Error>> + Send + ) -> impl Future>, Self::Error>> + Send where Self: FullEthApiTypes, { @@ -59,9 +61,11 @@ pub trait EthBlocks: LoadBlock { async move { let Some(block) = self.recovered_block(block_id).await? else { return Ok(None) }; - let block = block.clone_into_rpc_block(full.into(), |tx, tx_info| { - self.tx_resp_builder().fill(tx, tx_info) - })?; + let block = block.clone_into_rpc_block( + full.into(), + |tx, tx_info| self.tx_resp_builder().fill(tx, tx_info), + |header, size| self.tx_resp_builder().convert_header(header, size), + )?; Ok(Some(block)) } } @@ -80,7 +84,7 @@ pub trait EthBlocks: LoadBlock { .provider() .pending_block() .map_err(Self::Error::from_eth_err)? - .map(|block| block.body().transactions().len())) + .map(|block| block.body().transaction_count())); } let block_hash = match self @@ -109,7 +113,54 @@ pub trait EthBlocks: LoadBlock { block_id: BlockId, ) -> impl Future> + Send where - Self: LoadReceipt; + Self: LoadReceipt, + { + async move { + if let Some((block, receipts)) = self.load_block_and_receipts(block_id).await? 
{ + let block_number = block.number(); + let base_fee = block.base_fee_per_gas(); + let block_hash = block.hash(); + let excess_blob_gas = block.excess_blob_gas(); + let timestamp = block.timestamp(); + let mut gas_used = 0; + let mut next_log_index = 0; + + let inputs = block + .transactions_recovered() + .zip(receipts.iter()) + .enumerate() + .map(|(idx, (tx, receipt))| { + let meta = TransactionMeta { + tx_hash: *tx.tx_hash(), + index: idx as u64, + block_hash, + block_number, + base_fee, + excess_blob_gas, + timestamp, + }; + + let input = ConvertReceiptInput { + receipt: Cow::Borrowed(receipt), + tx, + gas_used: receipt.cumulative_gas_used() - gas_used, + next_log_index, + meta, + }; + + gas_used = receipt.cumulative_gas_used(); + next_log_index += receipt.logs().len(); + + input + }) + .collect::>(); + + return self.tx_resp_builder().convert_receipts(inputs).map(Some) + } + + Ok(None) + } + } /// Helper method that loads a block and all its receipts. fn load_block_and_receipts( @@ -130,24 +181,26 @@ pub trait EthBlocks: LoadBlock { .pending_block_and_receipts() .map_err(Self::Error::from_eth_err)? { - return Ok(Some((block, Arc::new(receipts)))); + return Ok(Some((Arc::new(block), Arc::new(receipts)))); } // If no pending block from provider, build the pending block locally. if let Some((block, receipts)) = self.local_pending_block().await? { - return Ok(Some((block.into_sealed_block(), Arc::new(receipts)))); + return Ok(Some((block, receipts))); } } if let Some(block_hash) = self.provider().block_hash_for_id(block_id).map_err(Self::Error::from_eth_err)? { - return self + if let Some((block, receipts)) = self .cache() .get_block_and_receipts(block_hash) .await - .map_err(Self::Error::from_eth_err) - .map(|b| b.map(|(b, r)| (b.clone_sealed_block(), r))) + .map_err(Self::Error::from_eth_err)? + { + return Ok(Some((block, receipts))); + } } Ok(None) @@ -195,16 +248,24 @@ pub trait EthBlocks: LoadBlock { } .unwrap_or_default(); - Ok(uncles.into_iter().nth(index.into()).map(|header| { - let block = alloy_consensus::Block::::uncle(header); - let size = U256::from(block.length()); - Block { - uncles: vec![], - header: Header::from_consensus(block.header.seal_slow(), None, Some(size)), - transactions: BlockTransactions::Uncle, - withdrawals: None, - } - })) + uncles + .into_iter() + .nth(index.into()) + .map(|header| { + let block = + alloy_consensus::Block::::uncle(header); + let size = block.length(); + let header = self + .tx_resp_builder() + .convert_header(SealedHeader::new_unhashed(block.header), size)?; + Ok(Block { + uncles: vec![], + header, + transactions: BlockTransactions::Uncle, + withdrawals: None, + }) + }) + .transpose() } } } @@ -212,15 +273,7 @@ pub trait EthBlocks: LoadBlock { /// Loads a block from database. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` blocks RPC methods. -pub trait LoadBlock: - LoadPendingBlock - + SpawnBlocking - + RpcNodeCoreExt< - Pool: TransactionPool>>, - Primitives: NodePrimitives>, - Evm: ConfigureEvm::Primitives>, - > -{ +pub trait LoadBlock: LoadPendingBlock + SpawnBlocking + RpcNodeCoreExt { /// Returns the block object for the given block id. #[expect(clippy::type_complexity)] fn recovered_block( @@ -243,7 +296,7 @@ pub trait LoadBlock: // If no pending block from provider, try to get local pending block return match self.local_pending_block().await? 
{ - Some((block, _)) => Ok(Some(Arc::new(block))), + Some((block, _)) => Ok(Some(block)), None => Ok(None), }; } diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 12d63243f1c..5cf101ba00a 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -1,6 +1,8 @@ //! Loads a pending block from database. Helper trait for `eth_` transaction, call and trace RPC //! methods. +use core::fmt; + use super::{LoadBlock, LoadPendingBlock, LoadState, LoadTransaction, SpawnBlocking, Trace}; use crate::{ helpers::estimate::EstimateCall, FromEvmError, FullEthApiTypes, RpcBlock, RpcNodeCore, @@ -9,13 +11,13 @@ use alloy_consensus::BlockHeader; use alloy_eips::eip2930::AccessListResult; use alloy_evm::{ call::caller_gas_allowance, - overrides::{apply_block_overrides, apply_state_overrides}, + overrides::{apply_block_overrides, apply_state_overrides, OverrideBlockHashes}, }; +use alloy_network::TransactionBuilder; use alloy_primitives::{Bytes, B256, U256}; use alloy_rpc_types_eth::{ simulate::{SimBlock, SimulatePayload, SimulatedBlock}, state::{EvmOverrides, StateOverride}, - transaction::TransactionRequest, BlockId, Bundle, EthCallResponse, StateContext, TransactionInfo, }; use futures::Future; @@ -24,21 +26,20 @@ use reth_evm::{ ConfigureEvm, Evm, EvmEnv, EvmEnvFor, HaltReasonFor, InspectorFor, SpecFor, TransactionEnv, TxEnvFor, }; -use reth_node_api::{BlockBody, NodePrimitives}; -use reth_primitives_traits::{Recovered, SealedHeader, SignedTransaction}; +use reth_node_api::BlockBody; +use reth_primitives_traits::{Recovered, SignedTransaction}; use reth_revm::{ database::StateProviderDatabase, db::{CacheDB, State}, - DatabaseRef, }; -use reth_rpc_convert::{RpcConvert, RpcTypes}; +use reth_rpc_convert::{RpcConvert, RpcTxReq}; use reth_rpc_eth_types::{ cache::db::{StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper}, error::{api::FromEvmHalt, ensure_success, FromEthApiError}, simulate::{self, EthSimulateError}, EthApiError, RevertError, RpcInvalidTransactionError, StateCacheDb, }; -use reth_storage_api::{BlockIdReader, ProviderHeader, ProviderTx}; +use reth_storage_api::{BlockIdReader, ProviderTx}; use revm::{ context_interface::{ result::{ExecutionResult, ResultAndState}, @@ -58,7 +59,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA /// Estimate gas needed for execution of the `request` at the [`BlockId`]. 
fn estimate_gas_at( &self, - request: TransactionRequest, + request: RpcTxReq<::Network>, at: BlockId, state_override: Option, ) -> impl Future> + Send { @@ -71,7 +72,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA /// See also: fn simulate_v1( &self, - payload: SimulatePayload, + payload: SimulatePayload::Network>>, block: Option, ) -> impl Future> + Send { async move { @@ -143,9 +144,10 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA let chain_id = evm_env.cfg_env.chain_id; let default_gas_limit = { - let total_specified_gas = calls.iter().filter_map(|tx| tx.gas).sum::(); + let total_specified_gas = + calls.iter().filter_map(|tx| tx.as_ref().gas_limit()).sum::(); let txs_without_gas_limit = - calls.iter().filter(|tx| tx.gas.is_none()).count(); + calls.iter().filter(|tx| tx.as_ref().gas_limit().is_none()).count(); if total_specified_gas > block_gas_limit { return Err(EthApiError::Other(Box::new( @@ -191,6 +193,8 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA )? }; + parent = result.block.clone_sealed_header(); + let block = simulate::build_simulated_block( result.block, results, @@ -198,11 +202,6 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA this.tx_resp_builder(), )?; - parent = SealedHeader::new( - block.inner.header.inner.clone(), - block.inner.header.hash, - ); - blocks.push(block); } @@ -215,7 +214,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA /// Executes the call request (`eth_call`) and returns the output fn call( &self, - request: TransactionRequest, + request: RpcTxReq<::Network>, block_number: Option, overrides: EvmOverrides, ) -> impl Future> + Send { @@ -231,7 +230,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA /// optionality of state overrides fn call_many( &self, - bundles: Vec, + bundles: Vec::Network>>>, state_context: Option, mut state_override: Option, ) -> impl Future>, Self::Error>> + Send { @@ -347,11 +346,11 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA } } - /// Creates [`AccessListResult`] for the [`TransactionRequest`] at the given + /// Creates [`AccessListResult`] for the [`RpcTxReq`] at the given /// [`BlockId`], or latest block. fn create_access_list_at( &self, - request: TransactionRequest, + request: RpcTxReq<::Network>, block_number: Option, state_override: Option, ) -> impl Future> + Send @@ -369,13 +368,13 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA } } - /// Creates [`AccessListResult`] for the [`TransactionRequest`] at the given + /// Creates [`AccessListResult`] for the [`RpcTxReq`] at the given /// [`BlockId`]. 
fn create_access_list_with( &self, mut evm_env: EvmEnvFor, at: BlockId, - mut request: TransactionRequest, + request: RpcTxReq<::Network>, state_override: Option, ) -> Result where @@ -402,14 +401,14 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA // Disabled because eth_createAccessList is sometimes used with non-eoa senders evm_env.cfg_env.disable_eip3607 = true; - if request.gas.is_none() && tx_env.gas_price() > 0 { + if request.as_ref().gas_limit().is_none() && tx_env.gas_price() > 0 { let cap = caller_gas_allowance(&mut db, &tx_env).map_err(Self::Error::from_eth_err)?; // no gas limit was provided in the request, so we need to cap the request's gas limit tx_env.set_gas_limit(cap.min(evm_env.block_env.gas_limit)); } // can consume the list since we're not using the request anymore - let initial = request.access_list.take().unwrap_or_default(); + let initial = request.as_ref().access_list().cloned().unwrap_or_default(); let mut inspector = AccessListInspector::new(initial); @@ -454,16 +453,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA /// Executes code on state. pub trait Call: LoadState< - Evm: ConfigureEvm< - Primitives: NodePrimitives< - BlockHeader = ProviderHeader, - SignedTx = ProviderTx, - >, - >, - RpcConvert: RpcConvert< - TxEnv = TxEnvFor, - Network: RpcTypes>, - >, + RpcConvert: RpcConvert>, Error: FromEvmError + From<::Error> + From, @@ -495,7 +485,7 @@ pub trait Call: tx_env: TxEnvFor, ) -> Result>, Self::Error> where - DB: Database, + DB: Database + fmt::Debug, { let mut evm = self.evm_config().evm_with_env(db, evm_env); let res = evm.transact(tx_env).map_err(Self::Error::from_evm_err)?; @@ -513,7 +503,7 @@ pub trait Call: inspector: I, ) -> Result>, Self::Error> where - DB: Database, + DB: Database + fmt::Debug, I: InspectorFor, { let mut evm = self.evm_config().evm_with_env_and_inspector(db, evm_env, inspector); @@ -525,7 +515,7 @@ pub trait Call: /// Executes the call request at the given [`BlockId`]. fn transact_call_at( &self, - request: TransactionRequest, + request: RpcTxReq<::Network>, at: BlockId, overrides: EvmOverrides, ) -> impl Future>, Self::Error>> + Send @@ -554,10 +544,10 @@ pub trait Call: }) } - /// Prepares the state and env for the given [`TransactionRequest`] at the given [`BlockId`] and + /// Prepares the state and env for the given [`RpcTxReq`] at the given [`BlockId`] and /// executes the closure on a new task returning the result of the closure. /// - /// This returns the configured [`EvmEnv`] for the given [`TransactionRequest`] at + /// This returns the configured [`EvmEnv`] for the given [`RpcTxReq`] at /// the given [`BlockId`] and with configured call settings: `prepare_call_env`. /// /// This is primarily used by `eth_call`. @@ -571,7 +561,7 @@ pub trait Call: /// instead, where blocking IO is less problematic. 
fn spawn_with_call_at( &self, - request: TransactionRequest, + request: RpcTxReq<::Network>, at: BlockId, overrides: EvmOverrides, f: F, @@ -675,7 +665,7 @@ pub trait Call: target_tx_hash: B256, ) -> Result where - DB: Database + DatabaseCommit, + DB: Database + DatabaseCommit + core::fmt::Debug, I: IntoIterator>>, { let mut evm = self.evm_config().evm_with_env(db, evm_env); @@ -693,26 +683,25 @@ pub trait Call: Ok(index) } - /// Configures a new `TxEnv` for the [`TransactionRequest`] /// - /// All `TxEnv` fields are derived from the given [`TransactionRequest`], if fields are + /// All `TxEnv` fields are derived from the given [`RpcTxReq`], if fields are /// `None`, they fall back to the [`EvmEnv`]'s settings. fn create_txn_env( &self, evm_env: &EvmEnv>, - mut request: TransactionRequest, + mut request: RpcTxReq<::Network>, mut db: impl Database>, ) -> Result, Self::Error> { - if request.nonce.is_none() { - request.nonce.replace( - db.basic(request.from.unwrap_or_default()) - .map_err(Into::into)? - .map(|acc| acc.nonce) - .unwrap_or_default(), - ); + if request.as_ref().nonce().is_none() { + let nonce = db + .basic(request.as_ref().from().unwrap_or_default()) + .map_err(Into::into)? + .map(|acc| acc.nonce) + .unwrap_or_default(); + request.as_mut().set_nonce(nonce); } - Ok(self.tx_resp_builder().tx_env(request.into(), &evm_env.cfg_env, &evm_env.block_env)?) + Ok(self.tx_resp_builder().tx_env(request, &evm_env.cfg_env, &evm_env.block_env)?) } /// Prepares the [`EvmEnv`] for execution of calls. @@ -732,15 +721,15 @@ pub trait Call: fn prepare_call_env( &self, mut evm_env: EvmEnvFor, - mut request: TransactionRequest, - db: &mut CacheDB, + mut request: RpcTxReq<::Network>, + db: &mut DB, overrides: EvmOverrides, ) -> Result<(EvmEnvFor, TxEnvFor), Self::Error> where - DB: DatabaseRef, - EthApiError: From<::Error>, + DB: Database + DatabaseCommit + OverrideBlockHashes, + EthApiError: From<::Error>, { - if request.gas > Some(self.call_gas_limit()) { + if request.as_ref().gas_limit() > Some(self.call_gas_limit()) { // configured gas exceeds limit return Err( EthApiError::InvalidTransaction(RpcInvalidTransactionError::GasTooHigh).into() @@ -760,7 +749,7 @@ pub trait Call: evm_env.cfg_env.disable_base_fee = true; // set nonce to None so that the correct nonce is chosen by the EVM - request.nonce = None; + request.as_mut().take_nonce(); if let Some(block_overrides) = overrides.block { apply_block_overrides(*block_overrides, db, &mut evm_env.block_env); @@ -770,9 +759,14 @@ pub trait Call: .map_err(EthApiError::from_state_overrides_err)?; } - let request_gas = request.gas; + let request_gas = request.as_ref().gas_limit(); let mut tx_env = self.create_txn_env(&evm_env, request, &mut *db)?; + // lower the basefee to 0 to avoid breaking EVM invariants (basefee < gasprice): + if tx_env.gas_price() == 0 { + evm_env.block_env.basefee = 0; + } + if request_gas.is_none() { // No gas limit was provided in the request, so we need to cap the transaction gas limit if tx_env.gas_price() > 0 { diff --git a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs index 91af2c37e4c..87945c3f4ad 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs @@ -3,13 +3,15 @@ use super::{Call, LoadPendingBlock}; use crate::{AsEthApiError, FromEthApiError, IntoEthApiError}; use alloy_evm::{call::caller_gas_allowance, overrides::apply_state_overrides}; +use alloy_network::TransactionBuilder; use alloy_primitives::{TxKind, U256}; 
-use alloy_rpc_types_eth::{state::StateOverride, transaction::TransactionRequest, BlockId}; +use alloy_rpc_types_eth::{state::StateOverride, BlockId}; use futures::Future; use reth_chainspec::MIN_TRANSACTION_GAS; use reth_errors::ProviderError; use reth_evm::{ConfigureEvm, Database, Evm, EvmEnvFor, EvmFor, TransactionEnv, TxEnvFor}; use reth_revm::{database::StateProviderDatabase, db::CacheDB}; +use reth_rpc_convert::{RpcConvert, RpcTxReq}; use reth_rpc_eth_types::{ error::{api::FromEvmHalt, FromEvmError}, EthApiError, RevertError, RpcInvalidTransactionError, @@ -23,7 +25,7 @@ use tracing::trace; pub trait EstimateCall: Call { /// Estimates the gas usage of the `request` with the state. /// - /// This will execute the [`TransactionRequest`] and find the best gas limit via binary search. + /// This will execute the [`RpcTxReq`] and find the best gas limit via binary search. /// /// ## EVM settings /// @@ -35,7 +37,7 @@ pub trait EstimateCall: Call { fn estimate_gas_with( &self, mut evm_env: EvmEnvFor, - mut request: TransactionRequest, + mut request: RpcTxReq<::Network>, state: S, state_override: Option, ) -> Result @@ -52,11 +54,11 @@ pub trait EstimateCall: Call { evm_env.cfg_env.disable_base_fee = true; // set nonce to None so that the correct nonce is chosen by the EVM - request.nonce = None; + request.as_mut().take_nonce(); // Keep a copy of gas related request values - let tx_request_gas_limit = request.gas; - let tx_request_gas_price = request.gas_price; + let tx_request_gas_limit = request.as_ref().gas_limit(); + let tx_request_gas_price = request.as_ref().gas_price(); // the gas limit of the corresponding block let block_env_gas_limit = evm_env.block_env.gas_limit; @@ -268,7 +270,7 @@ pub trait EstimateCall: Call { /// Estimate gas needed for execution of the `request` at the [`BlockId`]. fn estimate_gas_at( &self, - request: TransactionRequest, + request: RpcTxReq<::Network>, at: BlockId, state_override: Option, ) -> impl Future> + Send diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 272f3c18f1f..4296157e2bf 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -2,21 +2,20 @@ //! RPC methods. 
use super::SpawnBlocking; -use crate::{types::RpcTypes, EthApiTypes, FromEthApiError, FromEvmError, RpcNodeCore}; +use crate::{EthApiTypes, FromEthApiError, FromEvmError, RpcNodeCore}; use alloy_consensus::{BlockHeader, Transaction}; use alloy_eips::eip7840::BlobParams; -use alloy_primitives::U256; +use alloy_primitives::{B256, U256}; use alloy_rpc_types_eth::BlockNumberOrTag; use futures::Future; -use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; +use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_errors::{BlockExecutionError, BlockValidationError, ProviderError, RethError}; use reth_evm::{ execute::{BlockBuilder, BlockBuilderOutcome}, - ConfigureEvm, Evm, SpecFor, + ConfigureEvm, Evm, NextBlockEnvAttributes, SpecFor, }; -use reth_node_api::NodePrimitives; use reth_primitives_traits::{ - transaction::error::InvalidTransactionError, Receipt, RecoveredBlock, SealedHeader, + transaction::error::InvalidTransactionError, HeaderTy, RecoveredBlock, SealedHeader, }; use reth_revm::{database::StateProviderDatabase, db::State}; use reth_rpc_convert::RpcConvert; @@ -30,7 +29,10 @@ use reth_transaction_pool::{ TransactionPool, }; use revm::context_interface::Block; -use std::time::{Duration, Instant}; +use std::{ + sync::Arc, + time::{Duration, Instant}, +}; use tokio::sync::Mutex; use tracing::debug; @@ -39,31 +41,17 @@ use tracing::debug; /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` blocks RPC methods. pub trait LoadPendingBlock: EthApiTypes< - NetworkTypes: RpcTypes< - Header = alloy_rpc_types_eth::Header>, - >, Error: FromEvmError, RpcConvert: RpcConvert, - > + RpcNodeCore< - Provider: BlockReaderIdExt - + ChainSpecProvider - + StateProviderFactory, - Evm: ConfigureEvm::Primitives>, - Primitives: NodePrimitives< - BlockHeader = ProviderHeader, - SignedTx = ProviderTx, - Receipt = ProviderReceipt, - Block = ProviderBlock, - >, - > + > + RpcNodeCore { /// Returns a handle to the pending block. /// /// Data access in default (L1) trait method implementations. - #[expect(clippy::type_complexity)] - fn pending_block( - &self, - ) -> &Mutex, ProviderReceipt>>>; + fn pending_block(&self) -> &Mutex>>; + + /// Returns a [`PendingEnvBuilder`] for the pending block. + fn pending_env_builder(&self) -> &dyn PendingEnvBuilder; /// Configures the [`PendingBlockEnv`] for the pending block /// @@ -92,7 +80,7 @@ pub trait LoadPendingBlock: return Ok(PendingBlockEnv::new( evm_env, - PendingBlockEnvOrigin::ActualPending(block, receipts), + PendingBlockEnvOrigin::ActualPending(Arc::new(block), Arc::new(receipts)), )); } } @@ -118,7 +106,9 @@ pub trait LoadPendingBlock: fn next_env_attributes( &self, parent: &SealedHeader>, - ) -> Result<::NextBlockEnvCtx, Self::Error>; + ) -> Result<::NextBlockEnvCtx, Self::Error> { + Ok(self.pending_env_builder().pending_env_attributes(parent)?) + } /// Returns the locally built pending block #[expect(clippy::type_complexity)] @@ -127,8 +117,8 @@ pub trait LoadPendingBlock: ) -> impl Future< Output = Result< Option<( - RecoveredBlock<::Block>, - Vec>, + Arc::Block>>, + Arc>>, )>, Self::Error, >, @@ -178,6 +168,9 @@ pub trait LoadPendingBlock: } }; + let sealed_block = Arc::new(sealed_block); + let receipts = Arc::new(receipts); + let now = Instant::now(); *lock = Some(PendingBlock::new( now + Duration::from_secs(1), @@ -335,3 +328,61 @@ pub trait LoadPendingBlock: Ok((block, execution_result.receipts)) } } + +/// A type that knows how to build a [`ConfigureEvm::NextBlockEnvCtx`] for a pending block. 
+pub trait PendingEnvBuilder: Send + Sync + Unpin + 'static { + /// Builds a [`ConfigureEvm::NextBlockEnvCtx`] for a pending block. + fn pending_env_attributes( + &self, + parent: &SealedHeader>, + ) -> Result; +} + +/// Trait that should be implemented on [`ConfigureEvm::NextBlockEnvCtx`] to provide a way for it to +/// build an environment for a pending block. +/// +/// This assumes that building the next environment doesn't require any additional context; for more +/// complex implementations one should implement [`PendingEnvBuilder`] on their custom type. +pub trait BuildPendingEnv
{ + /// Builds a [`ConfigureEvm::NextBlockEnvCtx`] for a pending block. + fn build_pending_env(parent: &SealedHeader
) -> Self; +} + +impl PendingEnvBuilder for () +where + Evm: ConfigureEvm>>, +{ + fn pending_env_attributes( + &self, + parent: &SealedHeader>, + ) -> Result { + Ok(Evm::NextBlockEnvCtx::build_pending_env(parent)) + } +} + +impl BuildPendingEnv for NextBlockEnvAttributes { + fn build_pending_env(parent: &SealedHeader) -> Self { + Self { + timestamp: parent.timestamp().saturating_add(12), + suggested_fee_recipient: parent.beneficiary(), + prev_randao: B256::random(), + gas_limit: parent.gas_limit(), + parent_beacon_block_root: parent.parent_beacon_block_root().map(|_| B256::ZERO), + withdrawals: parent.withdrawals_root().map(|_| Default::default()), + } + } +} + +#[cfg(feature = "scroll")] +impl BuildPendingEnv + for reth_scroll_evm::ScrollNextBlockEnvAttributes +{ + fn build_pending_env(parent: &reth_primitives_traits::SealedHeader) -> Self { + Self { + timestamp: parent.timestamp().saturating_add(1), + suggested_fee_recipient: parent.beneficiary(), + gas_limit: parent.gas_limit(), + base_fee: parent.base_fee_per_gas().unwrap_or_default(), + } + } +} diff --git a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs index 4f1b5ebe16a..7ff64be65de 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs @@ -1,17 +1,29 @@ //! Loads a receipt from database. Helper trait for `eth_` block and transaction RPC methods, that //! loads receipt data w.r.t. network. -use alloy_consensus::transaction::TransactionMeta; -use futures::Future; -use reth_storage_api::{ProviderReceipt, ProviderTx, ReceiptProvider, TransactionsProvider}; - use crate::{EthApiTypes, RpcNodeCoreExt, RpcReceipt}; +use alloy_consensus::{transaction::TransactionMeta, TxReceipt}; +use futures::Future; +use reth_primitives_traits::SignerRecoverable; +use reth_rpc_convert::{transaction::ConvertReceiptInput, RpcConvert}; +use reth_rpc_eth_types::{error::FromEthApiError, EthApiError}; +use reth_storage_api::{ProviderReceipt, ProviderTx}; +use std::borrow::Cow; /// Assembles transaction receipt data w.r.t to network. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` receipts RPC methods. pub trait LoadReceipt: - EthApiTypes + RpcNodeCoreExt + Send + Sync + EthApiTypes< + RpcConvert: RpcConvert< + Primitives = Self::Primitives, + Error = Self::Error, + Network = Self::NetworkTypes, + >, + Error: FromEthApiError, + > + RpcNodeCoreExt + + Send + + Sync { /// Helper method for `eth_getBlockReceipts` and `eth_getTransactionReceipt`. fn build_transaction_receipt( @@ -19,5 +31,41 @@ pub trait LoadReceipt: tx: ProviderTx, meta: TransactionMeta, receipt: ProviderReceipt, - ) -> impl Future, Self::Error>> + Send; + ) -> impl Future, Self::Error>> + Send { + async move { + let hash = meta.block_hash; + // get all receipts for the block + let all_receipts = self + .cache() + .get_receipts(hash) + .await + .map_err(Self::Error::from_eth_err)? + .ok_or(EthApiError::HeaderNotFound(hash.into()))?; + + let mut gas_used = 0; + let mut next_log_index = 0; + + if meta.index > 0 { + for receipt in all_receipts.iter().take(meta.index as usize) { + gas_used = receipt.cumulative_gas_used(); + next_log_index += receipt.logs().len(); + } + } + + Ok(self + .tx_resp_builder() + .convert_receipts(vec![ConvertReceiptInput { + tx: tx + .try_into_recovered_unchecked() + .map_err(Self::Error::from_eth_err)? + .as_recovered_ref(), + gas_used: receipt.cumulative_gas_used() - gas_used, + receipt: Cow::Owned(receipt), + next_log_index, + meta, + }])? 
+ .pop() + .unwrap()) + } + } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/signer.rs b/crates/rpc/rpc-eth-api/src/helpers/signer.rs index 62f8b75b869..4060be138e0 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/signer.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/signer.rs @@ -12,7 +12,7 @@ pub type Result = result::Result; /// An Ethereum Signer used via RPC. #[async_trait::async_trait] -pub trait EthSigner: Send + Sync + DynClone { +pub trait EthSigner: Send + Sync + DynClone { /// Returns the available accounts for this signer. fn accounts(&self) -> Vec
; @@ -25,7 +25,7 @@ pub trait EthSigner: Send + Sync + DynClone { async fn sign(&self, address: Address, message: &[u8]) -> Result; /// signs a transaction request using the given account in request - async fn sign_transaction(&self, request: TransactionRequest, address: &Address) -> Result; + async fn sign_transaction(&self, request: TxReq, address: &Address) -> Result; /// Encodes and signs the typed data according EIP-712. Payload must implement Eip712 trait. fn sign_typed_data(&self, address: Address, payload: &TypedData) -> Result; diff --git a/crates/rpc/rpc-eth-api/src/helpers/spec.rs b/crates/rpc/rpc-eth-api/src/helpers/spec.rs index de446d8fb2d..fd3e13620c5 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/spec.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/spec.rs @@ -6,7 +6,8 @@ use futures::Future; use reth_chainspec::{ChainInfo, ChainSpecProvider, EthereumHardforks}; use reth_errors::{RethError, RethResult}; use reth_network_api::NetworkInfo; -use reth_storage_api::{BlockNumReader, StageCheckpointReader}; +use reth_rpc_convert::{RpcTxReq, RpcTypes}; +use reth_storage_api::{BlockNumReader, StageCheckpointReader, TransactionsProvider}; use crate::{helpers::EthSigner, RpcNodeCore}; @@ -25,11 +26,14 @@ pub trait EthApiSpec: /// The transaction type signers are using. type Transaction; + /// The RPC requests and responses. + type Rpc: RpcTypes; + /// Returns the block node is started on. fn starting_block(&self) -> U256; /// Returns a handle to the signers owned by provider. - fn signers(&self) -> &parking_lot::RwLock>>>; + fn signers(&self) -> &SignersForApi; /// Returns the current ethereum protocol version. fn protocol_version(&self) -> impl Future> + Send { @@ -88,3 +92,14 @@ pub trait EthApiSpec: Ok(status) } } + +/// A handle to [`EthSigner`]s with its generics set from [`EthApiSpec`]. +pub type SignersForApi = parking_lot::RwLock< + Vec::Transaction, RpcTxReq<::Rpc>>>>, +>; + +/// A handle to [`EthSigner`]s with its generics set from [`TransactionsProvider`] and +/// [`reth_rpc_convert::RpcTypes`]. +pub type SignersForRpc = parking_lot::RwLock< + Vec::Transaction, RpcTxReq>>>, +>; diff --git a/crates/rpc/rpc-eth-api/src/helpers/state.rs b/crates/rpc/rpc-eth-api/src/helpers/state.rs index 4fa4edee8bc..c9daa1790dc 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/state.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/state.rs @@ -8,7 +8,6 @@ use alloy_primitives::{Address, Bytes, B256, U256}; use alloy_rpc_types_eth::{Account, AccountInfo, EIP1186AccountProofResponse}; use alloy_serde::JsonStorageKey; use futures::Future; -use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; use reth_errors::RethError; use reth_evm::{ConfigureEvm, EvmEnvFor}; use reth_rpc_eth_types::{EthApiError, PendingBlockEnv, RpcInvalidTransactionError}; @@ -192,14 +191,7 @@ pub trait EthState: LoadState + SpawnBlocking { /// Loads state from database. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` state RPC methods. 
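The `LoadReceipt` default implementation shown earlier in this diff derives per-transaction values from cumulative block data: receipts store cumulative gas used, so a transaction's own usage is the difference against the preceding receipt, and `next_log_index` is the number of logs emitted by earlier transactions in the block. A sketch of that arithmetic over a simplified receipt type:

```rust
// Simplified stand-in for a block receipt.
struct SimpleReceipt {
    cumulative_gas_used: u64,
    num_logs: usize,
}

/// Returns `(gas_used, next_log_index)` for the receipt at `index`.
fn per_tx_position(receipts: &[SimpleReceipt], index: usize) -> (u64, usize) {
    let mut prev_cumulative = 0;
    let mut next_log_index = 0;
    for receipt in &receipts[..index] {
        prev_cumulative = receipt.cumulative_gas_used;
        next_log_index += receipt.num_logs;
    }
    (receipts[index].cumulative_gas_used - prev_cumulative, next_log_index)
}

fn main() {
    let receipts = [
        SimpleReceipt { cumulative_gas_used: 21_000, num_logs: 2 },
        SimpleReceipt { cumulative_gas_used: 90_000, num_logs: 1 },
    ];
    // the second transaction used 69_000 gas and its first log has block-wide index 2
    assert_eq!(per_tx_position(&receipts, 1), (69_000, 2));
}
```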
-pub trait LoadState: - EthApiTypes - + RpcNodeCoreExt< - Provider: StateProviderFactory - + ChainSpecProvider, - Pool: TransactionPool, - > -{ +pub trait LoadState: EthApiTypes + RpcNodeCoreExt { /// Returns the state at the given block number fn state_at_hash(&self, block_hash: B256) -> Result { self.provider().history_by_block_hash(block_hash).map_err(Self::Error::from_eth_err) diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index 92f2b4fe4cf..e640e4f8f0f 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -12,31 +12,19 @@ use reth_evm::{ evm::EvmFactoryExt, system_calls::SystemCaller, tracing::TracingCtx, ConfigureEvm, Database, Evm, EvmEnvFor, EvmFor, HaltReasonFor, InspectorFor, TxEnvFor, }; -use reth_node_api::NodePrimitives; use reth_primitives_traits::{BlockBody, Recovered, RecoveredBlock, SignedTransaction}; use reth_revm::{database::StateProviderDatabase, db::CacheDB}; use reth_rpc_eth_types::{ cache::db::{StateCacheDb, StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper}, EthApiError, }; -use reth_storage_api::{BlockReader, ProviderBlock, ProviderHeader, ProviderTx}; +use reth_storage_api::{ProviderBlock, ProviderTx}; use revm::{context_interface::result::ResultAndState, DatabaseCommit}; use revm_inspectors::tracing::{TracingInspector, TracingInspectorConfig}; use std::sync::Arc; /// Executes CPU heavy tasks. -pub trait Trace: - LoadState< - Provider: BlockReader, - Evm: ConfigureEvm< - Primitives: NodePrimitives< - BlockHeader = ProviderHeader, - SignedTx = ProviderTx, - >, - >, - Error: FromEvmError, -> -{ +pub trait Trace: LoadState> { /// Executes the [`reth_evm::EvmEnv`] against the given [Database] without committing state /// changes. #[expect(clippy::type_complexity)] @@ -366,7 +354,7 @@ pub trait Trace: /// /// This /// 1. fetches all transactions of the block - /// 2. configures the EVM evn + /// 2. configures the EVM env /// 3. loops over all transactions and executes them /// 4. calls the callback with the transaction info, the execution result, the changed state /// _after_ the transaction [`StateProviderDatabase`] and the database that points to the @@ -402,7 +390,7 @@ pub trait Trace: /// /// This /// 1. fetches all transactions of the block - /// 2. configures the EVM evn + /// 2. configures the EVM env /// 3. loops over all transactions and executes them /// 4. 
calls the callback with the transaction info, the execution result, the changed state /// _after_ the transaction `EvmState` and the database that points to the state right diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index c0c759d400d..168653e7c60 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -3,8 +3,9 @@ use super::{EthApiSpec, EthSigner, LoadBlock, LoadReceipt, LoadState, SpawnBlocking}; use crate::{ - helpers::estimate::EstimateCall, FromEthApiError, FullEthApiTypes, IntoEthApiError, - RpcNodeCore, RpcNodeCoreExt, RpcReceipt, RpcTransaction, + helpers::{estimate::EstimateCall, spec::SignersForRpc}, + FromEthApiError, FullEthApiTypes, IntoEthApiError, RpcNodeCore, RpcNodeCoreExt, RpcReceipt, + RpcTransaction, }; use alloy_consensus::{ transaction::{SignerRecoverable, TransactionMeta}, @@ -14,12 +15,12 @@ use alloy_dyn_abi::TypedData; use alloy_eips::{eip2718::Encodable2718, BlockId}; use alloy_network::TransactionBuilder; use alloy_primitives::{Address, Bytes, TxHash, B256}; -use alloy_rpc_types_eth::{transaction::TransactionRequest, BlockNumberOrTag, TransactionInfo}; +use alloy_rpc_types_eth::{BlockNumberOrTag, TransactionInfo}; use futures::{Future, StreamExt}; use reth_chain_state::CanonStateSubscriptions; use reth_node_api::BlockBody; use reth_primitives_traits::{RecoveredBlock, SignedTransaction}; -use reth_rpc_convert::transaction::RpcConvert; +use reth_rpc_convert::{transaction::RpcConvert, RpcTxReq}; use reth_rpc_eth_types::{ utils::binary_search, EthApiError, EthApiError::TransactionConfirmationTimeout, SignError, TransactionSource, @@ -28,7 +29,9 @@ use reth_storage_api::{ BlockNumReader, BlockReaderIdExt, ProviderBlock, ProviderReceipt, ProviderTx, ReceiptProvider, TransactionsProvider, }; -use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; +use reth_transaction_pool::{ + AddedTransactionOutcome, PoolTransaction, TransactionOrigin, TransactionPool, +}; use std::sync::Arc; /// Transaction related functions for the [`EthApiServer`](crate::EthApiServer) trait in @@ -41,7 +44,7 @@ use std::sync::Arc; /// /// ## Calls /// -/// There are subtle differences between when transacting [`TransactionRequest`]: +/// There are subtle differences between when transacting [`RpcTxReq`]: /// /// The endpoints `eth_call` and `eth_estimateGas` and `eth_createAccessList` should always /// __disable__ the base fee check in the [`CfgEnv`](revm::context::CfgEnv). @@ -57,8 +60,7 @@ pub trait EthTransactions: LoadTransaction { /// Returns a handle for signing data. /// /// Signer access in default (L1) trait method implementations. - #[expect(clippy::type_complexity)] - fn signers(&self) -> &parking_lot::RwLock>>>>; + fn signers(&self) -> &SignersForRpc; /// Decodes and recovers the transaction and submits it to the pool. /// @@ -223,8 +225,8 @@ pub trait EthTransactions: LoadTransaction { where Self: 'static, { - let provider = self.provider().clone(); - self.spawn_blocking_io(move |_| { + self.spawn_blocking_io(move |this| { + let provider = this.provider(); let (tx, meta) = match provider .transaction_by_hash_with_meta(hash) .map_err(Self::Error::from_eth_err)? @@ -379,13 +381,13 @@ pub trait EthTransactions: LoadTransaction { /// Returns the hash of the signed transaction. 
fn send_transaction( &self, - mut request: TransactionRequest, + mut request: RpcTxReq, ) -> impl Future> + Send where Self: EthApiSpec + LoadBlock + EstimateCall, { async move { - let from = match request.from { + let from = match request.as_ref().from() { Some(from) => from, None => return Err(SignError::NoAccount.into_eth_err()), }; @@ -395,18 +397,18 @@ pub trait EthTransactions: LoadTransaction { } // set nonce if not already set before - if request.nonce.is_none() { + if request.as_ref().nonce().is_none() { let nonce = self.next_available_nonce(from).await?; - request.nonce = Some(nonce); + request.as_mut().set_nonce(nonce); } let chain_id = self.chain_id(); - request.chain_id = Some(chain_id.to()); + request.as_mut().set_chain_id(chain_id.to()); let estimated_gas = self.estimate_gas_at(request.clone(), BlockId::pending(), None).await?; let gas_limit = estimated_gas; - request.set_gas_limit(gas_limit.to()); + request.as_mut().set_gas_limit(gas_limit.to()); let transaction = self.sign_request(&from, request).await?.with_signer(from); @@ -417,7 +419,7 @@ pub trait EthTransactions: LoadTransaction { .map_err(|_| EthApiError::TransactionConversionError)?; // submit the transaction to the pool with a `Local` origin - let hash = self + let AddedTransactionOutcome { hash, .. } = self .pool() .add_transaction(TransactionOrigin::Local, pool_transaction) .await @@ -431,7 +433,7 @@ pub trait EthTransactions: LoadTransaction { fn sign_request( &self, from: &Address, - txn: TransactionRequest, + txn: RpcTxReq, ) -> impl Future, Self::Error>> + Send { async move { self.find_signer(from)? @@ -462,10 +464,10 @@ pub trait EthTransactions: LoadTransaction { /// Returns the EIP-2718 encoded signed transaction. fn sign_transaction( &self, - request: TransactionRequest, + request: RpcTxReq, ) -> impl Future> + Send { async move { - let from = match request.from { + let from = match request.as_ref().from() { Some(from) => from, None => return Err(SignError::NoAccount.into_eth_err()), }; @@ -489,7 +491,10 @@ pub trait EthTransactions: LoadTransaction { fn find_signer( &self, account: &Address, - ) -> Result> + 'static>, Self::Error> { + ) -> Result< + Box, RpcTxReq> + 'static>, + Self::Error, + > { self.signers() .read() .iter() diff --git a/crates/rpc/rpc-eth-api/src/node.rs b/crates/rpc/rpc-eth-api/src/node.rs index 44e0cc812a2..0cd113d33eb 100644 --- a/crates/rpc/rpc-eth-api/src/node.rs +++ b/crates/rpc/rpc-eth-api/src/node.rs @@ -1,9 +1,16 @@ //! Helper trait for interfacing with [`FullNodeComponents`]. -use reth_node_api::{FullNodeComponents, NodeTypes, PrimitivesTy}; -use reth_payload_builder::PayloadBuilderHandle; +use reth_chain_state::CanonStateSubscriptions; +use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; +use reth_evm::ConfigureEvm; +use reth_network_api::NetworkInfo; +use reth_node_api::{FullNodeComponents, NodePrimitives, PrimitivesTy}; +use reth_primitives_traits::{BlockTy, HeaderTy, ReceiptTy, TxTy}; use reth_rpc_eth_types::EthStateCache; -use reth_storage_api::{BlockReader, ProviderBlock, ProviderReceipt}; +use reth_storage_api::{ + BlockReader, BlockReaderIdExt, StageCheckpointReader, StateProviderFactory, +}; +use reth_transaction_pool::{PoolTransaction, TransactionPool}; /// Helper trait that provides the same interface as [`FullNodeComponents`] but without requiring /// implementation of trait bounds. @@ -14,20 +21,31 @@ use reth_storage_api::{BlockReader, ProviderBlock, ProviderReceipt}; /// where the full trait bounds of the components are not necessary. 
/// /// Every type that is a [`FullNodeComponents`] also implements this trait. -pub trait RpcNodeCore: Clone + Send + Sync { +pub trait RpcNodeCore: Clone + Send + Sync + Unpin + 'static { /// Blockchain data primitives. - type Primitives: Send + Sync + Clone + Unpin; + type Primitives: NodePrimitives; /// The provider type used to interact with the node. - type Provider: Send + Sync + Clone + Unpin; + type Provider: BlockReaderIdExt< + Block = BlockTy, + Receipt = ReceiptTy, + Header = HeaderTy, + Transaction = TxTy, + > + ChainSpecProvider< + ChainSpec: EthChainSpec
> + EthereumHardforks, + > + StateProviderFactory + + CanonStateSubscriptions + + StageCheckpointReader + + Send + + Sync + + Clone + + Unpin + + 'static; /// The transaction pool of the node. - type Pool: Send + Sync + Clone + Unpin; + type Pool: TransactionPool>>; /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. - type Evm: Send + Sync + Clone + Unpin; + type Evm: ConfigureEvm + Send + Sync + 'static; /// Network API. - type Network: Send + Sync + Clone; - - /// Builds new blocks. - type PayloadBuilder: Send + Sync + Clone; + type Network: NetworkInfo + Clone; /// Returns the transaction pool of the node. fn pool(&self) -> &Self::Pool; @@ -38,23 +56,19 @@ pub trait RpcNodeCore: Clone + Send + Sync { /// Returns the handle to the network fn network(&self) -> &Self::Network; - /// Returns the handle to the payload builder service. - fn payload_builder(&self) -> &Self::PayloadBuilder; - /// Returns the provider of the node. fn provider(&self) -> &Self::Provider; } impl RpcNodeCore for T where - T: FullNodeComponents, + T: FullNodeComponents>, { type Primitives = PrimitivesTy; type Provider = T::Provider; type Pool = T::Pool; type Evm = T::Evm; type Network = T::Network; - type PayloadBuilder = PayloadBuilderHandle<::Payload>; #[inline] fn pool(&self) -> &Self::Pool { @@ -71,11 +85,6 @@ where FullNodeComponents::network(self) } - #[inline] - fn payload_builder(&self) -> &Self::PayloadBuilder { - FullNodeComponents::payload_builder_handle(self) - } - #[inline] fn provider(&self) -> &Self::Provider { FullNodeComponents::provider(self) @@ -86,7 +95,67 @@ where /// server. pub trait RpcNodeCoreExt: RpcNodeCore { /// Returns handle to RPC cache service. - fn cache( - &self, - ) -> &EthStateCache, ProviderReceipt>; + fn cache(&self) -> &EthStateCache; +} + +/// An adapter that allows to construct [`RpcNodeCore`] from components. +#[derive(Debug, Clone)] +pub struct RpcNodeCoreAdapter { + provider: Provider, + pool: Pool, + network: Network, + evm_config: Evm, +} + +impl RpcNodeCoreAdapter { + /// Creates a new `RpcNodeCoreAdapter` instance. + pub const fn new(provider: Provider, pool: Pool, network: Network, evm_config: Evm) -> Self { + Self { provider, pool, network, evm_config } + } +} + +impl RpcNodeCore for RpcNodeCoreAdapter +where + Provider: BlockReaderIdExt< + Block = BlockTy, + Receipt = ReceiptTy, + Header = HeaderTy, + Transaction = TxTy, + > + ChainSpecProvider< + ChainSpec: EthChainSpec
> + EthereumHardforks, + > + StateProviderFactory + + CanonStateSubscriptions + + StageCheckpointReader + + Send + + Sync + + Unpin + + Clone + + 'static, + Evm: ConfigureEvm + Clone + 'static, + Pool: TransactionPool>> + + Unpin + + 'static, + Network: NetworkInfo + Clone + Unpin + 'static, +{ + type Primitives = Evm::Primitives; + type Provider = Provider; + type Pool = Pool; + type Evm = Evm; + type Network = Network; + + fn pool(&self) -> &Self::Pool { + &self.pool + } + + fn evm_config(&self) -> &Self::Evm { + &self.evm_config + } + + fn network(&self) -> &Self::Network { + &self.network + } + + fn provider(&self) -> &Self::Provider { + &self.provider + } } diff --git a/crates/rpc/rpc-eth-api/src/types.rs b/crates/rpc/rpc-eth-api/src/types.rs index 7bb91af8258..4eb8b466ed3 100644 --- a/crates/rpc/rpc-eth-api/src/types.rs +++ b/crates/rpc/rpc-eth-api/src/types.rs @@ -1,9 +1,10 @@ //! Trait for specifying `eth` network dependent API types. use crate::{AsEthApiError, FromEthApiError, RpcNodeCore}; -use alloy_rpc_types_eth::{Block, TransactionRequest}; +use alloy_rpc_types_eth::Block; use reth_chain_state::CanonStateSubscriptions; use reth_rpc_convert::RpcConvert; +pub use reth_rpc_convert::{RpcTransaction, RpcTxReq, RpcTypes}; use reth_storage_api::{ProviderTx, ReceiptProvider, TransactionsProvider}; use reth_transaction_pool::{PoolTransaction, TransactionPool}; use std::{ @@ -11,8 +12,6 @@ use std::{ fmt::{self}, }; -pub use reth_rpc_convert::{RpcTransaction, RpcTxReq, RpcTypes}; - /// Network specific `eth` API types. /// /// This trait defines the network specific rpc types and helpers required for the `eth_` and @@ -60,11 +59,10 @@ where >, > + EthApiTypes< RpcConvert: RpcConvert< - Primitives = ::Primitives, + Primitives = Self::Primitives, Network = Self::NetworkTypes, Error = RpcError, >, - NetworkTypes: RpcTypes>, >, { } @@ -81,7 +79,6 @@ impl FullEthApiTypes for T where Network = Self::NetworkTypes, Error = RpcError, >, - NetworkTypes: RpcTypes>, > { } diff --git a/crates/rpc/rpc-eth-types/Cargo.toml b/crates/rpc/rpc-eth-types/Cargo.toml index 4a2104d9146..2148ba7e37b 100644 --- a/crates/rpc/rpc-eth-types/Cargo.toml +++ b/crates/rpc/rpc-eth-types/Cargo.toml @@ -35,6 +35,7 @@ alloy-primitives.workspace = true alloy-consensus.workspace = true alloy-sol-types.workspace = true alloy-rpc-types-eth.workspace = true +alloy-network.workspace = true revm.workspace = true revm-inspectors.workspace = true diff --git a/crates/rpc/rpc-eth-types/src/cache/db.rs b/crates/rpc/rpc-eth-types/src/cache/db.rs index 7c1bedb8224..abb8983485a 100644 --- a/crates/rpc/rpc-eth-types/src/cache/db.rs +++ b/crates/rpc/rpc-eth-types/src/cache/db.rs @@ -182,9 +182,14 @@ impl BytecodeReader for StateProviderTraitObjWrapper<'_> { /// Hack to get around 'higher-ranked lifetime error', see /// -#[expect(missing_debug_implementations)] pub struct StateCacheDbRefMutWrapper<'a, 'b>(pub &'b mut StateCacheDb<'a>); +impl<'a, 'b> core::fmt::Debug for StateCacheDbRefMutWrapper<'a, 'b> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("StateCacheDbRefMutWrapper").finish_non_exhaustive() + } +} + impl<'a> Database for StateCacheDbRefMutWrapper<'a, '_> { type Error = as Database>::Error; fn basic(&mut self, address: Address) -> Result, Self::Error> { diff --git a/crates/rpc/rpc-eth-types/src/cache/mod.rs b/crates/rpc/rpc-eth-types/src/cache/mod.rs index a055acac58a..6df612261d9 100644 --- a/crates/rpc/rpc-eth-types/src/cache/mod.rs +++ b/crates/rpc/rpc-eth-types/src/cache/mod.rs @@ 
-70,17 +70,17 @@ type HeaderLruCache = MultiConsumerLruCache { - to_service: UnboundedSender>, +pub struct EthStateCache { + to_service: UnboundedSender>, } -impl Clone for EthStateCache { +impl Clone for EthStateCache { fn clone(&self) -> Self { Self { to_service: self.to_service.clone() } } } -impl EthStateCache { +impl EthStateCache { /// Creates and returns both [`EthStateCache`] frontend and the memory bound service. fn create( provider: Provider, @@ -91,7 +91,7 @@ impl EthStateCache { max_concurrent_db_operations: usize, ) -> (Self, EthStateCacheService) where - Provider: BlockReader, + Provider: BlockReader, { let (to_service, rx) = unbounded_channel(); let service = EthStateCacheService { @@ -114,7 +114,7 @@ impl EthStateCache { /// See also [`Self::spawn_with`] pub fn spawn(provider: Provider, config: EthStateCacheConfig) -> Self where - Provider: BlockReader + Clone + Unpin + 'static, + Provider: BlockReader + Clone + Unpin + 'static, { Self::spawn_with(provider, config, TokioTaskExecutor::default()) } @@ -129,7 +129,7 @@ impl EthStateCache { executor: Tasks, ) -> Self where - Provider: BlockReader + Clone + Unpin + 'static, + Provider: BlockReader + Clone + Unpin + 'static, Tasks: TaskSpawner + Clone + 'static, { let EthStateCacheConfig { @@ -156,7 +156,7 @@ impl EthStateCache { pub async fn get_recovered_block( &self, block_hash: B256, - ) -> ProviderResult>>> { + ) -> ProviderResult>>> { let (response_tx, rx) = oneshot::channel(); let _ = self.to_service.send(CacheAction::GetBlockWithSenders { block_hash, response_tx }); rx.await.map_err(|_| CacheServiceUnavailable)? @@ -165,7 +165,10 @@ impl EthStateCache { /// Requests the receipts for the block hash /// /// Returns `None` if the block was not found. - pub async fn get_receipts(&self, block_hash: B256) -> ProviderResult>>> { + pub async fn get_receipts( + &self, + block_hash: B256, + ) -> ProviderResult>>> { let (response_tx, rx) = oneshot::channel(); let _ = self.to_service.send(CacheAction::GetReceipts { block_hash, response_tx }); rx.await.map_err(|_| CacheServiceUnavailable)? @@ -175,7 +178,7 @@ impl EthStateCache { pub async fn get_block_and_receipts( &self, block_hash: B256, - ) -> ProviderResult>, Arc>)>> { + ) -> ProviderResult>, Arc>)>> { let block = self.get_recovered_block(block_hash); let receipts = self.get_receipts(block_hash); @@ -188,7 +191,7 @@ impl EthStateCache { pub async fn get_receipts_and_maybe_block( &self, block_hash: B256, - ) -> ProviderResult>, Option>>)>> { + ) -> ProviderResult>, Option>>)>> { let (response_tx, rx) = oneshot::channel(); let _ = self.to_service.send(CacheAction::GetCachedBlock { block_hash, response_tx }); @@ -204,7 +207,7 @@ impl EthStateCache { pub async fn maybe_cached_block_and_receipts( &self, block_hash: B256, - ) -> ProviderResult<(Option>>, Option>>)> { + ) -> ProviderResult<(Option>>, Option>>)> { let (response_tx, rx) = oneshot::channel(); let _ = self .to_service @@ -217,8 +220,11 @@ impl EthStateCache { pub fn get_receipts_and_maybe_block_stream<'a>( &'a self, hashes: Vec, - ) -> impl Stream>, Option>>)>>> + 'a - { + ) -> impl Stream< + Item = ProviderResult< + Option<(Arc>, Option>>)>, + >, + > + 'a { let futures = hashes.into_iter().map(move |hash| self.get_receipts_and_maybe_block(hash)); futures.collect::>() @@ -227,7 +233,7 @@ impl EthStateCache { /// Requests the header for the given hash. /// /// Returns an error if the header is not found. 
- pub async fn get_header(&self, block_hash: B256) -> ProviderResult { + pub async fn get_header(&self, block_hash: B256) -> ProviderResult { let (response_tx, rx) = oneshot::channel(); let _ = self.to_service.send(CacheAction::GetHeader { block_hash, response_tx }); rx.await.map_err(|_| CacheServiceUnavailable)? @@ -244,7 +250,7 @@ impl EthStateCache { &self, block_hash: B256, max_blocks: usize, - ) -> Option>>> { + ) -> Option>>> { let (response_tx, rx) = oneshot::channel(); let _ = self.to_service.send(CacheAction::GetCachedParentBlocks { block_hash, @@ -777,7 +783,7 @@ impl Drop for ActionSender { /// /// Reorged blocks are removed from the cache. pub async fn cache_new_blocks_task( - eth_state_cache: EthStateCache, + eth_state_cache: EthStateCache, mut events: St, ) where St: Stream> + Unpin + 'static, diff --git a/crates/rpc/rpc-eth-types/src/error/mod.rs b/crates/rpc/rpc-eth-types/src/error/mod.rs index 96adc4e67b2..fdb0ade248e 100644 --- a/crates/rpc/rpc-eth-types/src/error/mod.rs +++ b/crates/rpc/rpc-eth-types/src/error/mod.rs @@ -187,6 +187,14 @@ impl EthApiError { matches!(self, Self::InvalidTransaction(RpcInvalidTransactionError::GasTooLow)) } + /// Returns the [`RpcInvalidTransactionError`] if this is a [`EthApiError::InvalidTransaction`] + pub const fn as_invalid_transaction(&self) -> Option<&RpcInvalidTransactionError> { + match self { + Self::InvalidTransaction(e) => Some(e), + _ => None, + } + } + /// Converts the given [`StateOverrideError`] into a new [`EthApiError`] instance. pub fn from_state_overrides_err(err: StateOverrideError) -> Self where @@ -202,6 +210,11 @@ impl EthApiError { { err.into() } + + /// Converts this error into the rpc error object. + pub fn into_rpc_err(self) -> jsonrpsee_types::error::ErrorObject<'static> { + self.into() + } } impl From for jsonrpsee_types::error::ErrorObject<'static> { @@ -570,6 +583,12 @@ pub enum RpcInvalidTransactionError { /// EIP-7702 transaction has invalid fields set. #[error("EIP-7702 authorization list has invalid fields")] AuthorizationListInvalidFields, + /// Transaction priority fee is below the minimum required priority fee. + #[error("transaction priority fee below minimum required priority fee {minimum_priority_fee}")] + PriorityFeeBelowMinimum { + /// Minimum required priority fee. + minimum_priority_fee: u128, + }, /// Any other error #[error("{0}")] Other(Box), @@ -580,9 +599,7 @@ impl RpcInvalidTransactionError { pub fn other(err: E) -> Self { Self::Other(Box::new(err)) } -} -impl RpcInvalidTransactionError { /// Returns the rpc error code for this error. pub const fn error_code(&self) -> i32 { match self { @@ -621,6 +638,11 @@ impl RpcInvalidTransactionError { OutOfGasError::InvalidOperand => Self::InvalidOperandOutOfGas(gas_limit), } } + + /// Converts this error into the rpc error object. 
+ pub fn into_rpc_err(self) -> jsonrpsee_types::error::ErrorObject<'static> { + self.into() + } } impl From for jsonrpsee_types::error::ErrorObject<'static> { @@ -801,6 +823,9 @@ pub enum RpcPoolError { /// When the transaction exceeds the block gas limit #[error("exceeds block gas limit")] ExceedsGasLimit, + /// When the transaction gas limit exceeds the maximum transaction gas limit + #[error("exceeds max transaction gas limit")] + MaxTxGasLimitExceeded, /// Thrown when a new transaction is added to the pool, but then immediately discarded to /// respect the tx fee exceeds the configured cap #[error("tx fee ({max_tx_fee_wei} wei) exceeds the configured cap ({tx_fee_cap_wei} wei)")] @@ -854,6 +879,7 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { RpcPoolError::Underpriced | RpcPoolError::ReplaceUnderpriced | RpcPoolError::ExceedsGasLimit | + RpcPoolError::MaxTxGasLimitExceeded | RpcPoolError::ExceedsFeeCap { .. } | RpcPoolError::NegativeValue | RpcPoolError::OversizedData | @@ -890,6 +916,7 @@ impl From for RpcPoolError { match err { InvalidPoolTransactionError::Consensus(err) => Self::Invalid(err.into()), InvalidPoolTransactionError::ExceedsGasLimit(_, _) => Self::ExceedsGasLimit, + InvalidPoolTransactionError::MaxTxGasLimitExceeded(_, _) => Self::MaxTxGasLimitExceeded, InvalidPoolTransactionError::ExceedsFeeCap { max_tx_fee_wei, tx_fee_cap_wei } => { Self::ExceedsFeeCap { max_tx_fee_wei, tx_fee_cap_wei } } @@ -910,6 +937,11 @@ impl From for RpcPoolError { InvalidPoolTransactionError::Overdraft { cost, balance } => { Self::Invalid(RpcInvalidTransactionError::InsufficientFunds { cost, balance }) } + InvalidPoolTransactionError::PriorityFeeBelowMinimum { minimum_priority_fee } => { + Self::Invalid(RpcInvalidTransactionError::PriorityFeeBelowMinimum { + minimum_priority_fee, + }) + } } } } diff --git a/crates/rpc/rpc-eth-types/src/fee_history.rs b/crates/rpc/rpc-eth-types/src/fee_history.rs index 7262c1c44ca..0ae4da51913 100644 --- a/crates/rpc/rpc-eth-types/src/fee_history.rs +++ b/crates/rpc/rpc-eth-types/src/fee_history.rs @@ -219,7 +219,7 @@ pub async fn fee_history_cache_new_blocks_task( fee_history_cache: FeeHistoryCache, mut events: St, provider: Provider, - cache: EthStateCache, + cache: EthStateCache, ) where St: Stream> + Unpin + 'static, Provider: diff --git a/crates/rpc/rpc-eth-types/src/gas_oracle.rs b/crates/rpc/rpc-eth-types/src/gas_oracle.rs index 8a6f18682d4..14ca4895a9b 100644 --- a/crates/rpc/rpc-eth-types/src/gas_oracle.rs +++ b/crates/rpc/rpc-eth-types/src/gas_oracle.rs @@ -15,7 +15,7 @@ use reth_rpc_server_types::{ DEFAULT_MAX_GAS_PRICE, MAX_HEADER_HISTORY, MAX_REWARD_PERCENTILE_COUNT, SAMPLE_NUMBER, }, }; -use reth_storage_api::{BlockReader, BlockReaderIdExt}; +use reth_storage_api::{BlockReaderIdExt, NodePrimitivesProvider}; use schnellru::{ByLength, LruMap}; use serde::{Deserialize, Serialize}; use std::fmt::{self, Debug, Formatter}; @@ -77,12 +77,12 @@ impl Default for GasPriceOracleConfig { #[derive(Debug)] pub struct GasPriceOracle where - Provider: BlockReader, + Provider: NodePrimitivesProvider, { /// The type used to subscribe to block events and get block info provider: Provider, /// The cache for blocks - cache: EthStateCache, + cache: EthStateCache, /// The config for the oracle oracle_config: GasPriceOracleConfig, /// The price under which the sample will be ignored. 
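`EthStateCache`, now generic over `NodePrimitives` as the hunks above show, is a thin handle around a channel: each request carries a oneshot responder and is answered by a background service task that owns the actual LRU caches. A simplified sketch of that frontend/service pattern (stand-in payload types, not the actual reth cache actions):

```rust
use tokio::sync::{mpsc, oneshot};

// Stand-in action type; the real service multiplexes several request kinds.
enum CacheAction {
    GetHeader { block_hash: u64, response_tx: oneshot::Sender<Option<String>> },
}

#[derive(Clone)]
struct CacheHandle {
    to_service: mpsc::UnboundedSender<CacheAction>,
}

impl CacheHandle {
    /// Sends a request to the service and awaits the oneshot response.
    async fn get_header(&self, block_hash: u64) -> Option<String> {
        let (response_tx, rx) = oneshot::channel();
        let _ = self.to_service.send(CacheAction::GetHeader { block_hash, response_tx });
        rx.await.ok().flatten()
    }
}

#[tokio::main]
async fn main() {
    let (to_service, mut rx) = mpsc::unbounded_channel();
    // the service task owns the cache state and answers requests
    tokio::spawn(async move {
        while let Some(CacheAction::GetHeader { block_hash, response_tx }) = rx.recv().await {
            let _ = response_tx.send(Some(format!("header #{block_hash}")));
        }
    });
    let handle = CacheHandle { to_service };
    assert_eq!(handle.get_header(1).await.as_deref(), Some("header #1"));
}
```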
@@ -94,13 +94,13 @@ where impl GasPriceOracle where - Provider: BlockReaderIdExt, + Provider: BlockReaderIdExt + NodePrimitivesProvider, { /// Creates and returns the [`GasPriceOracle`]. pub fn new( provider: Provider, mut oracle_config: GasPriceOracleConfig, - cache: EthStateCache, + cache: EthStateCache, ) -> Self { // sanitize the percentile to be less than 100 if oracle_config.percentile > 100 { diff --git a/crates/rpc/rpc-eth-types/src/lib.rs b/crates/rpc/rpc-eth-types/src/lib.rs index 815160abf4e..eead8c5fc2a 100644 --- a/crates/rpc/rpc-eth-types/src/lib.rs +++ b/crates/rpc/rpc-eth-types/src/lib.rs @@ -33,5 +33,4 @@ pub use gas_oracle::{ }; pub use id_provider::EthSubscriptionIdProvider; pub use pending_block::{PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin}; -pub use receipt::EthReceiptBuilder; pub use transaction::TransactionSource; diff --git a/crates/rpc/rpc-eth-types/src/pending_block.rs b/crates/rpc/rpc-eth-types/src/pending_block.rs index 7990e2334b1..a339b6b0730 100644 --- a/crates/rpc/rpc-eth-types/src/pending_block.rs +++ b/crates/rpc/rpc-eth-types/src/pending_block.rs @@ -2,7 +2,7 @@ //! //! Types used in block building. -use std::time::Instant; +use std::{sync::Arc, time::Instant}; use alloy_consensus::BlockHeader; use alloy_eips::{BlockId, BlockNumberOrTag}; @@ -10,7 +10,7 @@ use alloy_primitives::B256; use derive_more::Constructor; use reth_ethereum_primitives::Receipt; use reth_evm::EvmEnv; -use reth_primitives_traits::{Block, RecoveredBlock, SealedHeader}; +use reth_primitives_traits::{Block, NodePrimitives, RecoveredBlock, SealedHeader}; /// Configured [`EvmEnv`] for a pending block. #[derive(Debug, Clone, Constructor)] @@ -25,7 +25,7 @@ pub struct PendingBlockEnv { #[derive(Clone, Debug)] pub enum PendingBlockEnvOrigin { /// The pending block as received from the CL. - ActualPending(RecoveredBlock, Vec), + ActualPending(Arc>, Arc>), /// The _modified_ header of the latest block. /// /// This derives the pending state based on the latest header by modifying: @@ -42,7 +42,7 @@ impl PendingBlockEnvOrigin { } /// Consumes the type and returns the actual pending block. - pub fn into_actual_pending(self) -> Option> { + pub fn into_actual_pending(self) -> Option>> { match self { Self::ActualPending(block, _) => Some(block), _ => None, @@ -75,11 +75,11 @@ impl PendingBlockEnvOrigin { /// Locally built pending block for `pending` tag. #[derive(Debug, Constructor)] -pub struct PendingBlock { +pub struct PendingBlock { /// Timestamp when the pending block is considered outdated. pub expires_at: Instant, /// The locally built pending block. - pub block: RecoveredBlock, + pub block: Arc>, /// The receipts for the pending block - pub receipts: Vec, + pub receipts: Arc>, } diff --git a/crates/rpc/rpc-eth-types/src/receipt.rs b/crates/rpc/rpc-eth-types/src/receipt.rs index d10bb1d4a33..4ea4ad1daf5 100644 --- a/crates/rpc/rpc-eth-types/src/receipt.rs +++ b/crates/rpc/rpc-eth-types/src/receipt.rs @@ -1,82 +1,52 @@ //! RPC receipt response builder, extends a layer one receipt with layer two data. 
-use super::EthResult; -use alloy_consensus::{transaction::TransactionMeta, ReceiptEnvelope, TxReceipt}; +use crate::EthApiError; +use alloy_consensus::{ReceiptEnvelope, Transaction, TxReceipt}; use alloy_eips::eip7840::BlobParams; use alloy_primitives::{Address, TxKind}; use alloy_rpc_types_eth::{Log, ReceiptWithBloom, TransactionReceipt}; -use reth_ethereum_primitives::{Receipt, TransactionSigned, TxType}; -use reth_primitives_traits::SignedTransaction; +use reth_chainspec::EthChainSpec; +use reth_ethereum_primitives::Receipt; +use reth_primitives_traits::NodePrimitives; +use reth_rpc_convert::transaction::{ConvertReceiptInput, ReceiptConverter}; +use std::{borrow::Cow, sync::Arc}; /// Builds an [`TransactionReceipt`] obtaining the inner receipt envelope from the given closure. -pub fn build_receipt( - transaction: &T, - meta: TransactionMeta, - receipt: &R, - all_receipts: &[R], +pub fn build_receipt( + input: &ConvertReceiptInput<'_, N>, blob_params: Option, build_envelope: impl FnOnce(ReceiptWithBloom>) -> E, -) -> EthResult> +) -> TransactionReceipt where - R: TxReceipt, - T: SignedTransaction, + N: NodePrimitives, { - // Note: we assume this transaction is valid, because it's mined (or part of pending block) - // and we don't need to check for pre EIP-2 - let from = transaction.recover_signer_unchecked()?; + let ConvertReceiptInput { tx, meta, receipt, gas_used, next_log_index } = input; + let from = tx.signer(); - // get the previous transaction cumulative gas used - let gas_used = if meta.index == 0 { - receipt.cumulative_gas_used() - } else { - let prev_tx_idx = (meta.index - 1) as usize; - all_receipts - .get(prev_tx_idx) - .map(|prev_receipt| receipt.cumulative_gas_used() - prev_receipt.cumulative_gas_used()) - .unwrap_or_default() - }; - - let blob_gas_used = transaction.blob_gas_used(); + let blob_gas_used = tx.blob_gas_used(); // Blob gas price should only be present if the transaction is a blob transaction let blob_gas_price = blob_gas_used.and_then(|_| Some(blob_params?.calc_blob_fee(meta.excess_blob_gas?))); + let status = receipt.status_or_post_state(); + let cumulative_gas_used = receipt.cumulative_gas_used(); let logs_bloom = receipt.bloom(); - // get number of logs in the block - let mut num_logs = 0; - for prev_receipt in all_receipts.iter().take(meta.index as usize) { - num_logs += prev_receipt.logs().len(); - } - - let logs: Vec = receipt - .logs() - .iter() - .enumerate() - .map(|(tx_log_idx, log)| Log { - inner: log.clone(), - block_hash: Some(meta.block_hash), - block_number: Some(meta.block_number), - block_timestamp: Some(meta.timestamp), - transaction_hash: Some(meta.tx_hash), - transaction_index: Some(meta.index), - log_index: Some((num_logs + tx_log_idx) as u64), - removed: false, - }) - .collect(); - - let rpc_receipt = alloy_rpc_types_eth::Receipt { - status: receipt.status_or_post_state(), - cumulative_gas_used: receipt.cumulative_gas_used(), - logs, + let logs = match receipt { + Cow::Borrowed(r) => { + Log::collect_for_receipt(*next_log_index, *meta, r.logs().iter().cloned()) + } + Cow::Owned(r) => Log::collect_for_receipt(*next_log_index, *meta, r.into_logs()), }; - let (contract_address, to) = match transaction.kind() { - TxKind::Create => (Some(from.create(transaction.nonce())), None), + let rpc_receipt = alloy_rpc_types_eth::Receipt { status, cumulative_gas_used, logs }; + + let (contract_address, to) = match tx.kind() { + TxKind::Create => (Some(from.create(tx.nonce())), None), TxKind::Call(addr) => (None, Some(Address(*addr))), }; - 
Ok(TransactionReceipt { + TransactionReceipt { inner: build_envelope(ReceiptWithBloom { receipt: rpc_receipt, logs_bloom }), transaction_hash: meta.tx_hash, transaction_index: Some(meta.index), @@ -84,54 +54,56 @@ where block_number: Some(meta.block_number), from, to, - gas_used, + gas_used: *gas_used, contract_address, - effective_gas_price: transaction.effective_gas_price(meta.base_fee), + effective_gas_price: tx.effective_gas_price(meta.base_fee), // EIP-4844 fields blob_gas_price, blob_gas_used, - }) + } } -/// Receipt response builder. +/// Converter for Ethereum receipts. #[derive(Debug)] -pub struct EthReceiptBuilder { - /// The base response body, contains L1 fields. - pub base: TransactionReceipt, +pub struct EthReceiptConverter { + chain_spec: Arc, } -impl EthReceiptBuilder { - /// Returns a new builder with the base response body (L1 fields) set. - /// - /// Note: This requires _all_ block receipts because we need to calculate the gas used by the - /// transaction. - pub fn new( - transaction: &TransactionSigned, - meta: TransactionMeta, - receipt: &Receipt, - all_receipts: &[Receipt], - blob_params: Option, - ) -> EthResult { - let base = build_receipt( - transaction, - meta, - receipt, - all_receipts, - blob_params, - |receipt_with_bloom| match receipt.tx_type { - TxType::Legacy => ReceiptEnvelope::Legacy(receipt_with_bloom), - TxType::Eip2930 => ReceiptEnvelope::Eip2930(receipt_with_bloom), - TxType::Eip1559 => ReceiptEnvelope::Eip1559(receipt_with_bloom), - TxType::Eip4844 => ReceiptEnvelope::Eip4844(receipt_with_bloom), - TxType::Eip7702 => ReceiptEnvelope::Eip7702(receipt_with_bloom), - }, - )?; +impl Clone for EthReceiptConverter { + fn clone(&self) -> Self { + Self { chain_spec: self.chain_spec.clone() } + } +} - Ok(Self { base }) +impl EthReceiptConverter { + /// Creates a new converter with the given chain spec. + pub const fn new(chain_spec: Arc) -> Self { + Self { chain_spec } } +} + +impl ReceiptConverter for EthReceiptConverter +where + N: NodePrimitives, + ChainSpec: EthChainSpec + 'static, +{ + type RpcReceipt = TransactionReceipt; + type Error = EthApiError; + + fn convert_receipts( + &self, + inputs: Vec>, + ) -> Result, Self::Error> { + let mut receipts = Vec::with_capacity(inputs.len()); + + for input in inputs { + let tx_type = input.receipt.tx_type; + let blob_params = self.chain_spec.blob_params_at_timestamp(input.meta.timestamp); + receipts.push(build_receipt(&input, blob_params, |receipt_with_bloom| { + ReceiptEnvelope::from_typed(tx_type, receipt_with_bloom) + })); + } - /// Builds a receipt response from the base response body, and any set additional fields. 
- pub fn build(self) -> TransactionReceipt { - self.base + Ok(receipts) } } diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index 988261b8179..733390a1965 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -7,12 +7,12 @@ use crate::{ }, EthApiError, RevertError, }; -use alloy_consensus::{BlockHeader, Transaction as _, TxType}; +use alloy_consensus::{BlockHeader, Transaction as _}; use alloy_eips::eip2718::WithEncoded; +use alloy_network::TransactionBuilder; use alloy_rpc_types_eth::{ simulate::{SimCallResult, SimulateError, SimulatedBlock}, - transaction::TransactionRequest, - Block, BlockTransactionsKind, Header, + BlockTransactionsKind, }; use jsonrpsee_types::ErrorObject; use reth_evm::{ @@ -20,9 +20,9 @@ use reth_evm::{ Evm, }; use reth_primitives_traits::{ - block::BlockTx, BlockBody as _, NodePrimitives, Recovered, RecoveredBlock, SignedTransaction, + BlockBody as _, BlockTy, NodePrimitives, Recovered, RecoveredBlock, SignedTransaction, }; -use reth_rpc_convert::{RpcConvert, RpcTransaction, RpcTypes}; +use reth_rpc_convert::{RpcBlock, RpcConvert, RpcTxReq}; use reth_rpc_server_types::result::rpc_err; use reth_storage_api::noop::NoopProvider; use revm::{ @@ -61,10 +61,12 @@ impl ToRpcError for EthSimulateError { /// given [`BlockExecutor`]. /// /// Returns all executed transactions and the result of the execution. +/// +/// [`TransactionRequest`]: alloy_rpc_types_eth::TransactionRequest #[expect(clippy::type_complexity)] pub fn execute_transactions( mut builder: S, - calls: Vec, + calls: Vec>, default_gas_limit: u64, chain_id: u64, tx_resp_builder: &T, @@ -77,10 +79,7 @@ pub fn execute_transactions( > where S: BlockBuilder>>>>, - T: RpcConvert< - Primitives = S::Primitives, - Network: RpcTypes>, - >, + T: RpcConvert, { builder.apply_pre_execution_changes()?; @@ -114,8 +113,10 @@ where /// them into primitive transactions. /// /// This will set the defaults as defined in +/// +/// [`TransactionRequest`]: alloy_rpc_types_eth::TransactionRequest pub fn resolve_transaction( - mut tx: TransactionRequest, + mut tx: RpcTxReq, default_gas_limit: u64, block_base_fee_per_gas: u64, chain_id: u64, @@ -124,86 +125,76 @@ pub fn resolve_transaction( ) -> Result, EthApiError> where DB::Error: Into, - T: RpcConvert< - Primitives: NodePrimitives, - Network: RpcTypes>, - >, + T: RpcConvert>, { // If we're missing any fields we try to fill nonce, gas and // gas price. 
- let tx_type = tx.preferred_type(); + let tx_type = tx.as_ref().output_tx_type(); - let from = if let Some(from) = tx.from { + let from = if let Some(from) = tx.as_ref().from() { from } else { - tx.from = Some(Address::ZERO); + tx.as_mut().set_from(Address::ZERO); Address::ZERO }; - if tx.nonce.is_none() { - tx.nonce = - Some(db.basic(from).map_err(Into::into)?.map(|acc| acc.nonce).unwrap_or_default()); + if tx.as_ref().nonce().is_none() { + tx.as_mut().set_nonce( + db.basic(from).map_err(Into::into)?.map(|acc| acc.nonce).unwrap_or_default(), + ); } - if tx.gas.is_none() { - tx.gas = Some(default_gas_limit); + if tx.as_ref().gas_limit().is_none() { + tx.as_mut().set_gas_limit(default_gas_limit); } - if tx.chain_id.is_none() { - tx.chain_id = Some(chain_id); + if tx.as_ref().chain_id().is_none() { + tx.as_mut().set_chain_id(chain_id); } - if tx.to.is_none() { - tx.to = Some(TxKind::Create); + if tx.as_ref().kind().is_none() { + tx.as_mut().set_kind(TxKind::Create); } // if we can't build the _entire_ transaction yet, we need to check the fee values - if tx.buildable_type().is_none() { - match tx_type { - TxType::Legacy | TxType::Eip2930 => { - if tx.gas_price.is_none() { - tx.gas_price = Some(block_base_fee_per_gas as u128); - } + if tx.as_ref().output_tx_type_checked().is_none() { + if tx_type.is_legacy() || tx_type.is_eip2930() { + if tx.as_ref().gas_price().is_none() { + tx.as_mut().set_gas_price(block_base_fee_per_gas as u128); } - _ => { - // set dynamic 1559 fees - if tx.max_fee_per_gas.is_none() { - let mut max_fee_per_gas = block_base_fee_per_gas as u128; - if let Some(prio_fee) = tx.max_priority_fee_per_gas { - // if a prio fee is provided we need to select the max fee accordingly - // because the base fee must be higher than the prio fee. - max_fee_per_gas = prio_fee.max(max_fee_per_gas); - } - tx.max_fee_per_gas = Some(max_fee_per_gas); - } - if tx.max_priority_fee_per_gas.is_none() { - tx.max_priority_fee_per_gas = Some(0); + } else { + // set dynamic 1559 fees + if tx.as_ref().max_fee_per_gas().is_none() { + let mut max_fee_per_gas = block_base_fee_per_gas as u128; + if let Some(prio_fee) = tx.as_ref().max_priority_fee_per_gas() { + // if a prio fee is provided we need to select the max fee accordingly + // because the base fee must be higher than the prio fee. + max_fee_per_gas = prio_fee.max(max_fee_per_gas); } + tx.as_mut().set_max_fee_per_gas(max_fee_per_gas); + } + if tx.as_ref().max_priority_fee_per_gas().is_none() { + tx.as_mut().set_max_priority_fee_per_gas(0); } } } let tx = tx_resp_builder - .build_simulate_v1_transaction(tx.into()) + .build_simulate_v1_transaction(tx) .map_err(|e| EthApiError::other(e.into()))?; Ok(Recovered::new_unchecked(tx, from)) } /// Handles outputs of the calls execution and builds a [`SimulatedBlock`]. 
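The dynamic-fee branch of `resolve_transaction` above encodes one subtle rule: a defaulted `max_fee_per_gas` must cover both the block basefee and any caller-supplied priority fee, since the max fee is an upper bound on what the caller pays per unit of gas. A sketch over a simplified request type:

```rust
// Simplified stand-in for the fee-related request fields.
struct FeeFields {
    max_fee_per_gas: Option<u128>,
    max_priority_fee_per_gas: Option<u128>,
}

/// Fills missing EIP-1559 fee fields: the max fee defaults to the block
/// basefee, bumped so it always covers a caller-supplied priority fee.
fn default_eip1559_fees(tx: &mut FeeFields, block_base_fee: u128) {
    if tx.max_fee_per_gas.is_none() {
        let mut max_fee = block_base_fee;
        if let Some(priority_fee) = tx.max_priority_fee_per_gas {
            max_fee = priority_fee.max(max_fee);
        }
        tx.max_fee_per_gas = Some(max_fee);
    }
    if tx.max_priority_fee_per_gas.is_none() {
        tx.max_priority_fee_per_gas = Some(0);
    }
}

fn main() {
    let mut tx = FeeFields { max_fee_per_gas: None, max_priority_fee_per_gas: Some(3) };
    default_eip1559_fees(&mut tx, 2);
    // the priority fee (3) exceeds the basefee (2), so it sets the floor
    assert_eq!(tx.max_fee_per_gas, Some(3));
}
```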
-#[expect(clippy::type_complexity)] -pub fn build_simulated_block( - block: RecoveredBlock, +pub fn build_simulated_block( + block: RecoveredBlock>, results: Vec>, txs_kind: BlockTransactionsKind, tx_resp_builder: &T, -) -> Result, Header>>, T::Error> +) -> Result>, T::Error> where - T: RpcConvert< - Primitives: NodePrimitives>, - Error: FromEthApiError + FromEvmHalt, - >, - B: reth_primitives_traits::Block, + T: RpcConvert>, { let mut calls: Vec = Vec::with_capacity(results.len()); @@ -262,6 +253,10 @@ where calls.push(call); } - let block = block.into_rpc_block(txs_kind, |tx, tx_info| tx_resp_builder.fill(tx, tx_info))?; + let block = block.into_rpc_block( + txs_kind, + |tx, tx_info| tx_resp_builder.fill(tx, tx_info), + |header, size| tx_resp_builder.convert_header(header, size), + )?; Ok(SimulatedBlock { inner: block, calls }) } diff --git a/crates/rpc/rpc-testing-util/Cargo.toml b/crates/rpc/rpc-testing-util/Cargo.toml index 65b10feef98..2d074ef2368 100644 --- a/crates/rpc/rpc-testing-util/Cargo.toml +++ b/crates/rpc/rpc-testing-util/Cargo.toml @@ -36,4 +36,3 @@ similar-asserts.workspace = true tokio = { workspace = true, features = ["rt-multi-thread", "macros", "rt"] } reth-rpc-eth-api.workspace = true jsonrpsee-http-client.workspace = true -alloy-rpc-types-trace.workspace = true diff --git a/crates/rpc/rpc-testing-util/src/debug.rs b/crates/rpc/rpc-testing-util/src/debug.rs index 85b1bc4208c..4f91e7e63c0 100644 --- a/crates/rpc/rpc-testing-util/src/debug.rs +++ b/crates/rpc/rpc-testing-util/src/debug.rs @@ -78,7 +78,7 @@ pub trait DebugApiExt { impl DebugApiExt for T where T: EthApiClient - + DebugApiClient + + DebugApiClient + Sync, { type Provider = T; diff --git a/crates/rpc/rpc-testing-util/src/trace.rs b/crates/rpc/rpc-testing-util/src/trace.rs index b556a895045..8f71d1c4554 100644 --- a/crates/rpc/rpc-testing-util/src/trace.rs +++ b/crates/rpc/rpc-testing-util/src/trace.rs @@ -250,7 +250,7 @@ impl std::fmt::Debug for ReplayTransactionStream<'_> { } } -impl TraceApiExt for T { +impl + Sync> TraceApiExt for T { type Provider = T; fn trace_block_buffered(&self, params: I, n: usize) -> TraceBlockStream<'_> diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 2f41caa5480..e0d1fcb601f 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -18,7 +18,6 @@ reth-primitives-traits.workspace = true reth-rpc-api.workspace = true reth-rpc-eth-api.workspace = true reth-engine-primitives.workspace = true -reth-ethereum-primitives.workspace = true reth-errors.workspace = true reth-metrics.workspace = true reth-storage-api.workspace = true @@ -60,7 +59,7 @@ alloy-rpc-types-trace.workspace = true alloy-rpc-types-mev.workspace = true alloy-rpc-types-txpool.workspace = true alloy-rpc-types-admin.workspace = true -alloy-rpc-types-engine.workspace = true +alloy-rpc-types-engine = { workspace = true, features = ["kzg"] } alloy-serde.workspace = true revm = { workspace = true, features = ["optional_block_gas_limit", "optional_eip3607", "optional_no_base_fee"] } revm-primitives = { workspace = true, features = ["serde"] } @@ -90,17 +89,17 @@ serde.workspace = true sha2.workspace = true thiserror.workspace = true derive_more.workspace = true +itertools.workspace = true [dev-dependencies] -reth-evm-ethereum.workspace = true +reth-ethereum-primitives.workspace = true reth-testing-utils.workspace = true reth-transaction-pool = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } +reth-db-api.workspace = true 
-alloy-consensus.workspace = true rand.workspace = true -jsonrpsee-types.workspace = true jsonrpsee = { workspace = true, features = ["client"] } [features] diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 1e2f107398e..e5fa07e0e51 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -5,8 +5,7 @@ use alloy_primitives::{uint, Address, Bytes, B256}; use alloy_rlp::{Decodable, Encodable}; use alloy_rpc_types_debug::ExecutionWitness; use alloy_rpc_types_eth::{ - state::EvmOverrides, transaction::TransactionRequest, Block as RpcBlock, BlockError, Bundle, - StateContext, TransactionInfo, + state::EvmOverrides, Block as RpcBlock, BlockError, Bundle, StateContext, TransactionInfo, }; use alloy_rpc_types_trace::geth::{ call::FlatCallFrame, BlockTraceResult, FourByteFrame, GethDebugBuiltInTracerType, @@ -18,7 +17,7 @@ use jsonrpsee::core::RpcResult; use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; use reth_evm::{execute::Executor, ConfigureEvm, EvmEnvFor, TxEnvFor}; use reth_primitives_traits::{ - Block as _, BlockBody, NodePrimitives, ReceiptWithBloom, RecoveredBlock, SignedTransaction, + Block as _, BlockBody, ReceiptWithBloom, RecoveredBlock, SignedTransaction, }; use reth_revm::{ database::StateProviderDatabase, @@ -26,6 +25,7 @@ use reth_revm::{ witness::ExecutionWitnessRecord, }; use reth_rpc_api::DebugApiServer; +use reth_rpc_convert::RpcTxReq; use reth_rpc_eth_api::{ helpers::{EthTransactions, TraceExt}, EthApiTypes, FromEthApiError, RpcNodeCore, @@ -48,16 +48,16 @@ use tokio::sync::{AcquireError, OwnedSemaphorePermit}; /// `debug` API implementation. /// /// This type provides the functionality for handling `debug` related requests. -pub struct DebugApi { - inner: Arc>, +pub struct DebugApi { + inner: Arc>, } // === impl DebugApi === -impl DebugApi { +impl DebugApi { /// Create a new instance of the [`DebugApi`] - pub fn new(eth: Eth, blocking_task_guard: BlockingTaskGuard, evm_config: Evm) -> Self { - let inner = Arc::new(DebugApiInner { eth_api: eth, blocking_task_guard, evm_config }); + pub fn new(eth_api: Eth, blocking_task_guard: BlockingTaskGuard) -> Self { + let inner = Arc::new(DebugApiInner { eth_api, blocking_task_guard }); Self { inner } } @@ -68,7 +68,7 @@ impl DebugApi { } } -impl DebugApi { +impl DebugApi { /// Access the underlying provider. pub fn provider(&self) -> &Eth::Provider { self.inner.eth_api.provider() @@ -77,10 +77,9 @@ impl DebugApi { // === impl DebugApi === -impl DebugApi +impl DebugApi where Eth: EthApiTypes + TraceExt + 'static, - Evm: ConfigureEvm>> + 'static, { /// Acquires a permit to execute a tracing call. 
async fn acquire_trace_permit(&self) -> Result { @@ -266,7 +265,7 @@ where /// - `debug_traceCall` executes with __enabled__ basefee check, `eth_call` does not: pub async fn debug_trace_call( &self, - call: TransactionRequest, + call: RpcTxReq, block_id: Option, opts: GethDebugTracingCallOptions, ) -> Result { @@ -278,6 +277,7 @@ where let this = self.clone(); if let Some(tracer) = tracer { + #[allow(unreachable_patterns)] return match tracer { GethDebugTracerType::BuiltInTracer(tracer) => match tracer { GethDebugBuiltInTracerType::FourByteTracer => { @@ -445,6 +445,11 @@ where Ok(GethTrace::JS(res)) } + _ => { + // Note: this match is non-exhaustive in case we need to add support for + // additional tracers + Err(EthApiError::Unsupported("unsupported tracer").into()) + } } } @@ -476,7 +481,7 @@ where /// Each following bundle increments block number by 1 and block timestamp by 12 seconds pub async fn debug_trace_call_many( &self, - bundles: Vec, + bundles: Vec>>, state_context: Option, opts: Option, ) -> Result>, Eth::Error> { @@ -631,7 +636,7 @@ where .eth_api() .spawn_with_state_at_block(block.parent_hash().into(), move |state_provider| { let db = StateProviderDatabase::new(&state_provider); - let block_executor = this.inner.evm_config.batch_executor(db); + let block_executor = this.eth_api().evm_config().batch_executor(db); let mut witness_record = ExecutionWitnessRecord::default(); @@ -733,6 +738,7 @@ where }; if let Some(tracer) = tracer { + #[allow(unreachable_patterns)] return match tracer { GethDebugTracerType::BuiltInTracer(tracer) => match tracer { GethDebugBuiltInTracerType::FourByteTracer => { @@ -848,6 +854,11 @@ where .map_err(Eth::Error::from_eth_err)?; Ok((GethTrace::JS(result), state)) } + _ => { + // Note: this match is non-exhaustive in case we need to add support for + // additional tracers + Err(EthApiError::Unsupported("unsupported tracer").into()) + } } } @@ -886,10 +897,9 @@ where } #[async_trait] -impl DebugApiServer for DebugApi +impl DebugApiServer> for DebugApi where Eth: EthApiTypes + EthTransactions + TraceExt + 'static, - Evm: ConfigureEvm>> + 'static, { /// Handler for `debug_getRawHeader` async fn raw_header(&self, block_id: BlockId) -> RpcResult { @@ -1024,7 +1034,7 @@ where /// Handler for `debug_traceCall` async fn debug_trace_call( &self, - request: TransactionRequest, + request: RpcTxReq, block_id: Option, opts: Option, ) -> RpcResult { @@ -1036,7 +1046,7 @@ where async fn debug_trace_call_many( &self, - bundles: Vec, + bundles: Vec>>, state_context: Option, opts: Option, ) -> RpcResult>> { @@ -1294,23 +1304,21 @@ where } } -impl std::fmt::Debug for DebugApi { +impl std::fmt::Debug for DebugApi { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("DebugApi").finish_non_exhaustive() } } -impl Clone for DebugApi { +impl Clone for DebugApi { fn clone(&self) -> Self { Self { inner: Arc::clone(&self.inner) } } } -struct DebugApiInner { +struct DebugApiInner { /// The implementation of `eth` API eth_api: Eth, // restrict the number of concurrent calls to blocking calls blocking_task_guard: BlockingTaskGuard, - /// block executor for debug & trace apis - evm_config: Evm, } diff --git a/crates/rpc/rpc/src/engine.rs b/crates/rpc/rpc/src/engine.rs index 33ef2b3e5fe..a0e0bd30931 100644 --- a/crates/rpc/rpc/src/engine.rs +++ b/crates/rpc/rpc/src/engine.rs @@ -1,8 +1,7 @@ use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::{Address, Bytes, B256, U256, U64}; use alloy_rpc_types_eth::{ - state::StateOverride, 
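// The two fallback arms added above keep the tracer match compiling if
// `GethDebugTracerType` ever gains variants. Self-contained model of the
// pattern with a stand-in enum:
enum Tracer { FourByte, Call, Js(String) }

fn dispatch(tracer: Tracer) -> Result<&'static str, String> {
    #[allow(unreachable_patterns)]
    return match tracer {
        Tracer::FourByte => Ok("four-byte frame"),
        Tracer::Call => Ok("call frame"),
        Tracer::Js(_) => Ok("js trace"),
        // today this arm is unreachable (hence the allow); it only fires once
        // a new variant exists, mirroring `EthApiError::Unsupported`
        _ => Err("unsupported tracer".to_string()),
    };
}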
transaction::TransactionRequest, BlockOverrides, - EIP1186AccountProofResponse, Filter, Log, SyncStatus, + state::StateOverride, BlockOverrides, EIP1186AccountProofResponse, Filter, Log, SyncStatus, }; use alloy_serde::JsonStorageKey; use jsonrpsee::core::RpcResult as Result; @@ -37,8 +36,12 @@ impl EngineEthApi { } #[async_trait::async_trait] -impl EngineEthApiServer, RpcReceipt> - for EngineEthApi +impl + EngineEthApiServer< + RpcTxReq, + RpcBlock, + RpcReceipt, + > for EngineEthApi where Eth: EthApiServer< RpcTxReq, @@ -73,7 +76,7 @@ where /// Handler for: `eth_call` async fn call( &self, - request: TransactionRequest, + request: RpcTxReq, block_id: Option, state_overrides: Option, block_overrides: Option>, diff --git a/crates/rpc/rpc/src/eth/builder.rs b/crates/rpc/rpc/src/eth/builder.rs index 732ae1edf11..2e6a6dcf91f 100644 --- a/crates/rpc/rpc/src/eth/builder.rs +++ b/crates/rpc/rpc/src/eth/builder.rs @@ -1,17 +1,22 @@ //! `EthApiBuilder` implementation use crate::{eth::core::EthApiInner, EthApi}; +use alloy_network::Ethereum; use reth_chain_state::CanonStateSubscriptions; use reth_chainspec::ChainSpecProvider; -use reth_node_api::NodePrimitives; +use reth_primitives_traits::HeaderTy; +use reth_rpc_convert::{RpcConvert, RpcConverter}; +use reth_rpc_eth_api::{ + helpers::pending_block::PendingEnvBuilder, node::RpcNodeCoreAdapter, RpcNodeCore, +}; use reth_rpc_eth_types::{ - fee_history::fee_history_cache_new_blocks_task, EthStateCache, EthStateCacheConfig, - FeeHistoryCache, FeeHistoryCacheConfig, GasCap, GasPriceOracle, GasPriceOracleConfig, + fee_history::fee_history_cache_new_blocks_task, receipt::EthReceiptConverter, EthStateCache, + EthStateCacheConfig, FeeHistoryCache, FeeHistoryCacheConfig, GasCap, GasPriceOracle, + GasPriceOracleConfig, }; use reth_rpc_server_types::constants::{ DEFAULT_ETH_PROOF_WINDOW, DEFAULT_MAX_SIMULATE_BLOCKS, DEFAULT_PROOF_PERMITS, }; -use reth_storage_api::{BlockReaderIdExt, StateProviderFactory}; use reth_tasks::{pool::BlockingTaskPool, TaskSpawner, TokioTaskExecutor}; use std::sync::Arc; @@ -20,41 +25,90 @@ use std::sync::Arc; /// This builder type contains all settings to create an [`EthApiInner`] or an [`EthApi`] instance /// directly. #[derive(Debug)] -pub struct EthApiBuilder -where - Provider: BlockReaderIdExt, -{ - provider: Provider, - pool: Pool, - network: Network, - evm_config: EvmConfig, +pub struct EthApiBuilder { + components: N, + rpc_converter: Rpc, gas_cap: GasCap, max_simulate_blocks: u64, eth_proof_window: u64, fee_history_cache_config: FeeHistoryCacheConfig, proof_permits: usize, eth_state_cache_config: EthStateCacheConfig, - eth_cache: Option>, + eth_cache: Option>, gas_oracle_config: GasPriceOracleConfig, - gas_oracle: Option>, + gas_oracle: Option>, blocking_task_pool: Option, task_spawner: Box, + next_env: NextEnv, } -impl EthApiBuilder +impl + EthApiBuilder< + RpcNodeCoreAdapter, + RpcConverter>, + > where - Provider: BlockReaderIdExt, + RpcNodeCoreAdapter: + RpcNodeCore, Evm = EvmConfig>, { /// Creates a new `EthApiBuilder` instance. 
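// The engine.rs hunk above replaces the concrete `TransactionRequest` with
// the network-generic `RpcTxReq<..>` alias. Minimal model of such an alias
// over an associated type (the names here are illustrative):
trait Network { type TxRequest; }

struct Mainnet;
impl Network for Mainnet { type TxRequest = String; } // stand-in request type

type TxReq<N> = <N as Network>::TxRequest;

// handlers can now be written once for any network's request type
fn call<N: Network>(request: TxReq<N>) -> TxReq<N> { request }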
- pub fn new(provider: Provider, pool: Pool, network: Network, evm_config: EvmConfig) -> Self + pub fn new(provider: Provider, pool: Pool, network: Network, evm_config: EvmConfig) -> Self { + Self::new_with_components(RpcNodeCoreAdapter::new(provider, pool, network, evm_config)) + } +} + +impl EthApiBuilder { + /// Converts the RPC converter type of this builder + pub fn map_converter(self, f: F) -> EthApiBuilder where - Provider: BlockReaderIdExt, + F: FnOnce(Rpc) -> R, { + let Self { + components, + rpc_converter, + gas_cap, + max_simulate_blocks, + eth_proof_window, + fee_history_cache_config, + proof_permits, + eth_state_cache_config, + eth_cache, + gas_oracle_config, + gas_oracle, + blocking_task_pool, + task_spawner, + next_env, + } = self; + EthApiBuilder { + components, + rpc_converter: f(rpc_converter), + gas_cap, + max_simulate_blocks, + eth_proof_window, + fee_history_cache_config, + proof_permits, + eth_state_cache_config, + eth_cache, + gas_oracle_config, + gas_oracle, + blocking_task_pool, + task_spawner, + next_env, + } + } +} + +impl EthApiBuilder>> +where + N: RpcNodeCore>, +{ + /// Creates a new `EthApiBuilder` instance with the provided components. + pub fn new_with_components(components: N) -> Self { + let rpc_converter = + RpcConverter::new(EthReceiptConverter::new(components.provider().chain_spec())); Self { - provider, - pool, - network, - evm_config, + components, + rpc_converter, eth_cache: None, gas_oracle: None, gas_cap: GasCap::default(), @@ -66,15 +120,99 @@ where task_spawner: TokioTaskExecutor::default().boxed(), gas_oracle_config: Default::default(), eth_state_cache_config: Default::default(), + next_env: Default::default(), } } +} +impl EthApiBuilder +where + N: RpcNodeCore, +{ /// Configures the task spawner used to spawn additional tasks. pub fn task_spawner(mut self, spawner: impl TaskSpawner + 'static) -> Self { self.task_spawner = Box::new(spawner); self } + /// Changes the configured converter. + pub fn with_rpc_converter( + self, + rpc_converter: RpcNew, + ) -> EthApiBuilder { + let Self { + components, + rpc_converter: _, + gas_cap, + max_simulate_blocks, + eth_proof_window, + fee_history_cache_config, + proof_permits, + eth_state_cache_config, + eth_cache, + gas_oracle, + blocking_task_pool, + task_spawner, + gas_oracle_config, + next_env, + } = self; + EthApiBuilder { + components, + rpc_converter, + gas_cap, + max_simulate_blocks, + eth_proof_window, + fee_history_cache_config, + proof_permits, + eth_state_cache_config, + eth_cache, + gas_oracle, + blocking_task_pool, + task_spawner, + gas_oracle_config, + next_env, + } + } + + /// Changes the configured pending environment builder. + pub fn with_pending_env_builder( + self, + next_env: NextEnvNew, + ) -> EthApiBuilder { + let Self { + components, + rpc_converter, + gas_cap, + max_simulate_blocks, + eth_proof_window, + fee_history_cache_config, + proof_permits, + eth_state_cache_config, + eth_cache, + gas_oracle, + blocking_task_pool, + task_spawner, + gas_oracle_config, + next_env: _, + } = self; + EthApiBuilder { + components, + rpc_converter, + gas_cap, + max_simulate_blocks, + eth_proof_window, + fee_history_cache_config, + proof_permits, + eth_state_cache_config, + eth_cache, + gas_oracle, + blocking_task_pool, + task_spawner, + gas_oracle_config, + next_env, + } + } + /// Sets `eth_cache` config for the cache that will be used if no [`EthStateCache`] is /// configured. 
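// `map_converter`/`with_rpc_converter` above change a *type parameter* of the
// builder, so they must destructure and rebuild the whole struct instead of
// mutating in place. The same move on a two-field model:
struct Builder<C> { converter: C, gas_cap: u64 }

impl<C> Builder<C> {
    fn map_converter<F, R>(self, f: F) -> Builder<R>
    where
        F: FnOnce(C) -> R,
    {
        // Builder<C> and Builder<R> are different types, so every field has
        // to move across; `self.converter = f(..)` could not change the type
        let Self { converter, gas_cap } = self;
        Builder { converter: f(converter), gas_cap }
    }
}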
pub const fn eth_state_cache_config( @@ -86,10 +224,7 @@ where } /// Sets `eth_cache` instance - pub fn eth_cache( - mut self, - eth_cache: EthStateCache, - ) -> Self { + pub fn eth_cache(mut self, eth_cache: EthStateCache) -> Self { self.eth_cache = Some(eth_cache); self } @@ -102,7 +237,7 @@ where } /// Sets `gas_oracle` instance - pub fn gas_oracle(mut self, gas_oracle: GasPriceOracle) -> Self { + pub fn gas_oracle(mut self, gas_oracle: GasPriceOracle) -> Self { self.gas_oracle = Some(gas_oracle); self } @@ -154,26 +289,14 @@ where /// /// This function panics if the blocking task pool cannot be built. /// This will panic if called outside the context of a Tokio runtime. - pub fn build_inner(self) -> EthApiInner + pub fn build_inner(self) -> EthApiInner where - Provider: BlockReaderIdExt - + StateProviderFactory - + ChainSpecProvider - + CanonStateSubscriptions< - Primitives: NodePrimitives< - Block = Provider::Block, - Receipt = Provider::Receipt, - BlockHeader = Provider::Header, - >, - > + Clone - + Unpin - + 'static, + Rpc: RpcConvert, + NextEnv: PendingEnvBuilder, { let Self { - provider, - pool, - network, - evm_config, + components, + rpc_converter, eth_state_cache_config, gas_oracle_config, eth_cache, @@ -185,29 +308,30 @@ where fee_history_cache_config, proof_permits, task_spawner, + next_env, } = self; + let provider = components.provider().clone(); + let eth_cache = eth_cache .unwrap_or_else(|| EthStateCache::spawn(provider.clone(), eth_state_cache_config)); let gas_oracle = gas_oracle.unwrap_or_else(|| { GasPriceOracle::new(provider.clone(), gas_oracle_config, eth_cache.clone()) }); - let fee_history_cache = FeeHistoryCache::::new(fee_history_cache_config); + let fee_history_cache = + FeeHistoryCache::>::new(fee_history_cache_config); let new_canonical_blocks = provider.canonical_state_stream(); let fhc = fee_history_cache.clone(); let cache = eth_cache.clone(); - let prov = provider.clone(); task_spawner.spawn_critical( "cache canonical blocks for fee history task", Box::pin(async move { - fee_history_cache_new_blocks_task(fhc, new_canonical_blocks, prov, cache).await; + fee_history_cache_new_blocks_task(fhc, new_canonical_blocks, provider, cache).await; }), ); EthApiInner::new( - provider, - pool, - network, + components, eth_cache, gas_oracle, gas_cap, @@ -217,9 +341,10 @@ where BlockingTaskPool::build().expect("failed to build blocking task pool") }), fee_history_cache, - evm_config, task_spawner, proof_permits, + rpc_converter, + next_env, ) } @@ -231,21 +356,11 @@ where /// /// This function panics if the blocking task pool cannot be built. /// This will panic if called outside the context of a Tokio runtime. 
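// Sketch of the `unwrap_or_else` wiring in `build_inner` above: caller-supplied
// cache and oracle are used if present, otherwise defaults are constructed
// lazily. Stand-in unit types for `EthStateCache`/`GasPriceOracle`:
struct Cache;
struct Oracle;

fn wire(user_cache: Option<Cache>, user_oracle: Option<Oracle>) -> (Cache, Oracle) {
    // mirrors `eth_cache.unwrap_or_else(|| EthStateCache::spawn(..))`
    let cache = user_cache.unwrap_or_else(|| Cache);
    // mirrors `gas_oracle.unwrap_or_else(|| GasPriceOracle::new(..))`
    let oracle = user_oracle.unwrap_or_else(|| Oracle);
    (cache, oracle)
}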
- pub fn build(self) -> EthApi + pub fn build(self) -> EthApi where - Provider: BlockReaderIdExt - + StateProviderFactory - + CanonStateSubscriptions< - Primitives: NodePrimitives< - Block = Provider::Block, - Receipt = Provider::Receipt, - BlockHeader = Provider::Header, - >, - > + ChainSpecProvider - + Clone - + Unpin - + 'static, + Rpc: RpcConvert, + NextEnv: PendingEnvBuilder, { - EthApi { inner: Arc::new(self.build_inner()), tx_resp_builder: Default::default() } + EthApi { inner: Arc::new(self.build_inner()) } } } diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index 8a0683b7636..f43d787f169 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -274,7 +274,7 @@ where } } -/// Container type for `EthBundle` internals +/// Container type for `EthBundle` internals #[derive(Debug)] struct EthBundleInner { /// Access to commonly used code of the `eth` namespace diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index f6cceee46e0..ffad40b0117 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -9,42 +9,43 @@ use alloy_eips::BlockNumberOrTag; use alloy_network::Ethereum; use alloy_primitives::{Bytes, U256}; use derive_more::Deref; +use reth_chainspec::{ChainSpec, ChainSpecProvider}; +use reth_evm_ethereum::EthEvmConfig; +use reth_network_api::noop::NoopNetwork; use reth_node_api::{FullNodeComponents, FullNodeTypes}; +use reth_rpc_convert::{RpcConvert, RpcConverter}; use reth_rpc_eth_api::{ - helpers::{EthSigner, SpawnBlocking}, - node::RpcNodeCoreExt, + helpers::{pending_block::PendingEnvBuilder, spec::SignersForRpc, SpawnBlocking}, + node::{RpcNodeCoreAdapter, RpcNodeCoreExt}, EthApiTypes, RpcNodeCore, }; use reth_rpc_eth_types::{ - EthApiError, EthStateCache, FeeHistoryCache, GasCap, GasPriceOracle, PendingBlock, -}; -use reth_storage_api::{ - BlockReader, BlockReaderIdExt, NodePrimitivesProvider, ProviderBlock, ProviderHeader, - ProviderReceipt, + receipt::EthReceiptConverter, EthApiError, EthStateCache, FeeHistoryCache, GasCap, + GasPriceOracle, PendingBlock, }; +use reth_storage_api::{noop::NoopProvider, BlockReaderIdExt, ProviderHeader}; use reth_tasks::{ pool::{BlockingTaskGuard, BlockingTaskPool}, TaskSpawner, TokioTaskExecutor, }; +use reth_transaction_pool::noop::NoopTransactionPool; use tokio::sync::{broadcast, Mutex}; const DEFAULT_BROADCAST_CAPACITY: usize = 2000; -/// Helper type alias for [`EthApi`] with components from the given [`FullNodeComponents`]. -pub type EthApiFor = EthApi< - ::Provider, - ::Pool, - ::Network, +/// Helper type alias for [`RpcConverter`] with components from the given [`FullNodeComponents`]. +pub type EthRpcConverterFor = RpcConverter< + NetworkT, ::Evm, + EthReceiptConverter<<::Provider as ChainSpecProvider>::ChainSpec>, >; /// Helper type alias for [`EthApi`] with components from the given [`FullNodeComponents`]. -pub type EthApiBuilderFor = EthApiBuilder< - ::Provider, - ::Pool, - ::Network, - ::Evm, ->; +pub type EthApiFor = EthApi>; + +/// Helper type alias for [`EthApi`] with components from the given [`FullNodeComponents`]. +pub type EthApiBuilderFor = + EthApiBuilder>; /// `Eth` API implementation. /// @@ -61,26 +62,27 @@ pub type EthApiBuilderFor = EthApiBuilder< /// While this type requires various unrestricted generic components, trait bounds are enforced when /// additional traits are implemented for this type. #[derive(Deref)] -pub struct EthApi { +pub struct EthApi { /// All nested fields bundled together. 
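// The reworked `EthApi` below keeps everything behind one `Arc` and derives
// `Deref` to it, so cloning the API is a cheap handle copy. Std-only model
// of that pattern:
use std::{ops::Deref, sync::Arc};

struct Inner { gas_cap: u64 }
struct Api { inner: Arc<Inner> }

impl Deref for Api {
    type Target = Inner;
    fn deref(&self) -> &Inner { &self.inner }
}

impl Clone for Api {
    // manual impl: only the Arc is copied, no `Inner: Clone` bound required
    fn clone(&self) -> Self { Self { inner: Arc::clone(&self.inner) } }
}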
#[deref] - pub(super) inner: Arc>, - /// Transaction RPC response builder. - pub tx_resp_builder: EthRpcConverter, + pub(super) inner: Arc>, } -impl Clone for EthApi +impl Clone for EthApi where - Provider: BlockReader, + N: RpcNodeCore, + Rpc: RpcConvert, { fn clone(&self) -> Self { - Self { inner: self.inner.clone(), tx_resp_builder: self.tx_resp_builder.clone() } + Self { inner: self.inner.clone() } } } -impl EthApi -where - Provider: BlockReaderIdExt, +impl + EthApi< + RpcNodeCoreAdapter, + EthRpcConverter, + > { /// Convenience fn to obtain a new [`EthApiBuilder`] instance with mandatory components. /// @@ -94,6 +96,7 @@ where /// # Create an instance with noop ethereum implementations /// /// ```no_run + /// use alloy_network::Ethereum; /// use reth_evm_ethereum::EthEvmConfig; /// use reth_network_api::noop::NoopNetwork; /// use reth_provider::noop::NoopProvider; @@ -107,35 +110,46 @@ where /// ) /// .build(); /// ``` - pub fn builder( + #[expect(clippy::type_complexity)] + pub fn builder( provider: Provider, pool: Pool, network: Network, evm_config: EvmConfig, - ) -> EthApiBuilder { + ) -> EthApiBuilder< + RpcNodeCoreAdapter, + RpcConverter>, + > + where + RpcNodeCoreAdapter: + RpcNodeCore, Evm = EvmConfig>, + { EthApiBuilder::new(provider, pool, network, evm_config) } +} +impl EthApi +where + N: RpcNodeCore, + Rpc: RpcConvert, + (): PendingEnvBuilder, +{ /// Creates a new, shareable instance using the default tokio task spawner. #[expect(clippy::too_many_arguments)] pub fn new( - provider: Provider, - pool: Pool, - network: Network, - eth_cache: EthStateCache, - gas_oracle: GasPriceOracle, + components: N, + eth_cache: EthStateCache, + gas_oracle: GasPriceOracle, gas_cap: impl Into, max_simulate_blocks: u64, eth_proof_window: u64, blocking_task_pool: BlockingTaskPool, - fee_history_cache: FeeHistoryCache>, - evm_config: EvmConfig, + fee_history_cache: FeeHistoryCache>, proof_permits: usize, + rpc_converter: Rpc, ) -> Self { let inner = EthApiInner::new( - provider, - pool, - network, + components, eth_cache, gas_oracle, gas_cap, @@ -143,42 +157,40 @@ where eth_proof_window, blocking_task_pool, fee_history_cache, - evm_config, TokioTaskExecutor::default().boxed(), proof_permits, + rpc_converter, + (), ); - Self { inner: Arc::new(inner), tx_resp_builder: Default::default() } + Self { inner: Arc::new(inner) } } } -impl EthApiTypes for EthApi +impl EthApiTypes for EthApi where - Self: Send + Sync, - Provider: BlockReader, + N: RpcNodeCore, + Rpc: RpcConvert, { type Error = EthApiError; - type NetworkTypes = Ethereum; - type RpcConvert = EthRpcConverter; + type NetworkTypes = Rpc::Network; + type RpcConvert = Rpc; fn tx_resp_builder(&self) -> &Self::RpcConvert { &self.tx_resp_builder } } -impl RpcNodeCore for EthApi +impl RpcNodeCore for EthApi where - Provider: BlockReader + NodePrimitivesProvider + Clone + Unpin, - Pool: Send + Sync + Clone + Unpin, - Network: Send + Sync + Clone, - EvmConfig: Send + Sync + Clone + Unpin, + N: RpcNodeCore, + Rpc: RpcConvert, { - type Primitives = Provider::Primitives; - type Provider = Provider; - type Pool = Pool; - type Evm = EvmConfig; - type Network = Network; - type PayloadBuilder = (); + type Primitives = N::Primitives; + type Provider = N::Provider; + type Pool = N::Pool; + type Evm = N::Evm; + type Network = N::Network; fn pool(&self) -> &Self::Pool { self.inner.pool() @@ -192,44 +204,36 @@ where self.inner.network() } - fn payload_builder(&self) -> &Self::PayloadBuilder { - &() - } - fn provider(&self) -> &Self::Provider { 
self.inner.provider() } } -impl RpcNodeCoreExt - for EthApi +impl RpcNodeCoreExt for EthApi where - Provider: BlockReader + NodePrimitivesProvider + Clone + Unpin, - Pool: Send + Sync + Clone + Unpin, - Network: Send + Sync + Clone, - EvmConfig: Send + Sync + Clone + Unpin, + N: RpcNodeCore, + Rpc: RpcConvert, { #[inline] - fn cache(&self) -> &EthStateCache, ProviderReceipt> { + fn cache(&self) -> &EthStateCache { self.inner.cache() } } -impl std::fmt::Debug - for EthApi +impl std::fmt::Debug for EthApi where - Provider: BlockReader, + N: RpcNodeCore, + Rpc: RpcConvert, { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("EthApi").finish_non_exhaustive() } } -impl SpawnBlocking - for EthApi +impl SpawnBlocking for EthApi where - Self: Clone + Send + Sync + 'static, - Provider: BlockReader, + N: RpcNodeCore, + Rpc: RpcConvert, { #[inline] fn io_task_spawner(&self) -> impl TaskSpawner { @@ -249,19 +253,15 @@ where /// Container type `EthApi` #[expect(missing_debug_implementations)] -pub struct EthApiInner { - /// The transaction pool. - pool: Pool, - /// The provider that can interact with the chain. - provider: Provider, - /// An interface to interact with the network - network: Network, +pub struct EthApiInner { + /// The components of the node. + components: N, /// All configured Signers - signers: parking_lot::RwLock>>>, + signers: SignersForRpc, /// The async cache frontend for eth related data - eth_cache: EthStateCache, + eth_cache: EthStateCache, /// The async gas oracle frontend for gas price suggestions - gas_oracle: GasPriceOracle, + gas_oracle: GasPriceOracle, /// Maximum gas limit for `eth_call` and call tracing RPC methods. gas_cap: u64, /// Maximum number of blocks for `eth_simulateV1`. @@ -273,46 +273,51 @@ pub struct EthApiInner { /// The type that can spawn tasks which would otherwise block. task_spawner: Box, /// Cached pending block if any - pending_block: Mutex>>, + pending_block: Mutex>>, /// A pool dedicated to CPU heavy blocking tasks. blocking_task_pool: BlockingTaskPool, /// Cache for block fees history - fee_history_cache: FeeHistoryCache>, - /// The type that defines how to configure the EVM - evm_config: EvmConfig, + fee_history_cache: FeeHistoryCache>, /// Guard for getproof calls blocking_task_guard: BlockingTaskGuard, /// Transaction broadcast channel raw_tx_sender: broadcast::Sender, + + /// Converter for RPC types. + tx_resp_builder: Rpc, + + /// Builder for pending block environment. + next_env_builder: Box>, } -impl EthApiInner +impl EthApiInner where - Provider: BlockReaderIdExt, + N: RpcNodeCore, + Rpc: RpcConvert, { /// Creates a new, shareable instance using the default tokio task spawner. 
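// `EthApiInner` below stores the pending-env builder as a boxed trait object
// rather than yet another generic parameter. A sketch of that trade-off with
// an illustrative trait (not the reth `PendingEnvBuilder` signature):
trait EnvBuilder: Send + Sync { fn build(&self) -> u64; }

struct DefaultEnv;
impl EnvBuilder for DefaultEnv { fn build(&self) -> u64 { 0 } }

struct Inner {
    // one pointer, dynamic dispatch, no extra type parameter on Inner
    env_builder: Box<dyn EnvBuilder>,
}

impl Inner {
    fn new(env: impl EnvBuilder + 'static) -> Self {
        Self { env_builder: Box::new(env) }
    }

    fn pending_env_builder(&self) -> &dyn EnvBuilder {
        &*self.env_builder
    }
}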
#[expect(clippy::too_many_arguments)] pub fn new( - provider: Provider, - pool: Pool, - network: Network, - eth_cache: EthStateCache, - gas_oracle: GasPriceOracle, + components: N, + eth_cache: EthStateCache, + gas_oracle: GasPriceOracle, gas_cap: impl Into, max_simulate_blocks: u64, eth_proof_window: u64, blocking_task_pool: BlockingTaskPool, - fee_history_cache: FeeHistoryCache>, - evm_config: EvmConfig, + fee_history_cache: FeeHistoryCache>, task_spawner: Box, proof_permits: usize, + tx_resp_builder: Rpc, + next_env: impl PendingEnvBuilder, ) -> Self { let signers = parking_lot::RwLock::new(Default::default()); // get the block number of the latest block let starting_block = U256::from( - provider + components + .provider() .header_by_number_or_tag(BlockNumberOrTag::Latest) .ok() .flatten() @@ -323,9 +328,7 @@ where let (raw_tx_sender, _) = broadcast::channel(DEFAULT_BROADCAST_CAPACITY); Self { - provider, - pool, - network, + components, signers, eth_cache, gas_oracle, @@ -337,37 +340,50 @@ where pending_block: Default::default(), blocking_task_pool, fee_history_cache, - evm_config, blocking_task_guard: BlockingTaskGuard::new(proof_permits), raw_tx_sender, + tx_resp_builder, + next_env_builder: Box::new(next_env), } } } -impl EthApiInner +impl EthApiInner where - Provider: BlockReader, + N: RpcNodeCore, + Rpc: RpcConvert, { /// Returns a handle to data on disk. #[inline] - pub const fn provider(&self) -> &Provider { - &self.provider + pub fn provider(&self) -> &N::Provider { + self.components.provider() + } + + /// Returns a handle to the transaction response builder. + #[inline] + pub const fn tx_resp_builder(&self) -> &Rpc { + &self.tx_resp_builder } /// Returns a handle to data in memory. #[inline] - pub const fn cache(&self) -> &EthStateCache { + pub const fn cache(&self) -> &EthStateCache { &self.eth_cache } /// Returns a handle to the pending block. #[inline] - pub const fn pending_block( - &self, - ) -> &Mutex>> { + pub const fn pending_block(&self) -> &Mutex>> { &self.pending_block } + /// Returns a type that knows how to build a [`reth_evm::ConfigureEvm::NextBlockEnvCtx`] for a + /// pending block. + #[inline] + pub const fn pending_env_builder(&self) -> &dyn PendingEnvBuilder { + &*self.next_env_builder + } + /// Returns a handle to the task spawner. #[inline] pub const fn task_spawner(&self) -> &dyn TaskSpawner { @@ -382,14 +398,14 @@ where /// Returns a handle to the EVM config. #[inline] - pub const fn evm_config(&self) -> &EvmConfig { - &self.evm_config + pub fn evm_config(&self) -> &N::Evm { + self.components.evm_config() } /// Returns a handle to the transaction pool. #[inline] - pub const fn pool(&self) -> &Pool { - &self.pool + pub fn pool(&self) -> &N::Pool { + self.components.pool() } /// Returns the gas cap. @@ -406,21 +422,19 @@ where /// Returns a handle to the gas oracle. #[inline] - pub const fn gas_oracle(&self) -> &GasPriceOracle { + pub const fn gas_oracle(&self) -> &GasPriceOracle { &self.gas_oracle } /// Returns a handle to the fee history cache. #[inline] - pub const fn fee_history_cache(&self) -> &FeeHistoryCache> { + pub const fn fee_history_cache(&self) -> &FeeHistoryCache> { &self.fee_history_cache } /// Returns a handle to the signers. 
#[inline] - pub const fn signers( - &self, - ) -> &parking_lot::RwLock>>> { + pub const fn signers(&self) -> &SignersForRpc { &self.signers } @@ -432,8 +446,8 @@ where /// Returns the inner `Network` #[inline] - pub const fn network(&self) -> &Network { - &self.network + pub fn network(&self) -> &N::Network { + self.components.network() } /// The maximum number of blocks into the past for generating state proofs. @@ -463,7 +477,7 @@ where #[cfg(test)] mod tests { - use crate::{EthApi, EthApiBuilder}; + use crate::{eth::helpers::types::EthRpcConverter, EthApi, EthApiBuilder}; use alloy_consensus::{Block, BlockBody, Header}; use alloy_eips::BlockNumberOrTag; use alloy_primitives::{Signature, B256, U64}; @@ -475,27 +489,37 @@ mod tests { use reth_ethereum_primitives::TransactionSigned; use reth_evm_ethereum::EthEvmConfig; use reth_network_api::noop::NoopNetwork; - use reth_provider::test_utils::{MockEthProvider, NoopProvider}; - use reth_rpc_eth_api::EthApiServer; + use reth_provider::{ + test_utils::{MockEthProvider, NoopProvider}, + StageCheckpointReader, + }; + use reth_rpc_eth_api::{node::RpcNodeCoreAdapter, EthApiServer}; use reth_storage_api::{BlockReader, BlockReaderIdExt, StateProviderFactory}; use reth_testing_utils::generators; use reth_transaction_pool::test_utils::{testing_pool, TestPool}; + type FakeEthApi
<P>
= EthApi< + RpcNodeCoreAdapter, + EthRpcConverter, + >; + fn build_test_eth_api< P: BlockReaderIdExt< Block = reth_ethereum_primitives::Block, Receipt = reth_ethereum_primitives::Receipt, Header = alloy_consensus::Header, + Transaction = reth_ethereum_primitives::TransactionSigned, > + BlockReader + ChainSpecProvider + StateProviderFactory + CanonStateSubscriptions + + StageCheckpointReader + Unpin + Clone + 'static, >( provider: P, - ) -> EthApi { + ) -> FakeEthApi
<P>
{ EthApiBuilder::new( provider.clone(), testing_pool(), @@ -511,7 +535,7 @@ mod tests { mut oldest_block: Option, block_count: u64, mock_provider: MockEthProvider, - ) -> (EthApi, Vec, Vec) { + ) -> (FakeEthApi, Vec, Vec) { let mut rng = generators::rng(); // Build mock data @@ -597,7 +621,7 @@ mod tests { /// Invalid block range #[tokio::test] async fn test_fee_history_empty() { - let response = as EthApiServer<_, _, _, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _, _>>::fee_history( &build_test_eth_api(NoopProvider::default()), U64::from(1), BlockNumberOrTag::Latest, @@ -619,7 +643,7 @@ mod tests { let (eth_api, _, _) = prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - let response = as EthApiServer<_, _, _, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _, _>>::fee_history( ð_api, U64::from(newest_block + 1), newest_block.into(), @@ -642,7 +666,7 @@ mod tests { let (eth_api, _, _) = prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - let response = as EthApiServer<_, _, _, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _, _>>::fee_history( ð_api, U64::from(1), (newest_block + 1000).into(), @@ -665,7 +689,7 @@ mod tests { let (eth_api, _, _) = prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - let response = as EthApiServer<_, _, _, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _, _>>::fee_history( ð_api, U64::from(0), newest_block.into(), diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index d672fd10f6e..b995d389dce 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -1,19 +1,20 @@ //! `eth_` `Filter` RPC handler implementation use alloy_consensus::BlockHeader; -use alloy_primitives::TxHash; +use alloy_primitives::{Sealable, TxHash}; use alloy_rpc_types_eth::{ BlockNumHash, Filter, FilterBlockOption, FilterChanges, FilterId, Log, PendingTransactionFilterKind, }; use async_trait::async_trait; use futures::future::TryFutureExt; +use itertools::Itertools; use jsonrpsee::{core::RpcResult, server::IdProvider}; use reth_errors::ProviderError; -use reth_primitives_traits::NodePrimitives; +use reth_primitives_traits::{NodePrimitives, SealedHeader}; use reth_rpc_eth_api::{ EngineEthFilter, EthApiTypes, EthFilterApiServer, FullEthApiTypes, QueryLimits, RpcConvert, - RpcNodeCore, RpcNodeCoreExt, RpcTransaction, + RpcNodeCoreExt, RpcTransaction, }; use reth_rpc_eth_types::{ logs_utils::{self, append_matching_block_logs, ProviderOrBlock}, @@ -22,15 +23,15 @@ use reth_rpc_eth_types::{ use reth_rpc_server_types::{result::rpc_error_with_code, ToRpcResult}; use reth_storage_api::{ BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, HeaderProvider, ProviderBlock, - ProviderReceipt, TransactionsProvider, + ProviderReceipt, ReceiptProvider, }; use reth_tasks::TaskSpawner; use reth_transaction_pool::{NewSubpoolTransactionStream, PoolTransaction, TransactionPool}; use std::{ - collections::HashMap, + collections::{HashMap, VecDeque}, fmt, future::Future, - iter::StepBy, + iter::{Peekable, StepBy}, ops::RangeInclusive, sync::Arc, time::{Duration, Instant}, @@ -39,7 +40,7 @@ use tokio::{ sync::{mpsc::Receiver, oneshot, Mutex}, time::MissedTickBehavior, }; -use tracing::{error, trace}; +use tracing::{debug, error, trace}; impl EngineEthFilter for EthFilter where @@ -56,9 +57,27 @@ where } } +/// Threshold for deciding between cached and range mode processing 
+const CACHED_MODE_BLOCK_THRESHOLD: u64 = 250; + +/// Threshold for bloom filter matches that triggers reduced caching +const HIGH_BLOOM_MATCH_THRESHOLD: usize = 20; + +/// Threshold for bloom filter matches that triggers moderately reduced caching +const MODERATE_BLOOM_MATCH_THRESHOLD: usize = 10; + +/// Minimum block count to apply bloom filter match adjustments +const BLOOM_ADJUSTMENT_MIN_BLOCKS: u64 = 100; + /// The maximum number of headers we read at once when handling a range filter. const MAX_HEADERS_RANGE: u64 = 1_000; // with ~530bytes per header this is ~500kb +/// Threshold for enabling parallel processing in range mode +const PARALLEL_PROCESSING_THRESHOLD: u64 = 1000; + +/// Default concurrency for parallel processing +const DEFAULT_PARALLEL_CONCURRENCY: usize = 4; + /// `Eth` filter RPC implementation. /// /// This type handles `eth_` rpc requests related to filters (`eth_getLogs`). @@ -293,13 +312,7 @@ where #[async_trait] impl EthFilterApiServer> for EthFilter where - Eth: FullEthApiTypes - + RpcNodeCoreExt< - Provider: BlockIdReader, - Primitives: NodePrimitives< - SignedTx = <::Provider as TransactionsProvider>::Transaction, - >, - > + 'static, + Eth: FullEthApiTypes + RpcNodeCoreExt + 'static, { /// Handler for `eth_newFilter` async fn new_filter(&self, filter: Filter) -> RpcResult { @@ -426,9 +439,7 @@ where } /// Access the underlying [`EthStateCache`]. - fn eth_cache( - &self, - ) -> &EthStateCache, ProviderReceipt> { + fn eth_cache(&self) -> &EthStateCache { self.eth_api.cache() } @@ -563,63 +574,93 @@ where /// Returns an error if: /// - underlying database error async fn get_logs_in_block_range_inner( - &self, + self: Arc, filter: &Filter, from_block: u64, to_block: u64, limits: QueryLimits, ) -> Result, EthFilterError> { let mut all_logs = Vec::new(); + let mut matching_headers = Vec::new(); - // loop over the range of new blocks and check logs if the filter matches the log's bloom - // filter + // get current chain tip to determine processing mode + let chain_tip = self.provider().best_block_number()?; + + // first collect all headers that match the bloom filter for cached mode decision for (from, to) in BlockRangeInclusiveIter::new(from_block..=to_block, self.max_headers_range) { let headers = self.provider().headers_range(from..=to)?; - for (idx, header) in headers - .iter() - .enumerate() - .filter(|(_, header)| filter.matches_bloom(header.logs_bloom())) - { - // these are consecutive headers, so we can use the parent hash of the next - // block to get the current header's hash - let block_hash = match headers.get(idx + 1) { - Some(child) => child.parent_hash(), - None => self - .provider() - .block_hash(header.number())? - .ok_or_else(|| ProviderError::HeaderNotFound(header.number().into()))?, - }; - let num_hash = BlockNumHash::new(header.number(), block_hash); - if let Some((receipts, maybe_block)) = - self.eth_cache().get_receipts_and_maybe_block(num_hash.hash).await? 
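// Both the removed loop above and its replacement rely on the same trick:
// for consecutive headers, block N's hash equals block N+1's parent_hash, so
// an explicit hash computation is only needed at the end of a run. Standalone
// model with a toy header and a toy hash function:
#[derive(Clone)]
struct Header { number: u64, parent_hash: u64 }

impl Header {
    // stands in for keccak over the RLP-encoded header (`hash_slow`)
    fn hash_slow(&self) -> u64 { self.number.wrapping_mul(31) ^ self.parent_hash }
}

fn number_hash_pairs(headers: Vec<Header>) -> Vec<(u64, u64)> {
    let mut out = Vec::new();
    let mut iter = headers.into_iter().peekable();
    while let Some(header) = iter.next() {
        let hash = match iter.peek() {
            // consecutive: the child already carries our hash
            Some(next) if next.number == header.number + 1 => next.parent_hash,
            // gap in the run, or last header: compute it
            _ => header.hash_slow(),
        };
        out.push((header.number, hash));
    }
    out
}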
- { - append_matching_block_logs( - &mut all_logs, - maybe_block - .map(ProviderOrBlock::Block) - .unwrap_or_else(|| ProviderOrBlock::Provider(self.provider())), - filter, - num_hash, - &receipts, - false, - header.timestamp(), - )?; + let mut headers_iter = headers.into_iter().peekable(); + + while let Some(header) = headers_iter.next() { + if !filter.matches_bloom(header.logs_bloom()) { + continue + } + + let current_number = header.number(); - // size check but only if range is multiple blocks, so we always return all - // logs of a single block - let is_multi_block_range = from_block != to_block; - if let Some(max_logs_per_response) = limits.max_logs_per_response { - if is_multi_block_range && all_logs.len() > max_logs_per_response { - return Err(EthFilterError::QueryExceedsMaxResults { - max_logs: max_logs_per_response, - from_block, - to_block: num_hash.number.saturating_sub(1), - }); - } + let block_hash = match headers_iter.peek() { + Some(next_header) if next_header.number() == current_number + 1 => { + // Headers are consecutive, use the more efficient parent_hash + next_header.parent_hash() } + _ => { + // Headers not consecutive or last header, calculate hash + header.hash_slow() + } + }; + + matching_headers.push(SealedHeader::new(header, block_hash)); + } + } + + // initialize the appropriate range mode based on collected headers + let mut range_mode = RangeMode::new( + self.clone(), + matching_headers, + from_block, + to_block, + self.max_headers_range, + chain_tip, + ); + + // iterate through the range mode to get receipts and blocks + while let Some(ReceiptBlockResult { receipts, recovered_block, header }) = + range_mode.next().await? + { + let num_hash = header.num_hash(); + append_matching_block_logs( + &mut all_logs, + recovered_block + .map(ProviderOrBlock::Block) + .unwrap_or_else(|| ProviderOrBlock::Provider(self.provider())), + filter, + num_hash, + &receipts, + false, + header.timestamp(), + )?; + + // size check but only if range is multiple blocks, so we always return all + // logs of a single block + let is_multi_block_range = from_block != to_block; + if let Some(max_logs_per_response) = limits.max_logs_per_response { + if is_multi_block_range && all_logs.len() > max_logs_per_response { + debug!( + target: "rpc::eth::filter", + logs_found = all_logs.len(), + max_logs_per_response, + from_block, + to_block = num_hash.number.saturating_sub(1), + "Query exceeded max logs per response limit" + ); + return Err(EthFilterError::QueryExceedsMaxResults { + max_logs: max_logs_per_response, + from_block, + to_block: num_hash.number.saturating_sub(1), + }); } } } @@ -842,11 +883,298 @@ impl From for EthFilterError { } } +/// Helper type for the common pattern of returning receipts, block and the original header that is +/// a match for the filter. +struct ReceiptBlockResult
<P>
+where + P: ReceiptProvider + BlockReader, +{ + /// We always need the entire receipts for the matching block. + receipts: Arc>>, + /// Block can be optional and we can fetch it lazily when needed. + recovered_block: Option>>>, + /// The header of the block. + header: SealedHeader<
<P as HeaderProvider>
::Header>, +} + +/// Represents different modes for processing block ranges when filtering logs +enum RangeMode< + Eth: RpcNodeCoreExt + EthApiTypes + 'static, +> { + /// Use cache-based processing for recent blocks + Cached(CachedMode), + /// Use range-based processing for older blocks + Range(RangeBlockMode), +} + +impl< + Eth: RpcNodeCoreExt + EthApiTypes + 'static, + > RangeMode +{ + /// Creates a new `RangeMode`. + fn new( + filter_inner: Arc>, + sealed_headers: Vec::Header>>, + from_block: u64, + to_block: u64, + max_headers_range: u64, + chain_tip: u64, + ) -> Self { + let block_count = to_block - from_block + 1; + let distance_from_tip = chain_tip.saturating_sub(to_block); + + // Determine if we should use cached mode based on range characteristics + let use_cached_mode = + Self::should_use_cached_mode(&sealed_headers, block_count, distance_from_tip); + + if use_cached_mode && !sealed_headers.is_empty() { + Self::Cached(CachedMode { filter_inner, headers_iter: sealed_headers.into_iter() }) + } else { + Self::Range(RangeBlockMode { + filter_inner, + iter: sealed_headers.into_iter().peekable(), + next: VecDeque::new(), + max_range: max_headers_range as usize, + }) + } + } + + /// Determines whether to use cached mode based on bloom filter matches and range size + const fn should_use_cached_mode( + headers: &[SealedHeader<::Header>], + block_count: u64, + distance_from_tip: u64, + ) -> bool { + // Headers are already filtered by bloom, so count equals length + let bloom_matches = headers.len(); + + // Calculate adjusted threshold based on bloom matches + let adjusted_threshold = Self::calculate_adjusted_threshold(block_count, bloom_matches); + + block_count <= adjusted_threshold && distance_from_tip <= adjusted_threshold + } + + /// Calculates the adjusted cache threshold based on bloom filter matches + const fn calculate_adjusted_threshold(block_count: u64, bloom_matches: usize) -> u64 { + // Only apply adjustments for larger ranges + if block_count <= BLOOM_ADJUSTMENT_MIN_BLOCKS { + return CACHED_MODE_BLOCK_THRESHOLD; + } + + match bloom_matches { + n if n > HIGH_BLOOM_MATCH_THRESHOLD => CACHED_MODE_BLOCK_THRESHOLD / 2, + n if n > MODERATE_BLOOM_MATCH_THRESHOLD => (CACHED_MODE_BLOCK_THRESHOLD * 3) / 4, + _ => CACHED_MODE_BLOCK_THRESHOLD, + } + } + + /// Gets the next (receipts, `maybe_block`, header, `block_hash`) tuple. + async fn next(&mut self) -> Result>, EthFilterError> { + match self { + Self::Cached(cached) => cached.next().await, + Self::Range(range) => range.next().await, + } + } +} + +/// Mode for processing blocks using cache optimization for recent blocks +struct CachedMode< + Eth: RpcNodeCoreExt + EthApiTypes + 'static, +> { + filter_inner: Arc>, + headers_iter: std::vec::IntoIter::Header>>, +} + +impl< + Eth: RpcNodeCoreExt + EthApiTypes + 'static, + > CachedMode +{ + async fn next(&mut self) -> Result>, EthFilterError> { + for header in self.headers_iter.by_ref() { + // Use get_receipts_and_maybe_block which has automatic fallback to provider + if let Some((receipts, maybe_block)) = + self.filter_inner.eth_cache().get_receipts_and_maybe_block(header.hash()).await? 
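// Standalone model of the cached-vs-range decision implemented by
// `should_use_cached_mode`/`calculate_adjusted_threshold` above, using the
// constants from this file. Worked example: 150 blocks with 25 bloom hits
// lowers the threshold to 125, so the query falls back to range mode even
// though 150 <= 250.
const CACHED_MODE_BLOCK_THRESHOLD: u64 = 250;
const HIGH_BLOOM_MATCH_THRESHOLD: usize = 20;
const MODERATE_BLOOM_MATCH_THRESHOLD: usize = 10;
const BLOOM_ADJUSTMENT_MIN_BLOCKS: u64 = 100;

const fn adjusted_threshold(block_count: u64, bloom_matches: usize) -> u64 {
    // small ranges keep the full threshold regardless of match density
    if block_count <= BLOOM_ADJUSTMENT_MIN_BLOCKS {
        return CACHED_MODE_BLOCK_THRESHOLD;
    }
    match bloom_matches {
        n if n > HIGH_BLOOM_MATCH_THRESHOLD => CACHED_MODE_BLOCK_THRESHOLD / 2, // 125
        n if n > MODERATE_BLOOM_MATCH_THRESHOLD => (CACHED_MODE_BLOCK_THRESHOLD * 3) / 4, // 187
        _ => CACHED_MODE_BLOCK_THRESHOLD,
    }
}

fn use_cached_mode(block_count: u64, distance_from_tip: u64, bloom_matches: usize) -> bool {
    let threshold = adjusted_threshold(block_count, bloom_matches);
    block_count <= threshold && distance_from_tip <= threshold
}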
+ { + return Ok(Some(ReceiptBlockResult { + receipts, + recovered_block: maybe_block, + header, + })); + } + } + + Ok(None) // No more headers + } +} + +/// Mode for processing blocks using range queries for older blocks +struct RangeBlockMode< + Eth: RpcNodeCoreExt + EthApiTypes + 'static, +> { + filter_inner: Arc>, + iter: Peekable::Header>>>, + next: VecDeque>, + max_range: usize, +} + +impl< + Eth: RpcNodeCoreExt + EthApiTypes + 'static, + > RangeBlockMode +{ + async fn next(&mut self) -> Result>, EthFilterError> { + if let Some(result) = self.next.pop_front() { + return Ok(Some(result)); + } + + let Some(next_header) = self.iter.next() else { + return Ok(None); + }; + + let mut range_headers = Vec::with_capacity(self.max_range); + range_headers.push(next_header); + + // Collect consecutive blocks up to max_range size + while range_headers.len() < self.max_range { + let Some(peeked) = self.iter.peek() else { break }; + let Some(last_header) = range_headers.last() else { break }; + + let expected_next = last_header.header().number() + 1; + if peeked.header().number() != expected_next { + break; // Non-consecutive block, stop here + } + + let Some(next_header) = self.iter.next() else { break }; + range_headers.push(next_header); + } + + // Check if we should use parallel processing for large ranges + let remaining_headers = self.iter.len() + range_headers.len(); + if remaining_headers >= PARALLEL_PROCESSING_THRESHOLD as usize { + self.process_large_range(range_headers).await + } else { + self.process_small_range(range_headers).await + } + } + + /// Process small range headers + async fn process_small_range( + &mut self, + range_headers: Vec::Header>>, + ) -> Result>, EthFilterError> { + // Process each header individually to avoid queuing for all receipts + for header in range_headers { + // First check if already cached to avoid unnecessary provider calls + let (maybe_block, maybe_receipts) = self + .filter_inner + .eth_cache() + .maybe_cached_block_and_receipts(header.hash()) + .await?; + + let receipts = match maybe_receipts { + Some(receipts) => receipts, + None => { + // Not cached - fetch directly from provider without queuing + match self.filter_inner.provider().receipts_by_block(header.hash().into())? { + Some(receipts) => Arc::new(receipts), + None => continue, // No receipts found + } + } + }; + + if !receipts.is_empty() { + self.next.push_back(ReceiptBlockResult { + receipts, + recovered_block: maybe_block, + header, + }); + } + } + + Ok(self.next.pop_front()) + } + + /// Process large range headers + async fn process_large_range( + &mut self, + range_headers: Vec::Header>>, + ) -> Result>, EthFilterError> { + // Split headers into chunks + let chunk_size = std::cmp::max(range_headers.len() / DEFAULT_PARALLEL_CONCURRENCY, 1); + let header_chunks = range_headers + .into_iter() + .chunks(chunk_size) + .into_iter() + .map(|chunk| chunk.collect::>()) + .collect::>(); + + // Process chunks in parallel + let mut tasks = Vec::new(); + for chunk_headers in header_chunks { + let filter_inner = self.filter_inner.clone(); + let task = tokio::task::spawn_blocking(move || { + let mut chunk_results = Vec::new(); + + for header in chunk_headers { + // Fetch directly from provider - RangeMode is used for older blocks unlikely to + // be cached + let receipts = + match filter_inner.provider().receipts_by_block(header.hash().into())? 
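// Std-only model of `process_large_range` above: split the work into
// DEFAULT_PARALLEL_CONCURRENCY chunks, fan the provider lookups out to
// blocking workers, then drain results in order (plain threads stand in for
// `tokio::task::spawn_blocking` + `futures::future::join_all`):
const DEFAULT_PARALLEL_CONCURRENCY: usize = 4;

fn fetch_chunked(block_numbers: Vec<u64>) -> Vec<(u64, &'static str)> {
    let chunk_size = std::cmp::max(block_numbers.len() / DEFAULT_PARALLEL_CONCURRENCY, 1);
    let handles: Vec<_> = block_numbers
        .chunks(chunk_size)
        .map(|chunk| {
            let chunk = chunk.to_vec();
            // stands in for `filter_inner.provider().receipts_by_block(..)`
            std::thread::spawn(move || {
                chunk.into_iter().map(|n| (n, "receipts")).collect::<Vec<_>>()
            })
        })
        .collect();

    let mut out = Vec::new();
    for handle in handles {
        // a panicked worker surfaces as an error, mirroring the
        // `EthFilterError::InternalError` branch on a failed join
        out.extend(handle.join().expect("worker panicked"));
    }
    out
}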
{ + Some(receipts) => Arc::new(receipts), + None => continue, // No receipts found + }; + + if !receipts.is_empty() { + chunk_results.push(ReceiptBlockResult { + receipts, + recovered_block: None, + header, + }); + } + } + + Ok::>, EthFilterError>(chunk_results) + }); + tasks.push(task); + } + + let results = futures::future::join_all(tasks).await; + for result in results { + match result { + Ok(Ok(chunk_results)) => { + for result in chunk_results { + self.next.push_back(result); + } + } + Ok(Err(e)) => return Err(e), + Err(_join_err) => { + return Err(EthFilterError::InternalError); + } + } + } + + Ok(self.next.pop_front()) + } +} + #[cfg(test)] mod tests { use super::*; + use crate::{eth::EthApi, EthApiBuilder}; + use alloy_network::Ethereum; + use alloy_primitives::FixedBytes; use rand::Rng; + use reth_chainspec::{ChainSpec, ChainSpecProvider}; + use reth_ethereum_primitives::TxType; + use reth_evm_ethereum::EthEvmConfig; + use reth_network_api::noop::NoopNetwork; + use reth_provider::test_utils::MockEthProvider; + use reth_rpc_convert::RpcConverter; + use reth_rpc_eth_api::node::RpcNodeCoreAdapter; + use reth_rpc_eth_types::receipt::EthReceiptConverter; + use reth_tasks::TokioTaskExecutor; use reth_testing_utils::generators; + use reth_transaction_pool::test_utils::{testing_pool, TestPool}; + use std::{collections::VecDeque, sync::Arc}; #[test] fn test_block_range_iter() { @@ -869,4 +1197,545 @@ mod tests { assert_eq!(end, *range.end()); } + + // Helper function to create a test EthApi instance + #[expect(clippy::type_complexity)] + fn build_test_eth_api( + provider: MockEthProvider, + ) -> EthApi< + RpcNodeCoreAdapter, + RpcConverter>, + > { + EthApiBuilder::new( + provider.clone(), + testing_pool(), + NoopNetwork::default(), + EthEvmConfig::new(provider.chain_spec()), + ) + .build() + } + + #[tokio::test] + async fn test_range_block_mode_empty_range() { + let provider = MockEthProvider::default(); + let eth_api = build_test_eth_api(provider); + + let eth_filter = super::EthFilter::new( + eth_api, + EthFilterConfig::default(), + Box::new(TokioTaskExecutor::default()), + ); + let filter_inner = eth_filter.inner; + + let headers = vec![]; + let max_range = 100; + + let mut range_mode = RangeBlockMode { + filter_inner, + iter: headers.into_iter().peekable(), + next: VecDeque::new(), + max_range, + }; + + let result = range_mode.next().await; + assert!(result.is_ok()); + assert!(result.unwrap().is_none()); + } + + #[tokio::test] + async fn test_range_block_mode_queued_results_priority() { + let provider = MockEthProvider::default(); + let eth_api = build_test_eth_api(provider); + + let eth_filter = super::EthFilter::new( + eth_api, + EthFilterConfig::default(), + Box::new(TokioTaskExecutor::default()), + ); + let filter_inner = eth_filter.inner; + + let headers = vec![ + SealedHeader::new( + alloy_consensus::Header { number: 100, ..Default::default() }, + FixedBytes::random(), + ), + SealedHeader::new( + alloy_consensus::Header { number: 101, ..Default::default() }, + FixedBytes::random(), + ), + ]; + + // create specific mock results to test ordering + let expected_block_hash_1 = FixedBytes::from([1u8; 32]); + let expected_block_hash_2 = FixedBytes::from([2u8; 32]); + + // create mock receipts to test receipt handling + let mock_receipt_1 = reth_ethereum_primitives::Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 100_000, + logs: vec![], + success: true, + }; + let mock_receipt_2 = reth_ethereum_primitives::Receipt { + tx_type: TxType::Eip1559, + cumulative_gas_used: 
200_000, + logs: vec![], + success: true, + }; + let mock_receipt_3 = reth_ethereum_primitives::Receipt { + tx_type: TxType::Eip2930, + cumulative_gas_used: 150_000, + logs: vec![], + success: false, // Different success status + }; + + let mock_result_1 = ReceiptBlockResult { + receipts: Arc::new(vec![mock_receipt_1.clone(), mock_receipt_2.clone()]), + recovered_block: None, + header: SealedHeader::new( + alloy_consensus::Header { number: 42, ..Default::default() }, + expected_block_hash_1, + ), + }; + + let mock_result_2 = ReceiptBlockResult { + receipts: Arc::new(vec![mock_receipt_3.clone()]), + recovered_block: None, + header: SealedHeader::new( + alloy_consensus::Header { number: 43, ..Default::default() }, + expected_block_hash_2, + ), + }; + + let mut range_mode = RangeBlockMode { + filter_inner, + iter: headers.into_iter().peekable(), + next: VecDeque::from([mock_result_1, mock_result_2]), // Queue two results + max_range: 100, + }; + + // first call should return the first queued result (FIFO order) + let result1 = range_mode.next().await; + assert!(result1.is_ok()); + let receipt_result1 = result1.unwrap().unwrap(); + assert_eq!(receipt_result1.header.hash(), expected_block_hash_1); + assert_eq!(receipt_result1.header.number, 42); + + // verify receipts + assert_eq!(receipt_result1.receipts.len(), 2); + assert_eq!(receipt_result1.receipts[0].tx_type, mock_receipt_1.tx_type); + assert_eq!( + receipt_result1.receipts[0].cumulative_gas_used, + mock_receipt_1.cumulative_gas_used + ); + assert_eq!(receipt_result1.receipts[0].success, mock_receipt_1.success); + assert_eq!(receipt_result1.receipts[1].tx_type, mock_receipt_2.tx_type); + assert_eq!( + receipt_result1.receipts[1].cumulative_gas_used, + mock_receipt_2.cumulative_gas_used + ); + assert_eq!(receipt_result1.receipts[1].success, mock_receipt_2.success); + + // second call should return the second queued result + let result2 = range_mode.next().await; + assert!(result2.is_ok()); + let receipt_result2 = result2.unwrap().unwrap(); + assert_eq!(receipt_result2.header.hash(), expected_block_hash_2); + assert_eq!(receipt_result2.header.number, 43); + + // verify receipts + assert_eq!(receipt_result2.receipts.len(), 1); + assert_eq!(receipt_result2.receipts[0].tx_type, mock_receipt_3.tx_type); + assert_eq!( + receipt_result2.receipts[0].cumulative_gas_used, + mock_receipt_3.cumulative_gas_used + ); + assert_eq!(receipt_result2.receipts[0].success, mock_receipt_3.success); + + // queue should now be empty + assert!(range_mode.next.is_empty()); + + let result3 = range_mode.next().await; + assert!(result3.is_ok()); + } + + #[tokio::test] + async fn test_range_block_mode_single_block_no_receipts() { + let provider = MockEthProvider::default(); + let eth_api = build_test_eth_api(provider); + + let eth_filter = super::EthFilter::new( + eth_api, + EthFilterConfig::default(), + Box::new(TokioTaskExecutor::default()), + ); + let filter_inner = eth_filter.inner; + + let headers = vec![SealedHeader::new( + alloy_consensus::Header { number: 100, ..Default::default() }, + FixedBytes::random(), + )]; + + let mut range_mode = RangeBlockMode { + filter_inner, + iter: headers.into_iter().peekable(), + next: VecDeque::new(), + max_range: 100, + }; + + let result = range_mode.next().await; + assert!(result.is_ok()); + } + + #[tokio::test] + async fn test_range_block_mode_provider_receipts() { + let provider = MockEthProvider::default(); + + let header_1 = alloy_consensus::Header { number: 100, ..Default::default() }; + let header_2 = 
alloy_consensus::Header { number: 101, ..Default::default() }; + let header_3 = alloy_consensus::Header { number: 102, ..Default::default() }; + + let block_hash_1 = FixedBytes::random(); + let block_hash_2 = FixedBytes::random(); + let block_hash_3 = FixedBytes::random(); + + provider.add_header(block_hash_1, header_1.clone()); + provider.add_header(block_hash_2, header_2.clone()); + provider.add_header(block_hash_3, header_3.clone()); + + // create mock receipts to test provider fetching with mock logs + let mock_log = alloy_primitives::Log { + address: alloy_primitives::Address::ZERO, + data: alloy_primitives::LogData::new_unchecked(vec![], alloy_primitives::Bytes::new()), + }; + + let receipt_100_1 = reth_ethereum_primitives::Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 21_000, + logs: vec![mock_log.clone()], + success: true, + }; + let receipt_100_2 = reth_ethereum_primitives::Receipt { + tx_type: TxType::Eip1559, + cumulative_gas_used: 42_000, + logs: vec![mock_log.clone()], + success: true, + }; + let receipt_101_1 = reth_ethereum_primitives::Receipt { + tx_type: TxType::Eip2930, + cumulative_gas_used: 30_000, + logs: vec![mock_log.clone()], + success: false, + }; + + provider.add_receipts(100, vec![receipt_100_1.clone(), receipt_100_2.clone()]); + provider.add_receipts(101, vec![receipt_101_1.clone()]); + + let eth_api = build_test_eth_api(provider); + + let eth_filter = super::EthFilter::new( + eth_api, + EthFilterConfig::default(), + Box::new(TokioTaskExecutor::default()), + ); + let filter_inner = eth_filter.inner; + + let headers = vec![ + SealedHeader::new(header_1, block_hash_1), + SealedHeader::new(header_2, block_hash_2), + SealedHeader::new(header_3, block_hash_3), + ]; + + let mut range_mode = RangeBlockMode { + filter_inner, + iter: headers.into_iter().peekable(), + next: VecDeque::new(), + max_range: 3, // include the 3 blocks in the first queried results + }; + + // first call should fetch receipts from provider and return first block with receipts + let result = range_mode.next().await; + assert!(result.is_ok()); + let receipt_result = result.unwrap().unwrap(); + + assert_eq!(receipt_result.header.hash(), block_hash_1); + assert_eq!(receipt_result.header.number, 100); + assert_eq!(receipt_result.receipts.len(), 2); + + // verify receipts + assert_eq!(receipt_result.receipts[0].tx_type, receipt_100_1.tx_type); + assert_eq!( + receipt_result.receipts[0].cumulative_gas_used, + receipt_100_1.cumulative_gas_used + ); + assert_eq!(receipt_result.receipts[0].success, receipt_100_1.success); + + assert_eq!(receipt_result.receipts[1].tx_type, receipt_100_2.tx_type); + assert_eq!( + receipt_result.receipts[1].cumulative_gas_used, + receipt_100_2.cumulative_gas_used + ); + assert_eq!(receipt_result.receipts[1].success, receipt_100_2.success); + + // second call should return the second block with receipts + let result2 = range_mode.next().await; + assert!(result2.is_ok()); + let receipt_result2 = result2.unwrap().unwrap(); + + assert_eq!(receipt_result2.header.hash(), block_hash_2); + assert_eq!(receipt_result2.header.number, 101); + assert_eq!(receipt_result2.receipts.len(), 1); + + // verify receipts + assert_eq!(receipt_result2.receipts[0].tx_type, receipt_101_1.tx_type); + assert_eq!( + receipt_result2.receipts[0].cumulative_gas_used, + receipt_101_1.cumulative_gas_used + ); + assert_eq!(receipt_result2.receipts[0].success, receipt_101_1.success); + + // third call should return None since no more blocks with receipts + let result3 = range_mode.next().await; + 
assert!(result3.is_ok()); + assert!(result3.unwrap().is_none()); + } + + #[tokio::test] + async fn test_range_block_mode_iterator_exhaustion() { + let provider = MockEthProvider::default(); + let eth_api = build_test_eth_api(provider); + + let eth_filter = super::EthFilter::new( + eth_api, + EthFilterConfig::default(), + Box::new(TokioTaskExecutor::default()), + ); + let filter_inner = eth_filter.inner; + + let headers = vec![ + SealedHeader::new( + alloy_consensus::Header { number: 100, ..Default::default() }, + FixedBytes::random(), + ), + SealedHeader::new( + alloy_consensus::Header { number: 101, ..Default::default() }, + FixedBytes::random(), + ), + ]; + + let mut range_mode = RangeBlockMode { + filter_inner, + iter: headers.into_iter().peekable(), + next: VecDeque::new(), + max_range: 1, + }; + + let result1 = range_mode.next().await; + assert!(result1.is_ok()); + + assert!(range_mode.iter.peek().is_some()); + + let result2 = range_mode.next().await; + assert!(result2.is_ok()); + + // now iterator should be exhausted + assert!(range_mode.iter.peek().is_none()); + + // further calls should return None + let result3 = range_mode.next().await; + assert!(result3.is_ok()); + assert!(result3.unwrap().is_none()); + } + + #[tokio::test] + async fn test_cached_mode_with_mock_receipts() { + // create test data + let test_hash = FixedBytes::from([42u8; 32]); + let test_block_number = 100u64; + let test_header = SealedHeader::new( + alloy_consensus::Header { + number: test_block_number, + gas_used: 50_000, + ..Default::default() + }, + test_hash, + ); + + // add a mock receipt to the provider with a mock log + let mock_log = alloy_primitives::Log { + address: alloy_primitives::Address::ZERO, + data: alloy_primitives::LogData::new_unchecked(vec![], alloy_primitives::Bytes::new()), + }; + + let mock_receipt = reth_ethereum_primitives::Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 21_000, + logs: vec![mock_log], + success: true, + }; + + let provider = MockEthProvider::default(); + provider.add_header(test_hash, test_header.header().clone()); + provider.add_receipts(test_block_number, vec![mock_receipt.clone()]); + + let eth_api = build_test_eth_api(provider); + let eth_filter = super::EthFilter::new( + eth_api, + EthFilterConfig::default(), + Box::new(TokioTaskExecutor::default()), + ); + let filter_inner = eth_filter.inner; + + let headers = vec![test_header.clone()]; + + let mut cached_mode = CachedMode { filter_inner, headers_iter: headers.into_iter() }; + + // should find the receipt from provider fallback (cache will be empty) + let result = cached_mode.next().await.expect("next should succeed"); + let receipt_block_result = result.expect("should have receipt result"); + assert_eq!(receipt_block_result.header.hash(), test_hash); + assert_eq!(receipt_block_result.header.number, test_block_number); + assert_eq!(receipt_block_result.receipts.len(), 1); + assert_eq!(receipt_block_result.receipts[0].tx_type, mock_receipt.tx_type); + assert_eq!( + receipt_block_result.receipts[0].cumulative_gas_used, + mock_receipt.cumulative_gas_used + ); + assert_eq!(receipt_block_result.receipts[0].success, mock_receipt.success); + + // iterator should be exhausted + let result2 = cached_mode.next().await; + assert!(result2.is_ok()); + assert!(result2.unwrap().is_none()); + } + + #[tokio::test] + async fn test_cached_mode_empty_headers() { + let provider = MockEthProvider::default(); + let eth_api = build_test_eth_api(provider); + + let eth_filter = super::EthFilter::new( + eth_api, + 
EthFilterConfig::default(), + Box::new(TokioTaskExecutor::default()), + ); + let filter_inner = eth_filter.inner; + + let headers: Vec<SealedHeader> = vec![]; + + let mut cached_mode = CachedMode { filter_inner, headers_iter: headers.into_iter() }; + + // should immediately return None for empty headers + let result = cached_mode.next().await.expect("next should succeed"); + assert!(result.is_none()); + } + + #[tokio::test] + async fn test_non_consecutive_headers_after_bloom_filter() { + let provider = MockEthProvider::default(); + + // Create 4 headers where only blocks 100 and 102 will match the bloom filter + let mut expected_hashes = vec![]; + let mut prev_hash = alloy_primitives::B256::default(); + + // Create a transaction for blocks that will have receipts + use alloy_consensus::TxLegacy; + use reth_ethereum_primitives::{TransactionSigned, TxType}; + + let tx_inner = TxLegacy { + chain_id: Some(1), + nonce: 0, + gas_price: 21_000, + gas_limit: 21_000, + to: alloy_primitives::TxKind::Call(alloy_primitives::Address::ZERO), + value: alloy_primitives::U256::ZERO, + input: alloy_primitives::Bytes::new(), + }; + let signature = alloy_primitives::Signature::test_signature(); + let tx = TransactionSigned::new_unhashed(tx_inner.into(), signature); + + for i in 100u64..=103 { + let header = alloy_consensus::Header { + number: i, + parent_hash: prev_hash, + // Set bloom to match filter only for blocks 100 and 102 + logs_bloom: if i == 100 || i == 102 { + alloy_primitives::Bloom::from([1u8; 256]) + } else { + alloy_primitives::Bloom::default() + }, + ..Default::default() + }; + + let hash = header.hash_slow(); + expected_hashes.push(hash); + prev_hash = hash; + + // Add transaction to blocks that will have receipts (100 and 102) + let transactions = if i == 100 || i == 102 { vec![tx.clone()] } else { vec![] }; + + let block = reth_ethereum_primitives::Block { + header, + body: reth_ethereum_primitives::BlockBody { transactions, ..Default::default() }, + }; + provider.add_block(hash, block); + } + + // Add receipts with logs only to blocks that match bloom + let mock_log = alloy_primitives::Log { + address: alloy_primitives::Address::ZERO, + data: alloy_primitives::LogData::new_unchecked(vec![], alloy_primitives::Bytes::new()), + }; + + let receipt = reth_ethereum_primitives::Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 21_000, + logs: vec![mock_log], + success: true, + }; + + provider.add_receipts(100, vec![receipt.clone()]); + provider.add_receipts(101, vec![]); + provider.add_receipts(102, vec![receipt.clone()]); + provider.add_receipts(103, vec![]); + + // Add block body indices for each block so receipts can be fetched + use reth_db_api::models::StoredBlockBodyIndices; + provider + .add_block_body_indices(100, StoredBlockBodyIndices { first_tx_num: 0, tx_count: 1 }); + provider + .add_block_body_indices(101, StoredBlockBodyIndices { first_tx_num: 1, tx_count: 0 }); + provider + .add_block_body_indices(102, StoredBlockBodyIndices { first_tx_num: 1, tx_count: 1 }); + provider + .add_block_body_indices(103, StoredBlockBodyIndices { first_tx_num: 2, tx_count: 0 }); + + let eth_api = build_test_eth_api(provider); + let eth_filter = EthFilter::new( + eth_api, + EthFilterConfig::default(), + Box::new(TokioTaskExecutor::default()), + ); + + // Use default filter which will match any non-empty bloom + let filter = Filter::default(); + + // Get logs in the range - this will trigger the bloom filtering + let logs = eth_filter + .inner + .clone() + .get_logs_in_block_range(filter, 100, 103,
QueryLimits::default()) + .await + .expect("should succeed"); + + // We should get logs from blocks 100 and 102 only (bloom filtered) + assert_eq!(logs.len(), 2); + + assert_eq!(logs[0].block_number, Some(100)); + assert_eq!(logs[1].block_number, Some(102)); + + // Each block hash should be the hash of its own header, not derived from any other header + assert_eq!(logs[0].block_hash, Some(expected_hashes[0])); // block 100 + assert_eq!(logs[1].block_hash, Some(expected_hashes[2])); // block 102 + } } diff --git a/crates/rpc/rpc/src/eth/helpers/block.rs b/crates/rpc/rpc/src/eth/helpers/block.rs index 724b3a5c965..8077802804b 100644 --- a/crates/rpc/rpc/src/eth/helpers/block.rs +++ b/crates/rpc/rpc/src/eth/helpers/block.rs @@ -1,89 +1,26 @@ //! Contains RPC handler implementations specific to blocks. -use alloy_consensus::{transaction::TransactionMeta, BlockHeader}; -use alloy_rpc_types_eth::{BlockId, TransactionReceipt}; -use reth_chainspec::{ChainSpecProvider, EthChainSpec}; -use reth_evm::ConfigureEvm; -use reth_primitives_traits::{BlockBody, NodePrimitives}; use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ - helpers::{EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt, SpawnBlocking}, - types::RpcTypes, - RpcNodeCore, RpcNodeCoreExt, RpcReceipt, + helpers::{EthBlocks, LoadBlock, LoadPendingBlock}, + FromEvmError, RpcNodeCore, }; -use reth_rpc_eth_types::{EthApiError, EthReceiptBuilder}; -use reth_storage_api::{BlockReader, ProviderTx}; -use reth_transaction_pool::{PoolTransaction, TransactionPool}; +use reth_rpc_eth_types::EthApiError; use crate::EthApi; -impl EthBlocks for EthApi +impl EthBlocks for EthApi where - Self: LoadBlock< - Error = EthApiError, - NetworkTypes: RpcTypes, - RpcConvert: RpcConvert, - Provider: BlockReader< - Transaction = reth_ethereum_primitives::TransactionSigned, - Receipt = reth_ethereum_primitives::Receipt, - >, - >, - Provider: BlockReader + ChainSpecProvider, + N: RpcNodeCore, + EthApiError: FromEvmError, + Rpc: RpcConvert, { - async fn block_receipts( - &self, - block_id: BlockId, - ) -> Result>>, Self::Error> - where - Self: LoadReceipt, - { - if let Some((block, receipts)) = self.load_block_and_receipts(block_id).await? 
{ - let block_number = block.number(); - let base_fee = block.base_fee_per_gas(); - let block_hash = block.hash(); - let excess_blob_gas = block.excess_blob_gas(); - let timestamp = block.timestamp(); - let blob_params = self.provider().chain_spec().blob_params_at_timestamp(timestamp); - - return block - .body() - .transactions() - .iter() - .zip(receipts.iter()) - .enumerate() - .map(|(idx, (tx, receipt))| { - let meta = TransactionMeta { - tx_hash: *tx.tx_hash(), - index: idx as u64, - block_hash, - block_number, - base_fee, - excess_blob_gas, - timestamp, - }; - EthReceiptBuilder::new(tx, meta, receipt, &receipts, blob_params) - .map(|builder| builder.build()) - }) - .collect::, Self::Error>>() - .map(Some) - } - - Ok(None) - } } -impl LoadBlock for EthApi +impl LoadBlock for EthApi where - Self: LoadPendingBlock - + SpawnBlocking - + RpcNodeCoreExt< - Pool: TransactionPool< - Transaction: PoolTransaction>, - >, - Primitives: NodePrimitives>, - Evm = EvmConfig, - >, - Provider: BlockReader, - EvmConfig: ConfigureEvm::Primitives>, + Self: LoadPendingBlock, + N: RpcNodeCore, + Rpc: RpcConvert, { } diff --git a/crates/rpc/rpc/src/eth/helpers/call.rs b/crates/rpc/rpc/src/eth/helpers/call.rs index 1a41b8d5768..8a8377f7abc 100644 --- a/crates/rpc/rpc/src/eth/helpers/call.rs +++ b/crates/rpc/rpc/src/eth/helpers/call.rs @@ -1,54 +1,27 @@ //! Contains RPC handler implementations specific to endpoints that call/execute within evm. use crate::EthApi; -use alloy_evm::block::BlockExecutorFactory; -use alloy_rpc_types_eth::TransactionRequest; -use reth_errors::ProviderError; -use reth_evm::{ConfigureEvm, EvmFactory, TxEnvFor}; -use reth_node_api::NodePrimitives; -use reth_rpc_convert::{RpcConvert, RpcTypes}; +use reth_evm::TxEnvFor; +use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ - helpers::{estimate::EstimateCall, Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking}, - FromEvmError, FullEthApiTypes, RpcNodeCore, RpcNodeCoreExt, + helpers::{estimate::EstimateCall, Call, EthCall}, + FromEvmError, RpcNodeCore, }; -use reth_storage_api::{BlockReader, ProviderHeader, ProviderTx}; -use reth_transaction_pool::{PoolTransaction, TransactionPool}; -use revm::context::TxEnv; +use reth_rpc_eth_types::EthApiError; -impl EthCall for EthApi +impl EthCall for EthApi where - Self: EstimateCall - + LoadPendingBlock - + FullEthApiTypes - + RpcNodeCoreExt< - Pool: TransactionPool< - Transaction: PoolTransaction>, - >, - Primitives: NodePrimitives>, - Evm = EvmConfig, - >, - EvmConfig: ConfigureEvm::Primitives>, - Provider: BlockReader, + N: RpcNodeCore, + EthApiError: FromEvmError, + Rpc: RpcConvert>, { } -impl Call for EthApi +impl Call for EthApi where - Self: LoadState< - Evm: ConfigureEvm< - BlockExecutorFactory: BlockExecutorFactory>, - Primitives: NodePrimitives< - BlockHeader = ProviderHeader, - SignedTx = ProviderTx, - >, - >, - RpcConvert: RpcConvert, Network = Self::NetworkTypes>, - NetworkTypes: RpcTypes>, - Error: FromEvmError - + From<::Error> - + From, - > + SpawnBlocking, - Provider: BlockReader, + N: RpcNodeCore, + EthApiError: FromEvmError, + Rpc: RpcConvert>, { #[inline] fn call_gas_limit(&self) -> u64 { @@ -61,9 +34,10 @@ where } } -impl EstimateCall for EthApi +impl EstimateCall for EthApi where - Self: Call, - Provider: BlockReader, + N: RpcNodeCore, + EthApiError: FromEvmError, + Rpc: RpcConvert>, { } diff --git a/crates/rpc/rpc/src/eth/helpers/fees.rs b/crates/rpc/rpc/src/eth/helpers/fees.rs index 87adb42b2b5..1d26644b47b 100644 --- a/crates/rpc/rpc/src/eth/helpers/fees.rs +++ 
b/crates/rpc/rpc/src/eth/helpers/fees.rs @@ -1,29 +1,28 @@ //! Contains RPC handler implementations for fee history. -use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; -use reth_rpc_eth_api::helpers::{EthFees, LoadBlock, LoadFee}; -use reth_rpc_eth_types::{FeeHistoryCache, GasPriceOracle}; -use reth_storage_api::{BlockReader, BlockReaderIdExt, ProviderHeader, StateProviderFactory}; +use reth_rpc_convert::RpcConvert; +use reth_rpc_eth_api::{ + helpers::{EthFees, LoadFee}, + FromEvmError, RpcNodeCore, +}; +use reth_rpc_eth_types::{EthApiError, FeeHistoryCache, GasPriceOracle}; +use reth_storage_api::ProviderHeader; use crate::EthApi; -impl EthFees for EthApi +impl EthFees for EthApi where - Self: LoadFee< - Provider: ChainSpecProvider< - ChainSpec: EthChainSpec

>, - >, - >, - Provider: BlockReader, + N: RpcNodeCore, + EthApiError: FromEvmError, + Rpc: RpcConvert, { } -impl LoadFee for EthApi +impl LoadFee for EthApi where - Self: LoadBlock, - Provider: BlockReaderIdExt - + ChainSpecProvider - + StateProviderFactory, + N: RpcNodeCore, + EthApiError: FromEvmError, + Rpc: RpcConvert, { #[inline] fn gas_oracle(&self) -> &GasPriceOracle { @@ -31,7 +30,7 @@ where } #[inline] - fn fee_history_cache(&self) -> &FeeHistoryCache> { + fn fee_history_cache(&self) -> &FeeHistoryCache> { self.inner.fee_history_cache() } } diff --git a/crates/rpc/rpc/src/eth/helpers/pending_block.rs b/crates/rpc/rpc/src/eth/helpers/pending_block.rs index dd65fd53ca9..5e007c340f1 100644 --- a/crates/rpc/rpc/src/eth/helpers/pending_block.rs +++ b/crates/rpc/rpc/src/eth/helpers/pending_block.rs @@ -1,75 +1,26 @@ //! Support for building a pending block with transactions from local view of mempool. use crate::EthApi; -use alloy_consensus::BlockHeader; -use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; -use reth_evm::{ConfigureEvm, NextBlockEnvAttributes}; -use reth_node_api::NodePrimitives; -use reth_primitives_traits::SealedHeader; use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ - helpers::{LoadPendingBlock, SpawnBlocking}, - types::RpcTypes, + helpers::{pending_block::PendingEnvBuilder, LoadPendingBlock}, FromEvmError, RpcNodeCore, }; -use reth_rpc_eth_types::PendingBlock; -use reth_storage_api::{ - BlockReader, BlockReaderIdExt, ProviderBlock, ProviderHeader, ProviderReceipt, ProviderTx, - StateProviderFactory, -}; -use reth_transaction_pool::{PoolTransaction, TransactionPool}; -use revm_primitives::B256; +use reth_rpc_eth_types::{EthApiError, PendingBlock}; -impl LoadPendingBlock - for EthApi +impl LoadPendingBlock for EthApi where - Self: SpawnBlocking< - NetworkTypes: RpcTypes< - Header = alloy_rpc_types_eth::Header>, - >, - Error: FromEvmError, - RpcConvert: RpcConvert, - > + RpcNodeCore< - Provider: BlockReaderIdExt - + ChainSpecProvider - + StateProviderFactory, - Pool: TransactionPool< - Transaction: PoolTransaction>, - >, - Evm: ConfigureEvm< - Primitives = ::Primitives, - NextBlockEnvCtx: From, - >, - Primitives: NodePrimitives< - BlockHeader = ProviderHeader, - SignedTx = ProviderTx, - Receipt = ProviderReceipt, - Block = ProviderBlock, - >, - >, - Provider: BlockReader, + N: RpcNodeCore, + EthApiError: FromEvmError, + Rpc: RpcConvert, { #[inline] - fn pending_block( - &self, - ) -> &tokio::sync::Mutex< - Option, ProviderReceipt>>, - > { + fn pending_block(&self) -> &tokio::sync::Mutex>> { self.inner.pending_block() } - fn next_env_attributes( - &self, - parent: &SealedHeader>, - ) -> Result<::NextBlockEnvCtx, Self::Error> { - Ok(NextBlockEnvAttributes { - timestamp: parent.timestamp().saturating_add(12), - suggested_fee_recipient: parent.beneficiary(), - prev_randao: B256::random(), - gas_limit: parent.gas_limit(), - parent_beacon_block_root: parent.parent_beacon_block_root().map(|_| B256::ZERO), - withdrawals: parent.withdrawals_root().map(|_| Default::default()), - } - .into()) + #[inline] + fn pending_env_builder(&self) -> &dyn PendingEnvBuilder { + self.inner.pending_env_builder() } } diff --git a/crates/rpc/rpc/src/eth/helpers/receipt.rs b/crates/rpc/rpc/src/eth/helpers/receipt.rs index 9d0a744ee80..358ef57f768 100644 --- a/crates/rpc/rpc/src/eth/helpers/receipt.rs +++ b/crates/rpc/rpc/src/eth/helpers/receipt.rs @@ -1,38 +1,14 @@ //! Builds an RPC receipt response w.r.t. data layout of network. 
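The `LoadReceipt` implementation removed below fetched every receipt of the block from cache before building a single response. That is not incidental: a receipt only stores the block-cumulative gas counter, so the per-transaction gas figure has to be recovered by subtracting the previous receipt's counter. A minimal sketch of that derivation, using a stand-in struct rather than the reth receipt type:

    // Stand-in receipt type; only the cumulative counter matters here.
    struct Receipt {
        cumulative_gas_used: u64,
    }

    /// Gas used by the transaction at `idx`, given all receipts of its block in order.
    fn gas_used_by_tx(receipts: &[Receipt], idx: usize) -> u64 {
        let prev = if idx == 0 { 0 } else { receipts[idx - 1].cumulative_gas_used };
        receipts[idx].cumulative_gas_used - prev
    }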
-use alloy_consensus::transaction::TransactionMeta; -use reth_chainspec::{ChainSpecProvider, EthChainSpec}; -use reth_ethereum_primitives::{Receipt, TransactionSigned}; -use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcNodeCoreExt, RpcReceipt}; -use reth_rpc_eth_types::{EthApiError, EthReceiptBuilder}; -use reth_storage_api::{BlockReader, ReceiptProvider, TransactionsProvider}; - use crate::EthApi; +use reth_rpc_convert::RpcConvert; +use reth_rpc_eth_api::{helpers::LoadReceipt, FromEvmError, RpcNodeCore}; +use reth_rpc_eth_types::EthApiError; -impl LoadReceipt for EthApi +impl LoadReceipt for EthApi where - Self: RpcNodeCoreExt< - Provider: TransactionsProvider - + ReceiptProvider, - >, - Provider: BlockReader + ChainSpecProvider, + N: RpcNodeCore, + EthApiError: FromEvmError, + Rpc: RpcConvert, { - async fn build_transaction_receipt( - &self, - tx: TransactionSigned, - meta: TransactionMeta, - receipt: Receipt, - ) -> Result, Self::Error> { - let hash = meta.block_hash; - // get all receipts for the block - let all_receipts = self - .cache() - .get_receipts(hash) - .await - .map_err(Self::Error::from_eth_err)? - .ok_or(EthApiError::HeaderNotFound(hash.into()))?; - let blob_params = self.provider().chain_spec().blob_params_at_timestamp(meta.timestamp); - - Ok(EthReceiptBuilder::new(&tx, meta, &receipt, &all_receipts, blob_params)?.build()) - } } diff --git a/crates/rpc/rpc/src/eth/helpers/signer.rs b/crates/rpc/rpc/src/eth/helpers/signer.rs index 01a07c4436d..60d6a151f9b 100644 --- a/crates/rpc/rpc/src/eth/helpers/signer.rs +++ b/crates/rpc/rpc/src/eth/helpers/signer.rs @@ -5,19 +5,24 @@ use std::collections::HashMap; use crate::EthApi; use alloy_dyn_abi::TypedData; use alloy_eips::eip2718::Decodable2718; -use alloy_network::{eip2718::Encodable2718, EthereumWallet, TransactionBuilder}; use alloy_primitives::{eip191_hash_message, Address, Signature, B256}; -use alloy_rpc_types_eth::TransactionRequest; use alloy_signer::SignerSync; use alloy_signer_local::PrivateKeySigner; -use reth_rpc_eth_api::helpers::{signer::Result, AddDevSigners, EthSigner}; -use reth_rpc_eth_types::SignError; -use reth_storage_api::BlockReader; +use reth_rpc_convert::{RpcConvert, RpcTypes, SignableTxRequest}; +use reth_rpc_eth_api::{ + helpers::{signer::Result, AddDevSigners, EthSigner}, + FromEvmError, RpcNodeCore, +}; +use reth_rpc_eth_types::{EthApiError, SignError}; +use reth_storage_api::ProviderTx; -impl AddDevSigners - for EthApi +impl AddDevSigners for EthApi where - Provider: BlockReader, + N: RpcNodeCore, + EthApiError: FromEvmError, + Rpc: RpcConvert< + Network: RpcTypes>>, + >, { fn with_dev_accounts(&self) { *self.inner.signers().write() = DevSigner::random_signers(20) @@ -32,15 +37,11 @@ pub struct DevSigner { } impl DevSigner { - /// Generates a random dev signer which satisfies [`EthSigner`] trait - pub fn random() -> Box> { - let mut signers = Self::random_signers(1); - signers.pop().expect("expect to generate at least one signer") - } - /// Generates provided number of random dev signers /// which satisfy [`EthSigner`] trait - pub fn random_signers(num: u32) -> Vec + 'static>> { + pub fn random_signers>( + num: u32, + ) -> Vec + 'static>> { let mut signers = Vec::with_capacity(num as usize); for _ in 0..num { let sk = PrivateKeySigner::random(); @@ -49,7 +50,7 @@ impl DevSigner { let addresses = vec![address]; let accounts = HashMap::from([(address, sk)]); - signers.push(Box::new(Self { addresses, accounts }) as Box>); + signers.push(Box::new(Self { addresses, accounts }) as Box>); } 
signers } @@ -65,7 +66,7 @@ } #[async_trait::async_trait] -impl EthSigner for DevSigner { +impl<T, TxReq: SignableTxRequest<T>> EthSigner<T, TxReq> for DevSigner { fn accounts(&self) -> Vec<Address>
{ self.addresses.clone() } @@ -81,21 +82,17 @@ impl EthSigner for DevSigner { self.sign_hash(hash, address) } - async fn sign_transaction(&self, request: TransactionRequest, address: &Address) -> Result { + async fn sign_transaction(&self, request: TxReq, address: &Address) -> Result { // create local signer wallet from signing key let signer = self.accounts.get(address).ok_or(SignError::NoAccount)?.clone(); - let wallet = EthereumWallet::from(signer); // build and sign transaction with signer - let txn_envelope = - request.build(&wallet).await.map_err(|_| SignError::InvalidTransactionRequest)?; - - // decode transaction into signed transaction type - let encoded = txn_envelope.encoded_2718(); - let txn_signed = T::decode_2718(&mut encoded.as_ref()) + let tx = request + .try_build_and_sign(&signer) + .await .map_err(|_| SignError::InvalidTransactionRequest)?; - Ok(txn_signed) + Ok(tx) } fn sign_typed_data(&self, address: Address, payload: &TypedData) -> Result { @@ -109,7 +106,7 @@ mod tests { use super::*; use alloy_consensus::Transaction; use alloy_primitives::{Bytes, U256}; - use alloy_rpc_types_eth::TransactionInput; + use alloy_rpc_types_eth::{TransactionInput, TransactionRequest}; use reth_ethereum_primitives::TransactionSigned; use revm_primitives::TxKind; diff --git a/crates/rpc/rpc/src/eth/helpers/spec.rs b/crates/rpc/rpc/src/eth/helpers/spec.rs index a4a8ad7531a..b8ff79f9dc7 100644 --- a/crates/rpc/rpc/src/eth/helpers/spec.rs +++ b/crates/rpc/rpc/src/eth/helpers/spec.rs @@ -1,31 +1,26 @@ use alloy_primitives::U256; -use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; -use reth_network_api::NetworkInfo; -use reth_rpc_eth_api::{helpers::EthApiSpec, RpcNodeCore}; -use reth_storage_api::{BlockNumReader, BlockReader, ProviderTx, StageCheckpointReader}; +use reth_rpc_convert::RpcConvert; +use reth_rpc_eth_api::{ + helpers::{spec::SignersForApi, EthApiSpec}, + RpcNodeCore, +}; +use reth_storage_api::ProviderTx; use crate::EthApi; -impl EthApiSpec for EthApi +impl EthApiSpec for EthApi where - Self: RpcNodeCore< - Provider: ChainSpecProvider - + BlockNumReader - + StageCheckpointReader, - Network: NetworkInfo, - >, - Provider: BlockReader, + N: RpcNodeCore, + Rpc: RpcConvert, { - type Transaction = ProviderTx; + type Transaction = ProviderTx; + type Rpc = Rpc::Network; fn starting_block(&self) -> U256 { self.inner.starting_block() } - fn signers( - &self, - ) -> &parking_lot::RwLock>>> - { + fn signers(&self) -> &SignersForApi { self.inner.signers() } } diff --git a/crates/rpc/rpc/src/eth/helpers/state.rs b/crates/rpc/rpc/src/eth/helpers/state.rs index 90c9e32c64d..5d767d2ede5 100644 --- a/crates/rpc/rpc/src/eth/helpers/state.rs +++ b/crates/rpc/rpc/src/eth/helpers/state.rs @@ -1,103 +1,71 @@ //! Contains RPC handler implementations specific to state. 
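The `max_proof_window` accessor kept below bounds how far behind the chain tip an `eth_getProof` request may target (the tests in this hunk probe exactly the `DEFAULT_ETH_PROOF_WINDOW` boundary). A plausible shape of that gate, with illustrative names rather than the reth implementation:

    /// Rejects proof requests for blocks older than the configured window.
    fn check_proof_window(best_block: u64, target_block: u64, window: u64) -> Result<(), String> {
        if best_block.saturating_sub(target_block) > window {
            return Err(format!("block {target_block} is outside the {window}-block proof window"));
        }
        Ok(())
    }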
-use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; -use reth_storage_api::{BlockReader, StateProviderFactory}; -use reth_transaction_pool::TransactionPool; - +use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ - helpers::{EthState, LoadState, SpawnBlocking}, - RpcNodeCoreExt, + helpers::{EthState, LoadState}, + RpcNodeCore, }; use crate::EthApi; -impl EthState for EthApi +impl EthState for EthApi where - Self: LoadState + SpawnBlocking, - Provider: BlockReader, + N: RpcNodeCore, + Rpc: RpcConvert, { fn max_proof_window(&self) -> u64 { self.inner.eth_proof_window() } } -impl LoadState for EthApi +impl LoadState for EthApi where - Self: RpcNodeCoreExt< - Provider: BlockReader - + StateProviderFactory - + ChainSpecProvider, - Pool: TransactionPool, - >, - Provider: BlockReader, + N: RpcNodeCore, + Rpc: RpcConvert, { } #[cfg(test)] mod tests { + use crate::eth::helpers::types::EthRpcConverter; + use super::*; - use alloy_consensus::Header; - use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT_30M; use alloy_primitives::{Address, StorageKey, StorageValue, U256}; + use reth_chainspec::ChainSpec; use reth_evm_ethereum::EthEvmConfig; use reth_network_api::noop::NoopNetwork; - use reth_provider::test_utils::{ExtendedAccount, MockEthProvider, NoopProvider}; - use reth_rpc_eth_api::helpers::EthState; - use reth_rpc_eth_types::{ - EthStateCache, FeeHistoryCache, FeeHistoryCacheConfig, GasPriceOracle, - }; - use reth_rpc_server_types::constants::{ - DEFAULT_ETH_PROOF_WINDOW, DEFAULT_MAX_SIMULATE_BLOCKS, DEFAULT_PROOF_PERMITS, + use reth_provider::{ + test_utils::{ExtendedAccount, MockEthProvider, NoopProvider}, + ChainSpecProvider, }; - use reth_tasks::pool::BlockingTaskPool; + use reth_rpc_eth_api::{helpers::EthState, node::RpcNodeCoreAdapter}; use reth_transaction_pool::test_utils::{testing_pool, TestPool}; use std::collections::HashMap; - fn noop_eth_api() -> EthApi { + fn noop_eth_api() -> EthApi< + RpcNodeCoreAdapter, + EthRpcConverter, + > { + let provider = NoopProvider::default(); let pool = testing_pool(); let evm_config = EthEvmConfig::mainnet(); - let cache = EthStateCache::spawn(NoopProvider::default(), Default::default()); - EthApi::new( - NoopProvider::default(), - pool, - NoopNetwork::default(), - cache.clone(), - GasPriceOracle::new(NoopProvider::default(), Default::default(), cache), - ETHEREUM_BLOCK_GAS_LIMIT_30M, - DEFAULT_MAX_SIMULATE_BLOCKS, - DEFAULT_ETH_PROOF_WINDOW, - BlockingTaskPool::build().expect("failed to build tracing pool"), - FeeHistoryCache::
::new(FeeHistoryCacheConfig::default()), - evm_config, - DEFAULT_PROOF_PERMITS, - ) + EthApi::builder(provider, pool, NoopNetwork::default(), evm_config).build() } fn mock_eth_api( accounts: HashMap, - ) -> EthApi { + ) -> EthApi< + RpcNodeCoreAdapter, + EthRpcConverter, + > { let pool = testing_pool(); let mock_provider = MockEthProvider::default(); let evm_config = EthEvmConfig::new(mock_provider.chain_spec()); mock_provider.extend_accounts(accounts); - let cache = EthStateCache::spawn(mock_provider.clone(), Default::default()); - EthApi::new( - mock_provider.clone(), - pool, - (), - cache.clone(), - GasPriceOracle::new(mock_provider, Default::default(), cache), - ETHEREUM_BLOCK_GAS_LIMIT_30M, - DEFAULT_MAX_SIMULATE_BLOCKS, - DEFAULT_ETH_PROOF_WINDOW + 1, - BlockingTaskPool::build().expect("failed to build tracing pool"), - FeeHistoryCache::
::new(FeeHistoryCacheConfig::default()), - evm_config, - DEFAULT_PROOF_PERMITS, - ) + EthApi::builder(mock_provider, pool, NoopNetwork::default(), evm_config).build() } #[tokio::test] diff --git a/crates/rpc/rpc/src/eth/helpers/sync_listener.rs b/crates/rpc/rpc/src/eth/helpers/sync_listener.rs index 13c8de19b0d..e444f76d3af 100644 --- a/crates/rpc/rpc/src/eth/helpers/sync_listener.rs +++ b/crates/rpc/rpc/src/eth/helpers/sync_listener.rs @@ -91,6 +91,7 @@ mod tests { config: Default::default(), head: Default::default(), }, + capabilities: vec![], }) } diff --git a/crates/rpc/rpc/src/eth/helpers/trace.rs b/crates/rpc/rpc/src/eth/helpers/trace.rs index 98f3e255818..3e00f2df0c4 100644 --- a/crates/rpc/rpc/src/eth/helpers/trace.rs +++ b/crates/rpc/rpc/src/eth/helpers/trace.rs @@ -1,27 +1,15 @@ //! Contains RPC handler implementations specific to tracing. -use reth_evm::ConfigureEvm; -use reth_node_api::NodePrimitives; -use reth_rpc_eth_api::{ - helpers::{LoadState, Trace}, - FromEvmError, -}; -use reth_storage_api::{BlockReader, ProviderHeader, ProviderTx}; +use reth_rpc_convert::RpcConvert; +use reth_rpc_eth_api::{helpers::Trace, FromEvmError, RpcNodeCore}; +use reth_rpc_eth_types::EthApiError; use crate::EthApi; -impl Trace for EthApi +impl Trace for EthApi where - Self: LoadState< - Provider: BlockReader, - Evm: ConfigureEvm< - Primitives: NodePrimitives< - BlockHeader = ProviderHeader, - SignedTx = ProviderTx, - >, - >, - Error: FromEvmError, - >, - Provider: BlockReader, + N: RpcNodeCore, + EthApiError: FromEvmError, + Rpc: RpcConvert, { } diff --git a/crates/rpc/rpc/src/eth/helpers/transaction.rs b/crates/rpc/rpc/src/eth/helpers/transaction.rs index e7efc43ac45..f82886a9beb 100644 --- a/crates/rpc/rpc/src/eth/helpers/transaction.rs +++ b/crates/rpc/rpc/src/eth/helpers/transaction.rs @@ -2,22 +2,24 @@ use crate::EthApi; use alloy_primitives::{Bytes, B256}; +use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ - helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, - FromEthApiError, FullEthApiTypes, RpcNodeCore, RpcNodeCoreExt, + helpers::{spec::SignersForRpc, EthTransactions, LoadTransaction}, + FromEvmError, RpcNodeCore, +}; +use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError}; +use reth_transaction_pool::{ + AddedTransactionOutcome, PoolTransaction, TransactionOrigin, TransactionPool, }; -use reth_rpc_eth_types::utils::recover_raw_transaction; -use reth_storage_api::{BlockReader, BlockReaderIdExt, ProviderTx, TransactionsProvider}; -use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; -impl EthTransactions - for EthApi +impl EthTransactions for EthApi where - Self: LoadTransaction, - Provider: BlockReader>, + N: RpcNodeCore, + EthApiError: FromEvmError, + Rpc: RpcConvert, { #[inline] - fn signers(&self) -> &parking_lot::RwLock>>>> { + fn signers(&self) -> &SignersForRpc { self.inner.signers() } @@ -33,43 +35,30 @@ where let pool_transaction = ::Transaction::from_pooled(recovered); // submit the transaction to the pool with a `Local` origin - let hash = self - .pool() - .add_transaction(TransactionOrigin::Local, pool_transaction) - .await - .map_err(Self::Error::from_eth_err)?; + let AddedTransactionOutcome { hash, .. 
} = + self.pool().add_transaction(TransactionOrigin::Local, pool_transaction).await?; Ok(hash) } } -impl LoadTransaction - for EthApi +impl LoadTransaction for EthApi where - Self: SpawnBlocking - + FullEthApiTypes - + RpcNodeCoreExt, - Provider: BlockReader, + N: RpcNodeCore, + EthApiError: FromEvmError, + Rpc: RpcConvert, { } #[cfg(test)] mod tests { use super::*; - use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT_30M; use alloy_primitives::{hex_literal::hex, Bytes}; use reth_chainspec::ChainSpecProvider; use reth_evm_ethereum::EthEvmConfig; use reth_network_api::noop::NoopNetwork; use reth_provider::test_utils::NoopProvider; use reth_rpc_eth_api::helpers::EthTransactions; - use reth_rpc_eth_types::{ - EthStateCache, FeeHistoryCache, FeeHistoryCacheConfig, GasPriceOracle, - }; - use reth_rpc_server_types::constants::{ - DEFAULT_ETH_PROOF_WINDOW, DEFAULT_MAX_SIMULATE_BLOCKS, DEFAULT_PROOF_PERMITS, - }; - use reth_tasks::pool::BlockingTaskPool; use reth_transaction_pool::{test_utils::testing_pool, TransactionPool}; #[tokio::test] @@ -80,22 +69,9 @@ mod tests { let pool = testing_pool(); let evm_config = EthEvmConfig::new(noop_provider.chain_spec()); - let cache = EthStateCache::spawn(noop_provider.clone(), Default::default()); - let fee_history_cache = FeeHistoryCache::new(FeeHistoryCacheConfig::default()); - let eth_api = EthApi::new( - noop_provider.clone(), - pool.clone(), - noop_network_provider, - cache.clone(), - GasPriceOracle::new(noop_provider, Default::default(), cache.clone()), - ETHEREUM_BLOCK_GAS_LIMIT_30M, - DEFAULT_MAX_SIMULATE_BLOCKS, - DEFAULT_ETH_PROOF_WINDOW, - BlockingTaskPool::build().expect("failed to build tracing pool"), - fee_history_cache, - evm_config, - DEFAULT_PROOF_PERMITS, - ); + let eth_api = + EthApi::builder(noop_provider.clone(), pool.clone(), noop_network_provider, evm_config) + .build(); // https://etherscan.io/tx/0xa694b71e6c128a2ed8e2e0f6770bddbe52e3bb8f10e8472f9a79ab81497a8b5d let tx_1 = Bytes::from(hex!( diff --git a/crates/rpc/rpc/src/eth/helpers/types.rs b/crates/rpc/rpc/src/eth/helpers/types.rs index 2425c15fc0b..0c1d59a6ca3 100644 --- a/crates/rpc/rpc/src/eth/helpers/types.rs +++ b/crates/rpc/rpc/src/eth/helpers/types.rs @@ -3,10 +3,11 @@ use alloy_network::Ethereum; use reth_evm_ethereum::EthEvmConfig; use reth_rpc_convert::RpcConverter; -use reth_rpc_eth_types::EthApiError; +use reth_rpc_eth_types::receipt::EthReceiptConverter; /// An [`RpcConverter`] with its generics set to Ethereum specific. 
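The type alias below gains a chain-spec-aware receipt converter, which is why the converter is no longer constructed via `Default`. Construction as used by the updated tests in this hunk:

    use reth_chainspec::MAINNET;
    use reth_rpc_eth_types::receipt::EthReceiptConverter;

    let converter = EthRpcConverter::new(EthReceiptConverter::new(MAINNET.clone()));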
-pub type EthRpcConverter = RpcConverter; +pub type EthRpcConverter = + RpcConverter>; //tests for simulate #[cfg(test)] @@ -14,12 +15,13 @@ mod tests { use super::*; use alloy_consensus::{Transaction, TxType}; use alloy_rpc_types_eth::TransactionRequest; + use reth_chainspec::MAINNET; use reth_rpc_eth_types::simulate::resolve_transaction; use revm::database::CacheDB; #[test] fn test_resolve_transaction_empty_request() { - let builder = EthRpcConverter::default(); + let builder = EthRpcConverter::new(EthReceiptConverter::new(MAINNET.clone())); let mut db = CacheDB::>::default(); let tx = TransactionRequest::default(); let result = resolve_transaction(tx, 21000, 0, 1, &mut db, &builder).unwrap(); @@ -34,7 +36,7 @@ mod tests { #[test] fn test_resolve_transaction_legacy() { let mut db = CacheDB::>::default(); - let builder = EthRpcConverter::default(); + let builder = EthRpcConverter::new(EthReceiptConverter::new(MAINNET.clone())); let tx = TransactionRequest { gas_price: Some(100), ..Default::default() }; @@ -50,7 +52,7 @@ mod tests { #[test] fn test_resolve_transaction_partial_eip1559() { let mut db = CacheDB::>::default(); - let builder = EthRpcConverter::default(); + let rpc_converter = EthRpcConverter::new(EthReceiptConverter::new(MAINNET.clone())); let tx = TransactionRequest { max_fee_per_gas: Some(200), @@ -58,7 +60,7 @@ mod tests { ..Default::default() }; - let result = resolve_transaction(tx, 21000, 0, 1, &mut db, &builder).unwrap(); + let result = resolve_transaction(tx, 21000, 0, 1, &mut db, &rpc_converter).unwrap(); assert_eq!(result.tx_type(), TxType::Eip1559); let tx = result.into_inner(); diff --git a/crates/rpc/rpc/src/eth/sim_bundle.rs b/crates/rpc/rpc/src/eth/sim_bundle.rs index 6221f6821c1..bdd23e44ffa 100644 --- a/crates/rpc/rpc/src/eth/sim_bundle.rs +++ b/crates/rpc/rpc/src/eth/sim_bundle.rs @@ -6,8 +6,8 @@ use alloy_evm::overrides::apply_block_overrides; use alloy_primitives::U256; use alloy_rpc_types_eth::BlockId; use alloy_rpc_types_mev::{ - BundleItem, Inclusion, Privacy, RefundConfig, SendBundleRequest, SimBundleLogs, - SimBundleOverrides, SimBundleResponse, Validity, + BundleItem, Inclusion, MevSendBundle, Privacy, RefundConfig, SimBundleLogs, SimBundleOverrides, + SimBundleResponse, Validity, }; use jsonrpsee::core::RpcResult; use reth_evm::{ConfigureEvm, Evm}; @@ -89,7 +89,7 @@ where /// inclusion, validity and privacy settings from parent bundles. 
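Flattening is naturally recursive: a bundle item is either a raw transaction or a nested bundle whose items inherit the parent's inclusion, validity, and privacy settings. A schematic sketch of the recursion with simplified stand-in types (not the alloy `BundleItem` definition):

    enum Item {
        Tx(Vec<u8>),
        Bundle(Vec<Item>),
    }

    /// Depth-first flatten of nested bundle items into raw transactions.
    fn flatten(items: &[Item], out: &mut Vec<Vec<u8>>) {
        for item in items {
            match item {
                Item::Tx(raw) => out.push(raw.clone()),
                Item::Bundle(inner) => flatten(inner, out),
            }
        }
    }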
fn parse_and_flatten_bundle( &self, - request: &SendBundleRequest, + request: &MevSendBundle, ) -> Result>>, EthApiError> { let mut items = Vec::new(); @@ -220,7 +220,7 @@ where async fn sim_bundle_inner( &self, - request: SendBundleRequest, + request: MevSendBundle, overrides: SimBundleOverrides, logs: bool, ) -> Result { @@ -416,7 +416,7 @@ where { async fn sim_bundle( &self, - request: SendBundleRequest, + request: MevSendBundle, overrides: SimBundleOverrides, ) -> RpcResult { trace!("mev_simBundle called, request: {:?}, overrides: {:?}", request, overrides); diff --git a/crates/rpc/rpc/src/lib.rs b/crates/rpc/rpc/src/lib.rs index 690fb33e871..d2905095900 100644 --- a/crates/rpc/rpc/src/lib.rs +++ b/crates/rpc/rpc/src/lib.rs @@ -54,6 +54,7 @@ pub use miner::MinerApi; pub use net::NetApi; pub use otterscan::OtterscanApi; pub use reth::RethApi; +pub use reth_rpc_convert::RpcTypes; pub use rpc::RPCApi; pub use trace::TraceApi; pub use txpool::TxPoolApi; diff --git a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs index bafbf0730bd..92698e6eca2 100644 --- a/crates/rpc/rpc/src/otterscan.rs +++ b/crates/rpc/rpc/src/otterscan.rs @@ -340,9 +340,9 @@ where num.into(), None, TracingInspectorConfig::default_parity(), - |tx_info, ctx| { + |tx_info, mut ctx| { Ok(ctx - .inspector + .take_inspector() .into_parity_builder() .into_localized_transaction_traces(tx_info)) }, diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 73d461bf222..2c5b0ed143b 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -4,7 +4,6 @@ use alloy_evm::block::calc::{base_block_reward_pre_merge, block_reward, ommer_re use alloy_primitives::{map::HashSet, Bytes, B256, U256}; use alloy_rpc_types_eth::{ state::{EvmOverrides, StateOverride}, - transaction::TransactionRequest, BlockOverrides, Index, }; use alloy_rpc_types_trace::{ @@ -20,6 +19,7 @@ use reth_evm::ConfigureEvm; use reth_primitives_traits::{BlockBody, BlockHeader}; use reth_revm::{database::StateProviderDatabase, db::CacheDB}; use reth_rpc_api::TraceApiServer; +use reth_rpc_convert::RpcTxReq; use reth_rpc_eth_api::{ helpers::{Call, LoadPendingBlock, LoadTransaction, Trace, TraceExt}, FromEthApiError, RpcNodeCore, @@ -88,7 +88,7 @@ where /// Executes the given call and returns a number of possible traces for it. 
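The requested `trace_types` decide which tracing passes run; the handler builds its inspector config with `TracingInspectorConfig::from_parity_config`, as visible in this hunk. A small usage sketch (the `TracingInspectorConfig` import path via `revm-inspectors` is an assumption; the diff uses it unqualified):

    use std::collections::HashSet;
    use alloy_rpc_types_trace::parity::TraceType;
    use revm_inspectors::tracing::TracingInspectorConfig;

    let trace_types: HashSet<TraceType> =
        [TraceType::Trace, TraceType::StateDiff].into_iter().collect();
    // Enables exactly the tracing passes the caller asked for.
    let config = TracingInspectorConfig::from_parity_config(&trace_types);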
pub async fn trace_call( &self, - trace_request: TraceCallRequest, + trace_request: TraceCallRequest>, ) -> Result { let at = trace_request.block_id.unwrap_or_default(); let config = TracingInspectorConfig::from_parity_config(&trace_request.trace_types); @@ -143,7 +143,7 @@ where /// Note: Allows tracing dependent transactions, hence all transactions are traced in sequence pub async fn trace_call_many( &self, - calls: Vec<(TransactionRequest, HashSet)>, + calls: Vec<(RpcTxReq, HashSet)>, block_id: Option, ) -> Result, Eth::Error> { let at = block_id.unwrap_or(BlockId::pending()); @@ -403,9 +403,9 @@ where Some(block.clone()), None, TracingInspectorConfig::default_parity(), - move |tx_info, ctx| { + move |tx_info, mut ctx| { let mut traces = ctx - .inspector + .take_inspector() .into_parity_builder() .into_localized_transaction_traces(tx_info); traces.retain(|trace| matcher.matches(&trace.trace)); @@ -472,9 +472,11 @@ where block_id, None, TracingInspectorConfig::default_parity(), - |tx_info, ctx| { - let traces = - ctx.inspector.into_parity_builder().into_localized_transaction_traces(tx_info); + |tx_info, mut ctx| { + let traces = ctx + .take_inspector() + .into_parity_builder() + .into_localized_transaction_traces(tx_info); Ok(traces) }, ); @@ -509,9 +511,9 @@ where block_id, None, TracingInspectorConfig::from_parity_config(&trace_types), - move |tx_info, ctx| { + move |tx_info, mut ctx| { let mut full_trace = ctx - .inspector + .take_inspector() .into_parity_builder() .into_trace_results(&ctx.result, &trace_types); @@ -569,7 +571,7 @@ where } #[async_trait] -impl TraceApiServer for TraceApi +impl TraceApiServer> for TraceApi where Eth: TraceExt + 'static, { @@ -578,7 +580,7 @@ where /// Handler for `trace_call` async fn trace_call( &self, - call: TransactionRequest, + call: RpcTxReq, trace_types: HashSet, block_id: Option, state_overrides: Option, @@ -593,7 +595,7 @@ where /// Handler for `trace_callMany` async fn trace_call_many( &self, - calls: Vec<(TransactionRequest, HashSet)>, + calls: Vec<(RpcTxReq, HashSet)>, block_id: Option, ) -> RpcResult> { let _permit = self.acquire_trace_permit().await; diff --git a/crates/rpc/rpc/src/txpool.rs b/crates/rpc/rpc/src/txpool.rs index e910e6a101e..5c7bcd45a84 100644 --- a/crates/rpc/rpc/src/txpool.rs +++ b/crates/rpc/rpc/src/txpool.rs @@ -88,8 +88,8 @@ where /// Handler for `txpool_status` async fn txpool_status(&self) -> RpcResult { trace!(target: "rpc::eth", "Serving txpool_status"); - let all = self.pool.all_transactions(); - Ok(TxpoolStatus { pending: all.pending.len() as u64, queued: all.queued.len() as u64 }) + let (pending, queued) = self.pool.pending_and_queued_txn_count(); + Ok(TxpoolStatus { pending: pending as u64, queued: queued as u64 }) } /// Returns a summary of all the transactions currently pending for inclusion in the next diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index bd75a8045a5..ef6381e4dcd 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -5,10 +5,11 @@ use alloy_eips::{eip4844::kzg_to_versioned_hash, eip7685::RequestsOrHash}; use alloy_rpc_types_beacon::relay::{ BidTrace, BuilderBlockValidationRequest, BuilderBlockValidationRequestV2, BuilderBlockValidationRequestV3, BuilderBlockValidationRequestV4, + BuilderBlockValidationRequestV5, }; use alloy_rpc_types_engine::{ - BlobsBundleV1, CancunPayloadFields, ExecutionData, ExecutionPayload, ExecutionPayloadSidecar, - PraguePayloadFields, + BlobsBundleV1, BlobsBundleV2, CancunPayloadFields, 
ExecutionData, ExecutionPayload, + ExecutionPayloadSidecar, PraguePayloadFields, }; use async_trait::async_trait; use core::fmt; @@ -25,7 +26,7 @@ use reth_metrics::{ metrics::{gauge, Gauge}, Metrics, }; -use reth_node_api::NewPayloadError; +use reth_node_api::{NewPayloadError, PayloadTypes}; use reth_primitives_traits::{ constants::GAS_LIMIT_BOUND_DIVISOR, BlockBody, GotExpected, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeaderFor, @@ -44,14 +45,15 @@ use tracing::warn; /// The type that implements the `validation` rpc namespace trait #[derive(Clone, Debug, derive_more::Deref)] -pub struct ValidationApi { +pub struct ValidationApi { #[deref] - inner: Arc>, + inner: Arc>, } -impl ValidationApi +impl ValidationApi where E: ConfigureEvm, + T: PayloadTypes, { /// Create a new instance of the [`ValidationApi`] pub fn new( @@ -61,10 +63,7 @@ where config: ValidationApiConfig, task_spawner: Box, payload_validator: Arc< - dyn PayloadValidator< - Block = ::Block, - ExecutionData = ExecutionData, - >, + dyn PayloadValidator::Block>, >, ) -> Self { let ValidationApiConfig { disallow, validation_window } = config; @@ -111,13 +110,14 @@ where } } -impl ValidationApi +impl ValidationApi where Provider: BlockReaderIdExt
::BlockHeader> + ChainSpecProvider + StateProviderFactory + 'static, E: ConfigureEvm + 'static, + T: PayloadTypes, { /// Validates the given block and a [`BidTrace`] against it. pub async fn validate_message_against_block( @@ -184,7 +184,7 @@ where let output = executor.execute_with_state_closure(&block, |state| { if !self.disallow.is_empty() { // Check whether the submission interacted with any blacklisted account by scanning - // the `State`'s cache that records everything read form database during execution. + // the `State`'s cache that records everything read from database during execution. for account in state.cache.accounts.keys() { if self.disallow.contains(account) { accessed_blacklisted = Some(*account); @@ -307,7 +307,7 @@ where } } - if balance_after >= balance_before + message.value { + if balance_after >= balance_before.saturating_add(message.value) { return Ok(()) } @@ -365,6 +365,24 @@ where Ok(versioned_hashes) } + /// Validates the given [`BlobsBundleV1`] and returns versioned hashes for blobs. + pub fn validate_blobs_bundle_v2( + &self, + blobs_bundle: BlobsBundleV2, + ) -> Result, ValidationApiError> { + let versioned_hashes = blobs_bundle + .commitments + .iter() + .map(|c| kzg_to_versioned_hash(c.as_slice())) + .collect::>(); + + blobs_bundle + .try_into_sidecar() + .map_err(|_| ValidationApiError::InvalidBlobsBundle)? + .validate(&versioned_hashes, EnvKzgSettings::default().get())?; + + Ok(versioned_hashes) + } /// Core logic for validating the builder submission v3 async fn validate_builder_submission_v3( @@ -414,10 +432,39 @@ where ) .await } + + /// Core logic for validating the builder submission v5 + async fn validate_builder_submission_v5( + &self, + request: BuilderBlockValidationRequestV5, + ) -> Result<(), ValidationApiError> { + let block = self.payload_validator.ensure_well_formed_payload(ExecutionData { + payload: ExecutionPayload::V3(request.request.execution_payload), + sidecar: ExecutionPayloadSidecar::v4( + CancunPayloadFields { + parent_beacon_block_root: request.parent_beacon_block_root, + versioned_hashes: self + .validate_blobs_bundle_v2(request.request.blobs_bundle)?, + }, + PraguePayloadFields { + requests: RequestsOrHash::Requests( + request.request.execution_requests.to_requests(), + ), + }, + ), + })?; + + self.validate_message_against_block( + block, + request.request.message, + request.registered_gas_limit, + ) + .await + } } #[async_trait] -impl BlockSubmissionValidationApiServer for ValidationApi +impl BlockSubmissionValidationApiServer for ValidationApi where Provider: BlockReaderIdExt
::BlockHeader> + ChainSpecProvider @@ -425,6 +472,7 @@ where + Clone + 'static, E: ConfigureEvm + 'static, + T: PayloadTypes, { async fn validate_builder_submission_v1( &self, @@ -477,20 +525,34 @@ where rx.await.map_err(|_| internal_rpc_err("Internal blocking task error"))? } + + /// Validates a block submitted to the relay + async fn validate_builder_submission_v5( + &self, + request: BuilderBlockValidationRequestV5, + ) -> RpcResult<()> { + let this = self.clone(); + let (tx, rx) = oneshot::channel(); + + self.task_spawner.spawn_blocking(Box::pin(async move { + let result = Self::validate_builder_submission_v5(&this, request) + .await + .map_err(ErrorObject::from); + let _ = tx.send(result); + })); + + rx.await.map_err(|_| internal_rpc_err("Internal blocking task error"))? + } } -pub struct ValidationApiInner { +pub struct ValidationApiInner { /// The provider that can interact with the chain. provider: Provider, /// Consensus implementation. consensus: Arc>, /// Execution payload validator. - payload_validator: Arc< - dyn PayloadValidator< - Block = ::Block, - ExecutionData = ExecutionData, - >, - >, + payload_validator: + Arc::Block>>, /// Block executor factory. evm_config: E, /// Set of disallowed addresses @@ -524,7 +586,7 @@ fn hash_disallow_list(disallow: &HashSet
) -> String { format!("{:x}", hasher.finalize()) } -impl fmt::Debug for ValidationApiInner { +impl fmt::Debug for ValidationApiInner { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("ValidationApiInner").finish_non_exhaustive() } diff --git a/crates/scroll/alloy/evm/src/block/curie.rs b/crates/scroll/alloy/evm/src/block/curie.rs index 7e2e853ad42..3c65e9cc04d 100644 --- a/crates/scroll/alloy/evm/src/block/curie.rs +++ b/crates/scroll/alloy/evm/src/block/curie.rs @@ -102,7 +102,7 @@ pub(super) fn apply_curie_hard_fork(state: &mut State) -> Resu #[cfg(test)] mod tests { - use super::{super::assert_bytecode_eq, *}; + use super::*; use revm::{ database::{ states::{bundle_state::BundleRetention, plain_account::PlainStorage, StorageSlot}, @@ -155,24 +155,8 @@ mod tests { let expected_oracle_info = AccountInfo { code_hash, code: Some(bytecode.clone()), ..Default::default() }; - // TODO: revert back to performing equality check on `AccountInfo` once we bump revm > v78 - let oracle_original_info = oracle.original_info.unwrap(); - assert_bytecode_eq( - oracle_original_info.code.as_ref().unwrap(), - oracle_pre_fork.code.as_ref().unwrap(), - ); - assert_eq!(oracle_original_info.balance, oracle_pre_fork.balance); - assert_eq!(oracle_original_info.nonce, oracle_pre_fork.nonce); - assert_eq!(oracle_original_info.code_hash, oracle_pre_fork.code_hash); - - let oracle_post_info = oracle.info.unwrap(); - assert_bytecode_eq( - oracle_post_info.code.as_ref().unwrap(), - expected_oracle_info.code.as_ref().unwrap(), - ); - assert_eq!(oracle_post_info.balance, expected_oracle_info.balance); - assert_eq!(oracle_post_info.nonce, expected_oracle_info.nonce); - assert_eq!(oracle_post_info.code_hash, expected_oracle_info.code_hash); + assert_eq!(oracle.original_info.unwrap(), oracle_pre_fork); + assert_eq!(oracle.info.unwrap(), expected_oracle_info); // check oracle storage changeset let mut storage = oracle.storage.into_iter().collect::>(); @@ -188,7 +172,7 @@ mod tests { } // check deployed contract - assert_bytecode_eq(bundle.contracts.get(&code_hash).unwrap(), &bytecode); + assert_eq!(bundle.contracts.get(&code_hash).unwrap(), &bytecode); Ok(()) } diff --git a/crates/scroll/alloy/evm/src/block/feynman.rs b/crates/scroll/alloy/evm/src/block/feynman.rs index 0b44cd245c7..c739d4a3abd 100644 --- a/crates/scroll/alloy/evm/src/block/feynman.rs +++ b/crates/scroll/alloy/evm/src/block/feynman.rs @@ -90,7 +90,7 @@ pub(super) fn apply_feynman_hard_fork( #[cfg(test)] mod tests { - use super::{super::assert_bytecode_eq, *}; + use super::*; use revm::{ database::{ states::{bundle_state::BundleRetention, plain_account::PlainStorage, StorageSlot}, @@ -158,24 +158,8 @@ mod tests { let expected_oracle_info = AccountInfo { code_hash, code: Some(bytecode.clone()), ..Default::default() }; - // TODO: revert back to performing equality check on `AccountInfo` once we bump revm > v78 - let oracle_original_info = oracle.original_info.unwrap(); - assert_bytecode_eq( - oracle_original_info.code.as_ref().unwrap(), - oracle_pre_fork.code.as_ref().unwrap(), - ); - assert_eq!(oracle_original_info.balance, oracle_pre_fork.balance); - assert_eq!(oracle_original_info.nonce, oracle_pre_fork.nonce); - assert_eq!(oracle_original_info.code_hash, oracle_pre_fork.code_hash); - - let oracle_post_info = oracle.info.unwrap(); - assert_bytecode_eq( - oracle_post_info.code.as_ref().unwrap(), - expected_oracle_info.code.as_ref().unwrap(), - ); - assert_eq!(oracle_post_info.balance, expected_oracle_info.balance); - 
assert_eq!(oracle_post_info.nonce, expected_oracle_info.nonce); - assert_eq!(oracle_post_info.code_hash, expected_oracle_info.code_hash); + assert_eq!(oracle.original_info.unwrap(), oracle_pre_fork); + assert_eq!(oracle.info.unwrap(), expected_oracle_info); // check oracle storage changeset let mut storage = oracle.storage.into_iter().collect::>(); @@ -191,7 +175,7 @@ mod tests { } // check deployed contract - assert_bytecode_eq(bundle.contracts.get(&code_hash).unwrap(), &bytecode); + assert_eq!(bundle.contracts.get(&code_hash).unwrap(), &bytecode); Ok(()) } diff --git a/crates/scroll/alloy/evm/src/block/mod.rs b/crates/scroll/alloy/evm/src/block/mod.rs index 054b5a5b96d..ec613d5cd72 100644 --- a/crates/scroll/alloy/evm/src/block/mod.rs +++ b/crates/scroll/alloy/evm/src/block/mod.rs @@ -403,15 +403,3 @@ where ScrollBlockExecutor::new(evm, ctx, &self.spec, &self.receipt_builder) } } - -// TODO: remove this when we bump revm > v78 -/// A helper function that compares asserts that two bytecode instances are equal. -#[cfg(test)] -fn assert_bytecode_eq(expected: &revm::bytecode::Bytecode, actual: &revm::bytecode::Bytecode) { - assert_eq!(expected.legacy_jump_table().unwrap().len, actual.legacy_jump_table().unwrap().len); - assert_eq!( - expected.legacy_jump_table().unwrap().table, - actual.legacy_jump_table().unwrap().table - ); - assert_eq!(expected.bytecode(), actual.bytecode()); -} diff --git a/crates/scroll/alloy/evm/src/lib.rs b/crates/scroll/alloy/evm/src/lib.rs index a3a0ff6fbfd..5ec8dc38009 100644 --- a/crates/scroll/alloy/evm/src/lib.rs +++ b/crates/scroll/alloy/evm/src/lib.rs @@ -20,9 +20,8 @@ mod system_caller; extern crate alloc; -use alloc::vec::Vec; use alloy_evm::{precompiles::PrecompilesMap, Database, Evm, EvmEnv, EvmFactory}; -use alloy_primitives::{Address, Bytes, TxKind, U256}; +use alloy_primitives::{Address, Bytes}; use core::{ fmt, ops::{Deref, DerefMut}, @@ -33,7 +32,7 @@ use revm::{ handler::PrecompileProvider, inspector::NoOpInspector, interpreter::{interpreter::EthInterpreter, InterpreterResult}, - Context, ExecuteEvm, InspectEvm, Inspector, + Context, ExecuteEvm, InspectEvm, Inspector, SystemCallEvm, }; use revm_scroll::{ builder::{ @@ -42,7 +41,7 @@ use revm_scroll::{ }, instructions::ScrollInstructions, precompile::ScrollPrecompileProvider, - ScrollSpecId, ScrollTransaction, + ScrollSpecId, }; /// Re-export `TX_L1_FEE_PRECISION_U256` from `revm-scroll` for convenience. 
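The hunk below deletes the hand-rolled system-call path: it built a zero-gas-price transaction, then used `core::mem::swap` to temporarily lift the block gas limit, zero the base fee, and disable nonce checks, restoring each value afterwards; revm's `SystemCallEvm` now does this internally. The save/patch/restore pattern in isolation, as a generic sketch (not the revm API that replaces it):

    // Stand-in for the block environment fields the deleted code patched.
    struct BlockEnv {
        gas_limit: u64,
        basefee: u64,
    }

    /// Runs `f` with a patched environment, restoring the original values after.
    fn with_system_env<R>(env: &mut BlockEnv, f: impl FnOnce(&mut BlockEnv) -> R) -> R {
        let (mut gas_limit, mut basefee) = (30_000_000u64, 0u64);
        core::mem::swap(&mut env.gas_limit, &mut gas_limit);
        core::mem::swap(&mut env.basefee, &mut basefee);
        let out = f(env);
        // swap back to the previous values
        core::mem::swap(&mut env.gas_limit, &mut gas_limit);
        core::mem::swap(&mut env.basefee, &mut basefee);
        out
    }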
@@ -140,69 +139,7 @@ where contract: Address, data: Bytes, ) -> Result, Self::Error> { - let tx = ScrollTransaction { - base: TxEnv { - caller, - kind: TxKind::Call(contract), - // Explicitly set nonce to 0 so revm does not do any nonce checks - nonce: 0, - gas_limit: 30_000_000, - value: U256::ZERO, - data, - // Setting the gas price to zero enforces that no value is transferred as part of - // the call, and that the call will not count against the block's - // gas limit - gas_price: 0, - // The chain ID check is not relevant here and is disabled if set to None - chain_id: None, - // Setting the gas priority fee to None ensures the effective gas price is derived - // from the `gas_price` field, which we need to be zero - gas_priority_fee: None, - access_list: Default::default(), - // blob fields can be None for this tx - blob_hashes: Vec::new(), - max_fee_per_blob_gas: 0, - tx_type: 0, - authorization_list: Default::default(), - }, - rlp_bytes: Some(Default::default()), - // System transactions (similar to L1MessageTx) do not pay a rollup fee, - // so this field is not used; we just set it to the default value. - compression_ratio: Some(TX_L1_FEE_PRECISION_U256), - }; - - let mut gas_limit = tx.base.gas_limit; - let mut basefee = 0; - let mut disable_nonce_check = true; - - // ensure the block gas limit is >= the tx - core::mem::swap(&mut self.block.gas_limit, &mut gas_limit); - // disable the base fee check for this call by setting the base fee to zero - core::mem::swap(&mut self.block.basefee, &mut basefee); - // disable the nonce check - core::mem::swap(&mut self.cfg.disable_nonce_check, &mut disable_nonce_check); - - let mut res = self.transact(ScrollTransactionIntoTxEnv::from(tx)); - - // swap back to the previous gas limit - core::mem::swap(&mut self.block.gas_limit, &mut gas_limit); - // swap back to the previous base fee - core::mem::swap(&mut self.block.basefee, &mut basefee); - // swap back to the previous nonce check flag - core::mem::swap(&mut self.cfg.disable_nonce_check, &mut disable_nonce_check); - - // NOTE: We assume that only the contract storage is modified. Revm currently marks the - // caller and block beneficiary accounts as "touched" when we do the above transact calls, - // and includes them in the result. - // - // We're doing this state cleanup to make sure that changeset only includes the changed - // contract storage. - // Specifically prevents incorrect nonce increment for system contract caller. - if let Ok(res) = &mut res { - res.state.retain(|addr, _| *addr == contract); - } - - res + self.inner.transact_system_call_with_caller_finalize(caller, contract, data) } fn db_mut(&mut self) -> &mut Self::DB { @@ -237,6 +174,22 @@ where fn inspector_mut(&mut self) -> &mut Self::Inspector { &mut self.inner.0.inspector } + + fn components(&self) -> (&Self::DB, &Self::Inspector, &Self::Precompiles) { + ( + &self.inner.0.ctx.journaled_state.database, + &self.inner.0.inspector, + &self.inner.0.precompiles, + ) + } + + fn components_mut(&mut self) -> (&mut Self::DB, &mut Self::Inspector, &mut Self::Precompiles) { + ( + &mut self.inner.0.ctx.journaled_state.database, + &mut self.inner.0.inspector, + &mut self.inner.0.precompiles, + ) + } } /// Factory producing [`ScrollEvm`]s. 
diff --git a/crates/scroll/bin/scroll-reth/src/main.rs b/crates/scroll/bin/scroll-reth/src/main.rs index b7f49e80903..97daff973af 100644 --- a/crates/scroll/bin/scroll-reth/src/main.rs +++ b/crates/scroll/bin/scroll-reth/src/main.rs @@ -5,8 +5,8 @@ static ALLOC: reth_cli_util::allocator::Allocator = reth_cli_util::allocator::ne fn main() { use clap::Parser; - use reth_scroll_cli::{Cli, ScrollChainSpecParser, ScrollRollupArgs}; - use reth_scroll_node::ScrollNode; + use reth_scroll_cli::{Cli, ScrollChainSpecParser}; + use reth_scroll_node::{ScrollNode, ScrollRollupArgs}; use tracing::info; reth_cli_util::sigsegv_handler::install(); @@ -16,10 +16,11 @@ fn main() { std::env::set_var("RUST_BACKTRACE", "1"); } - if let Err(err) = Cli::<ScrollChainSpecParser, ScrollRollupArgs>::parse() - .run::<_, _, ScrollNode>(|builder, _| async move { + if let Err(err) = + Cli::<ScrollChainSpecParser, ScrollRollupArgs>::parse().run(|builder, args| async move { info!(target: "reth::cli", "Launching node"); - let handle = builder.node(ScrollNode).launch_with_debug_capabilities().await?; + let handle = + builder.node(ScrollNode::new(args)).launch_with_debug_capabilities().await?; handle.node_exit_future.await }) { diff --git a/crates/scroll/cli/Cargo.toml b/crates/scroll/cli/Cargo.toml index 88e102d194e..d2dd4e51eea 100644 --- a/crates/scroll/cli/Cargo.toml +++ b/crates/scroll/cli/Cargo.toml @@ -25,7 +25,9 @@ reth-tracing.workspace = true # scroll reth-scroll-chainspec.workspace = true +reth-scroll-consensus.workspace = true reth-scroll-evm.workspace = true +reth-scroll-node.workspace = true reth-scroll-primitives = { workspace = true, features = ["reth-codec"] } scroll-alloy-consensus = { workspace = true, optional = true } diff --git a/crates/scroll/cli/src/app.rs b/crates/scroll/cli/src/app.rs new file mode 100644 index 00000000000..fc6caf33844 --- /dev/null +++ b/crates/scroll/cli/src/app.rs @@ -0,0 +1,115 @@ +use crate::{Cli, Commands}; +use eyre::{eyre, Result}; +use reth_cli::chainspec::ChainSpecParser; +use reth_cli_commands::launcher::Launcher; +use reth_cli_runner::CliRunner; +use reth_node_metrics::recorder::install_prometheus_recorder; +use reth_scroll_chainspec::ScrollChainSpec; +use reth_scroll_consensus::ScrollBeaconConsensus; +use reth_scroll_evm::ScrollExecutorProvider; +use reth_scroll_node::ScrollNode; +use reth_tracing::{FileWorkerGuard, Layers}; +use std::{fmt, sync::Arc}; +use tracing::info; + +/// A wrapper around a parsed CLI that handles command execution. +#[derive(Debug)] +pub struct CliApp<C: ChainSpecParser, Ext: clap::Args + fmt::Debug> { + cli: Cli<C, Ext>, + runner: Option<CliRunner>, + layers: Option<Layers>, + guard: Option<FileWorkerGuard>, +} + +impl<C, Ext> CliApp<C, Ext> +where + C: ChainSpecParser<ChainSpec = ScrollChainSpec>, + Ext: clap::Args + fmt::Debug, +{ + pub(crate) fn new(cli: Cli<C, Ext>) -> Self { + Self { cli, runner: None, layers: Some(Layers::new()), guard: None } + } + + /// Sets the runner used to execute CLI commands. + /// + /// This replaces any existing runner with the provided one. + pub fn set_runner(&mut self, runner: CliRunner) { + self.runner = Some(runner); + } + + /// Access to tracing layers. + /// + /// Returns a mutable reference to the tracing layers, or an error + /// if tracing has already been initialized and the layers have been detached. + pub fn access_tracing_layers(&mut self) -> Result<&mut Layers> { + self.layers.as_mut().ok_or_else(|| eyre!("Tracing already initialized")) + } + + /// Execute the configured cli command. + /// + /// This accepts a closure that is used to launch the node via the + /// [`NodeCommand`](reth_cli_commands::node::NodeCommand).
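For context on how `run` is meant to be reached: `Cli::configure` (further down in this diff) produces the `CliApp`, and a caller may install a custom runtime before launching. A hedged wiring sketch, assuming a `launcher` value implementing the `Launcher` trait from `reth_cli_commands` and a surrounding function returning `eyre::Result<()>`:

    // `launcher` is assumed to implement reth_cli_commands::launcher::Launcher.
    let cli = Cli::<ScrollChainSpecParser, NoArgs>::parse();
    let mut app = cli.configure();
    // Optional: replace the default runtime before running the command.
    app.set_runner(CliRunner::try_default_runtime()?);
    app.run(launcher)?;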
+ pub fn run(mut self, launcher: impl Launcher<C, Ext>) -> Result<()> { + let runner = match self.runner.take() { + Some(runner) => runner, + None => CliRunner::try_default_runtime()?, + }; + + // Add the network name, if available, to the logs dir + if let Some(chain_spec) = self.cli.command.chain_spec() { + self.cli.logs.log_file_directory = + self.cli.logs.log_file_directory.join(chain_spec.chain.to_string()); + } + + self.init_tracing()?; + + // Install the prometheus recorder to be sure to record all metrics + let _ = install_prometheus_recorder(); + + let components = |spec: Arc<ScrollChainSpec>| { + (ScrollExecutorProvider::scroll(spec.clone()), ScrollBeaconConsensus::new(spec)) + }; + + match self.cli.command { + Commands::Node(command) => { + runner.run_command_until_exit(|ctx| command.execute(ctx, launcher)) + } + Commands::Import(command) => { + runner.run_blocking_until_ctrl_c(command.execute::<ScrollNode, _, _>(components)) + } + Commands::Init(command) => { + runner.run_blocking_until_ctrl_c(command.execute::<ScrollNode>()) + } + Commands::InitState(command) => { + runner.run_blocking_until_ctrl_c(command.execute::<ScrollNode>()) + } + Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), + Commands::Db(command) => { + runner.run_blocking_until_ctrl_c(command.execute::<ScrollNode>()) + } + Commands::Stage(command) => runner + .run_command_until_exit(|ctx| command.execute::<ScrollNode, _, _>(ctx, components)), + Commands::P2P(command) => runner.run_until_ctrl_c(command.execute::<ScrollNode>()), + Commands::Config(command) => runner.run_until_ctrl_c(command.execute()), + Commands::Recover(command) => { + runner.run_command_until_exit(|ctx| command.execute::<ScrollNode>(ctx)) + } + Commands::Prune(command) => runner.run_until_ctrl_c(command.execute::<ScrollNode>()), + #[cfg(feature = "dev")] + Commands::TestVectors(command) => runner.run_until_ctrl_c(command.execute()), + } + } + + /// Initializes tracing with the configured options. + /// + /// If file logging is enabled, this function stores the guard in the struct. + pub fn init_tracing(&mut self) -> Result<()> { + if self.guard.is_none() { + let layers = self.layers.take().unwrap_or_default(); + self.guard = self.cli.logs.init_tracing_with_layers(layers)?; + info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.cli.logs.log_file_directory); + } + Ok(()) + } +} diff --git a/crates/scroll/cli/src/args.rs b/crates/scroll/cli/src/args.rs deleted file mode 100644 index d51f83146a0..00000000000 --- a/crates/scroll/cli/src/args.rs +++ /dev/null @@ -1,3 +0,0 @@ -/// Rollup arguments for the Scroll node. 
-#[derive(Debug, clap::Args)] -pub struct ScrollRollupArgs; diff --git a/crates/scroll/cli/src/commands/mod.rs b/crates/scroll/cli/src/commands/mod.rs index 0690fe3a312..1bd4c500164 100644 --- a/crates/scroll/cli/src/commands/mod.rs +++ b/crates/scroll/cli/src/commands/mod.rs @@ -8,7 +8,8 @@ use reth_cli_commands::{ config_cmd, db, dump_genesis, import, init_cmd, init_state, node, node::NoArgs, p2p, prune, recover, stage, }; -use std::fmt; +use reth_scroll_chainspec::ScrollChainSpec; +use std::{fmt, sync::Arc}; /// Commands to be executed #[derive(Debug, Subcommand)] @@ -54,3 +55,26 @@ pub enum Commands< #[command(name = "test-vectors")] TestVectors(test_vectors::Command), } + +impl<C: ChainSpecParser<ChainSpec = ScrollChainSpec>, Ext: clap::Args + fmt::Debug> + Commands<C, Ext> +{ + /// Returns the underlying chain spec used by the command, if any. + pub fn chain_spec(&self) -> Option<&Arc<ScrollChainSpec>> { + match self { + Self::Node(cmd) => cmd.chain_spec(), + Self::Init(cmd) => cmd.chain_spec(), + Self::InitState(cmd) => cmd.chain_spec(), + Self::Import(cmd) => cmd.chain_spec(), + Self::DumpGenesis(cmd) => cmd.chain_spec(), + Self::Db(cmd) => cmd.chain_spec(), + Self::Stage(cmd) => cmd.chain_spec(), + Self::P2P(cmd) => cmd.chain_spec(), + Self::Config(_) => None, + Self::Recover(cmd) => cmd.chain_spec(), + Self::Prune(cmd) => cmd.chain_spec(), + #[cfg(feature = "dev")] + Self::TestVectors(_) => None, + } + } +} diff --git a/crates/scroll/cli/src/lib.rs b/crates/scroll/cli/src/lib.rs index 4313f44bcaf..e81f6f07481 100644 --- a/crates/scroll/cli/src/lib.rs +++ b/crates/scroll/cli/src/lib.rs @@ -1,6 +1,7 @@ //! Scroll CLI implementation. -mod args; -pub use args::ScrollRollupArgs; + +mod app; +pub use app::CliApp; mod commands; pub use commands::Commands; @@ -10,22 +11,16 @@ pub use spec::ScrollChainSpecParser; use clap::{value_parser, Parser}; use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::{common::CliNodeTypes, launcher::FnLauncher, node::NoArgs}; +use reth_cli_commands::{launcher::FnLauncher, node::NoArgs}; use reth_cli_runner::CliRunner; -use reth_consensus::noop::NoopConsensus; use reth_db::DatabaseEnv; use reth_node_builder::{NodeBuilder, WithLaunchContext}; use reth_node_core::{ args::LogArgs, version::{LONG_VERSION, SHORT_VERSION}, }; -use reth_node_metrics::recorder::install_prometheus_recorder; use reth_scroll_chainspec::ScrollChainSpec; -use reth_scroll_evm::ScrollExecutorProvider; -use reth_scroll_primitives::ScrollPrimitives; -use reth_tracing::FileWorkerGuard; use std::{ffi::OsString, fmt, future::Future, sync::Arc}; -use tracing::info; /// The main scroll cli interface. /// @@ -94,63 +89,36 @@ where C: ChainSpecParser<ChainSpec = ScrollChainSpec>, Ext: clap::Args + fmt::Debug, { + /// Configures the CLI and returns a [`CliApp`] instance. + /// + /// This prepares the CLI for execution by wrapping it in a + /// [`CliApp`] that can be further configured before running. + pub fn configure(self) -> CliApp<C, Ext> { + CliApp::new(self) + } + /// Execute the configured cli command. /// /// This accepts a closure that is used to launch the node via the /// [`NodeCommand`](reth_cli_commands::node::NodeCommand).
- pub fn run(mut self, launcher: L) -> eyre::Result<()> + pub fn run(self, launcher: L) -> eyre::Result<()> where L: FnOnce(WithLaunchContext, C::ChainSpec>>, Ext) -> Fut, Fut: Future>, - Types: CliNodeTypes, { - // add network name to logs dir - self.logs.log_file_directory = - self.logs.log_file_directory.join(self.chain.chain().to_string()); - - let _guard = self.init_tracing()?; - info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.logs.log_file_directory); - - // Install the prometheus recorder to be sure to record all metrics - let _ = install_prometheus_recorder(); - let components = |spec: Arc| { - (ScrollExecutorProvider::scroll(spec), NoopConsensus::default()) - }; - - let runner = CliRunner::try_default_runtime()?; - match self.command { - Commands::Node(command) => runner.run_command_until_exit(|ctx| { - command.execute(ctx, FnLauncher::new::(launcher)) - }), - Commands::Init(command) => runner.run_blocking_until_ctrl_c(command.execute::()), - Commands::InitState(command) => { - runner.run_blocking_until_ctrl_c(command.execute::()) - } - Commands::Import(command) => { - runner.run_blocking_until_ctrl_c(command.execute::(components)) - } - Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), - Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute::()), - Commands::Stage(command) => { - runner.run_command_until_exit(|ctx| command.execute::(ctx, components)) - } - Commands::P2P(command) => runner.run_until_ctrl_c(command.execute::()), - Commands::Config(command) => runner.run_until_ctrl_c(command.execute()), - Commands::Recover(command) => { - runner.run_command_until_exit(|ctx| command.execute::(ctx)) - } - Commands::Prune(command) => runner.run_until_ctrl_c(command.execute::()), - #[cfg(feature = "dev")] - Commands::TestVectors(command) => runner.run_until_ctrl_c(command.execute()), - } + self.with_runner(CliRunner::try_default_runtime()?, launcher) } - /// Initializes tracing with the configured options. - /// - /// If file logging is enabled, this function returns a guard that must be kept alive to ensure - /// that all logs are flushed to disk. - pub fn init_tracing(&self) -> eyre::Result> { - let guard = self.logs.init_tracing()?; - Ok(guard) + /// Execute the configured cli command with the provided [`CliRunner`]. 
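// Hedged sketch: `run` above now only chooses the default runtime, so callers
// needing runtime control can use the `with_runner` method defined just below
// (imports as in the scroll-reth binary; assumes the default `NoArgs` extension):
fn run_with_explicit_runner() -> eyre::Result<()> {
    use clap::Parser;
    use reth_cli_runner::CliRunner;
    use reth_scroll_cli::{Cli, ScrollChainSpecParser};
    use reth_scroll_node::ScrollNode;

    let runner = CliRunner::try_default_runtime()?; // or a custom-built runner
    Cli::<ScrollChainSpecParser>::parse().with_runner(runner, |builder, _| async move {
        let handle = builder.node(ScrollNode::default()).launch_with_debug_capabilities().await?;
        handle.node_exit_future.await
    })
}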
+ pub fn with_runner(self, runner: CliRunner, launcher: L) -> eyre::Result<()> + where + L: FnOnce(WithLaunchContext, C::ChainSpec>>, Ext) -> Fut, + Fut: Future>, + { + let mut this = self.configure(); + this.set_runner(runner); + this.run(FnLauncher::new::(async move |builder, chain_spec| { + launcher(builder, chain_spec).await + })) } } diff --git a/crates/scroll/evm/src/config.rs b/crates/scroll/evm/src/config.rs index e9baba86dff..4f8adc3f105 100644 --- a/crates/scroll/evm/src/config.rs +++ b/crates/scroll/evm/src/config.rs @@ -155,8 +155,8 @@ mod tests { // prepare all fork heads let curie_head = &Head { number: 7096836, ..Default::default() }; - let bernouilli_head = &Head { number: 5220340, ..Default::default() }; - let pre_bernouilli_head = &Head { number: 0, ..Default::default() }; + let bernoulli_head = &Head { number: 5220340, ..Default::default() }; + let pre_bernoulli_head = &Head { number: 0, ..Default::default() }; // check correct spec id assert_eq!( @@ -164,14 +164,13 @@ mod tests { ScrollSpecId::CURIE ); assert_eq!( - config - .spec_id_at_timestamp_and_number(bernouilli_head.timestamp, bernouilli_head.number), + config.spec_id_at_timestamp_and_number(bernoulli_head.timestamp, bernoulli_head.number), ScrollSpecId::BERNOULLI ); assert_eq!( config.spec_id_at_timestamp_and_number( - pre_bernouilli_head.timestamp, - pre_bernouilli_head.number + pre_bernoulli_head.timestamp, + pre_bernoulli_head.number ), ScrollSpecId::SHANGHAI ); @@ -195,20 +194,20 @@ mod tests { assert_eq!(env.cfg_env.spec, ScrollSpecId::CURIE); // bernoulli - let bernouilli_header = Header { number: 5220340, ..Default::default() }; + let bernoulli_header = Header { number: 5220340, ..Default::default() }; // fill cfg env - let env = config.evm_env(&bernouilli_header); + let env = config.evm_env(&bernoulli_header); // check correct cfg env assert_eq!(env.cfg_env.chain_id, Scroll as u64); assert_eq!(env.cfg_env.spec, ScrollSpecId::BERNOULLI); // pre-bernoulli - let pre_bernouilli_header = Header { number: 0, ..Default::default() }; + let pre_bernoulli_header = Header { number: 0, ..Default::default() }; // fill cfg env - let env = config.evm_env(&pre_bernouilli_header); + let env = config.evm_env(&pre_bernoulli_header); // check correct cfg env assert_eq!(env.cfg_env.chain_id, Scroll as u64); diff --git a/crates/scroll/evm/src/execute.rs b/crates/scroll/evm/src/execute.rs index 8648e3d8453..75ad6c239b3 100644 --- a/crates/scroll/evm/src/execute.rs +++ b/crates/scroll/evm/src/execute.rs @@ -148,9 +148,9 @@ mod tests { ) } - fn transaction(typ: ScrollTxType, gas_limit: u64) -> ScrollTxEnvelope { + fn transaction(ty: ScrollTxType, gas_limit: u64) -> ScrollTxEnvelope { let pk = B256::random(); - match typ { + match ty { ScrollTxType::Legacy => { let tx = TxLegacy { to: TxKind::Call(Address::ZERO), @@ -407,16 +407,15 @@ mod tests { let oracle_bytecode = oracle.info.unwrap().code.unwrap(); let bytecode = Bytecode::new_raw(CURIE_L1_GAS_PRICE_ORACLE_BYTECODE); - // TODO: update when we bump to revm > v78 // Note: Eq operator fails due to the presence of `table_ptr` in the `JumpTable` struct // therefore we do a manual comparison. 
assert_eq!( - bytecode.legacy_jump_table().unwrap().len, - oracle_bytecode.legacy_jump_table().unwrap().len + bytecode.legacy_jump_table().unwrap().len(), + oracle_bytecode.legacy_jump_table().unwrap().len() ); assert_eq!( - bytecode.legacy_jump_table().unwrap().table, - oracle_bytecode.legacy_jump_table().unwrap().table + bytecode.legacy_jump_table().unwrap().as_slice(), + oracle_bytecode.legacy_jump_table().unwrap().as_slice() ); assert_eq!(bytecode.bytecode(), oracle_bytecode.bytecode()); diff --git a/crates/scroll/node/Cargo.toml b/crates/scroll/node/Cargo.toml index 58e2e4901c5..01c5794bb5c 100644 --- a/crates/scroll/node/Cargo.toml +++ b/crates/scroll/node/Cargo.toml @@ -59,9 +59,11 @@ reth-scroll-txpool.workspace = true scroll-alloy-consensus.workspace = true scroll-alloy-evm.workspace = true scroll-alloy-hardforks.workspace = true +scroll-alloy-network.workspace = true scroll-alloy-rpc-types-engine.workspace = true # misc +clap.workspace = true eyre.workspace = true serde_json = { workspace = true, optional = true } tracing.workspace = true diff --git a/crates/scroll/node/src/addons.rs b/crates/scroll/node/src/addons.rs index 7e4a75b3396..8d48748d123 100644 --- a/crates/scroll/node/src/addons.rs +++ b/crates/scroll/node/src/addons.rs @@ -1,10 +1,13 @@ -use crate::{ScrollEngineValidator, ScrollEngineValidatorBuilder, ScrollStorage}; +use crate::{ + builder::payload::SCROLL_DEFAULT_PAYLOAD_SIZE_LIMIT, ScrollEngineValidator, + ScrollEngineValidatorBuilder, ScrollStorage, +}; use reth_evm::{ConfigureEvm, EvmFactory, EvmFactoryFor}; use reth_node_api::{AddOnsContext, NodeAddOns}; use reth_node_builder::{ rpc::{ BasicEngineApiBuilder, EngineValidatorAddOn, EngineValidatorBuilder, EthApiBuilder, - RethRpcAddOns, RpcAddOns, RpcHandle, + Identity, RethRpcAddOns, RethRpcMiddleware, RpcAddOns, RpcHandle, }, FullNodeComponents, }; @@ -14,13 +17,15 @@ use reth_scroll_chainspec::ScrollChainSpec; use reth_scroll_engine_primitives::ScrollEngineTypes; use reth_scroll_evm::ScrollNextBlockEnvAttributes; use reth_scroll_primitives::ScrollPrimitives; -use reth_scroll_rpc::{eth::ScrollEthApiBuilder, ScrollEthApi, ScrollEthApiError}; +use reth_scroll_rpc::{eth::ScrollEthApiBuilder, ScrollEthApiError}; use revm::context::TxEnv; use scroll_alloy_evm::ScrollTransactionIntoTxEnv; +use scroll_alloy_network::Scroll; +use std::marker::PhantomData; /// Add-ons for the Scroll follower node. #[derive(Debug)] -pub struct ScrollAddOns +pub struct ScrollAddOns where N: FullNodeComponents, ScrollEthApiBuilder: EthApiBuilder, @@ -32,31 +37,32 @@ where ScrollEthApiBuilder, ScrollEngineValidatorBuilder, BasicEngineApiBuilder, + RpcMiddleWare, >, } -impl Default for ScrollAddOns +impl Default for ScrollAddOns where N: FullNodeComponents>, ScrollEthApiBuilder: EthApiBuilder, { fn default() -> Self { - Self::builder().build() + Self::builder::().build() } } -impl ScrollAddOns +impl ScrollAddOns where N: FullNodeComponents>, ScrollEthApiBuilder: EthApiBuilder, { /// Build a [`ScrollAddOns`] using [`ScrollAddOnsBuilder`]. 
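// Hedged sketch of wiring the builder whose methods appear just below; this
// mirrors the `add_ons` hook in node.rs further down the diff. The endpoint
// URL and fee value are illustrative, and trait bounds are left to inference
// at the call site.
let add_ons = ScrollAddOnsBuilder::<Scroll>::default()
    .with_sequencer(Some("https://rpc.scroll.io".to_string()))
    .with_min_suggested_priority_fee(100)
    .with_payload_size_limit(122_880)
    .with_rpc_middleware(Identity::new())
    .build();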
- pub fn builder() -> ScrollAddOnsBuilder { + pub fn builder() -> ScrollAddOnsBuilder { ScrollAddOnsBuilder::default() } } -impl NodeAddOns for ScrollAddOns +impl NodeAddOns for ScrollAddOns where N: FullNodeComponents< Types: NodeTypes< @@ -69,8 +75,9 @@ where >, ScrollEthApiError: FromEvmError, EvmFactoryFor: EvmFactory>, + RpcMiddleware: RethRpcMiddleware, { - type Handle = RpcHandle>; + type Handle = RpcHandle>::EthApi>; async fn launch_add_ons(self, ctx: AddOnsContext<'_, N>) -> eyre::Result { let Self { rpc_add_ons } = self; @@ -78,7 +85,7 @@ where } } -impl RethRpcAddOns for ScrollAddOns +impl RethRpcAddOns for ScrollAddOns where N: FullNodeComponents< Types: NodeTypes< @@ -91,15 +98,16 @@ where >, ScrollEthApiError: FromEvmError, EvmFactoryFor: EvmFactory>, + RpcMiddleware: RethRpcMiddleware, { - type EthApi = ScrollEthApi; + type EthApi = >::EthApi; fn hooks_mut(&mut self) -> &mut reth_node_builder::rpc::RpcHooks { self.rpc_add_ons.hooks_mut() } } -impl EngineValidatorAddOn for ScrollAddOns +impl EngineValidatorAddOn for ScrollAddOns where N: FullNodeComponents< Types: NodeTypes< @@ -109,6 +117,7 @@ where >, >, ScrollEthApiBuilder: EthApiBuilder, + RpcMiddleware: Send, { type Validator = ScrollEngineValidator; @@ -118,23 +127,94 @@ where } /// A regular scroll evm and executor builder. -#[derive(Debug, Default, Clone)] +#[derive(Debug, Clone)] #[non_exhaustive] -pub struct ScrollAddOnsBuilder {} +pub struct ScrollAddOnsBuilder { + /// Sequencer client, configured to forward submitted transactions to sequencer of given Scroll + /// network. + sequencer_url: Option, + /// Minimum suggested priority fee (tip) + min_suggested_priority_fee: u64, + /// Maximum payload size + payload_size_limit: u64, + /// Marker for network types. + _nt: PhantomData, + /// RPC middleware to use + rpc_middleware: RpcMiddleware, +} + +impl Default for ScrollAddOnsBuilder { + fn default() -> Self { + Self { + sequencer_url: None, + payload_size_limit: SCROLL_DEFAULT_PAYLOAD_SIZE_LIMIT, + // TODO (scroll): update with default values. + min_suggested_priority_fee: 1_000_000, + _nt: PhantomData, + rpc_middleware: Identity::new(), + } + } +} -impl ScrollAddOnsBuilder { +impl ScrollAddOnsBuilder { + /// With a [`reth_scroll_rpc::SequencerClient`]. + pub fn with_sequencer(mut self, sequencer_client: Option) -> Self { + self.sequencer_url = sequencer_client; + self + } + + /// With minimum suggested priority fee. + pub const fn with_min_suggested_priority_fee( + mut self, + min_suggested_priority_fee: u64, + ) -> Self { + self.min_suggested_priority_fee = min_suggested_priority_fee; + self + } + + /// With maximum payload size limit. + pub const fn with_payload_size_limit(mut self, payload_size_limit: u64) -> Self { + self.payload_size_limit = payload_size_limit; + self + } + + /// Configure the RPC middleware to use + pub fn with_rpc_middleware(self, rpc_middleware: T) -> ScrollAddOnsBuilder { + let Self { sequencer_url, min_suggested_priority_fee, payload_size_limit, _nt, .. } = self; + ScrollAddOnsBuilder { + sequencer_url, + payload_size_limit, + min_suggested_priority_fee, + _nt, + rpc_middleware, + } + } +} + +impl ScrollAddOnsBuilder { /// Builds an instance of [`ScrollAddOns`]. - pub fn build(self) -> ScrollAddOns + pub fn build(self) -> ScrollAddOns where N: FullNodeComponents>, ScrollEthApiBuilder: EthApiBuilder, { + let Self { + sequencer_url, + payload_size_limit, + min_suggested_priority_fee, + rpc_middleware, + .. 
+ } = self; + ScrollAddOns { rpc_add_ons: RpcAddOns::new( - ScrollEthApi::::builder(), - Default::default(), + ScrollEthApiBuilder::new() + .with_sequencer(sequencer_url) + .with_payload_size_limit(payload_size_limit) + .with_min_suggested_priority_fee(min_suggested_priority_fee), Default::default(), Default::default(), + rpc_middleware, ), } } diff --git a/crates/scroll/node/src/args.rs b/crates/scroll/node/src/args.rs new file mode 100644 index 00000000000..7fe563804e2 --- /dev/null +++ b/crates/scroll/node/src/args.rs @@ -0,0 +1,30 @@ +use crate::builder::payload::SCROLL_DEFAULT_PAYLOAD_SIZE_LIMIT; + +use reth_scroll_rpc::eth::DEFAULT_MIN_SUGGESTED_PRIORITY_FEE; + +/// Rollup arguments for the Scroll node. +#[derive(Debug, Clone, clap::Args)] +pub struct ScrollRollupArgs { + /// Endpoint for the sequencer mempool (can be either HTTP or WS) + #[arg(long = "scroll.sequencer")] + pub sequencer: Option<String>, + + /// Minimum suggested priority fee (tip) in wei; defaults to + /// [`DEFAULT_MIN_SUGGESTED_PRIORITY_FEE`]. + #[arg(long = "scroll.min-suggested-priority-fee", default_value_t = DEFAULT_MIN_SUGGESTED_PRIORITY_FEE)] + pub min_suggested_priority_fee: u64, + + /// Payload size limit; defaults to `122kB`. + #[arg(long = "scroll.payload-size-limit", default_value_t = SCROLL_DEFAULT_PAYLOAD_SIZE_LIMIT)] + pub payload_size_limit: u64, +} + +impl Default for ScrollRollupArgs { + fn default() -> Self { + Self { + sequencer: None, + min_suggested_priority_fee: DEFAULT_MIN_SUGGESTED_PRIORITY_FEE, + payload_size_limit: SCROLL_DEFAULT_PAYLOAD_SIZE_LIMIT, + } + } +} diff --git a/crates/scroll/node/src/builder/engine.rs b/crates/scroll/node/src/builder/engine.rs index c91fe467d01..97085fdecfb 100644 --- a/crates/scroll/node/src/builder/engine.rs +++ b/crates/scroll/node/src/builder/engine.rs @@ -65,7 +65,7 @@ where fn validate_version_specific_fields( &self, _version: EngineApiMessageVersion, - payload_or_attrs: PayloadOrAttributes<'_, Self::ExecutionData, ScrollPayloadAttributes>, + payload_or_attrs: PayloadOrAttributes<'_, Types::ExecutionData, ScrollPayloadAttributes>, ) -> Result<(), EngineObjectValidationError> { validate_scroll_payload_or_attributes( &payload_or_attrs, @@ -95,17 +95,6 @@ where Ok(()) } - - fn validate_payload_attributes_against_header( - &self, - attr: &::PayloadAttributes, - header: &::Header, - ) -> Result<(), InvalidPayloadAttributesError> { - if attr.timestamp() < header.timestamp() { - return Err(InvalidPayloadAttributesError::InvalidTimestamp); - } - Ok(()) - } } /// Validates the payload or attributes for Scroll.
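// A hedged sketch showing the new flags parsing into `ScrollRollupArgs`;
// `TestCli` is a throwaway wrapper invented for this example and the values
// are illustrative.
#[cfg(test)]
mod scroll_rollup_args_example {
    use super::*;
    use clap::Parser;

    #[derive(Debug, Parser)]
    struct TestCli {
        #[command(flatten)]
        args: ScrollRollupArgs,
    }

    #[test]
    fn parses_scroll_rollup_flags() {
        let cli = TestCli::parse_from([
            "scroll-reth",
            "--scroll.sequencer",
            "https://rpc.scroll.io",
            "--scroll.min-suggested-priority-fee",
            "100",
        ]);
        assert_eq!(cli.args.sequencer.as_deref(), Some("https://rpc.scroll.io"));
        assert_eq!(cli.args.min_suggested_priority_fee, 100);
        // Omitted flags fall back to their documented defaults.
        assert_eq!(cli.args.payload_size_limit, SCROLL_DEFAULT_PAYLOAD_SIZE_LIMIT);
    }
}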
@@ -125,9 +114,11 @@ fn validate_scroll_payload_or_attributes( Ok(()) } -impl PayloadValidator for ScrollEngineValidator { +impl PayloadValidator for ScrollEngineValidator +where + Types: PayloadTypes, +{ type Block = ScrollBlock; - type ExecutionData = ExecutionData; fn ensure_well_formed_payload( &self, @@ -163,4 +154,15 @@ impl PayloadValidator for ScrollEngineValidator { Err(PayloadError::BlockHash { execution: block_hash_no_turn, consensus: expected_hash } .into()) } + + fn validate_payload_attributes_against_header( + &self, + attr: &Types::PayloadAttributes, + header: &::Header, + ) -> Result<(), InvalidPayloadAttributesError> { + if attr.timestamp() < header.timestamp() { + return Err(InvalidPayloadAttributesError::InvalidTimestamp); + } + Ok(()) + } } diff --git a/crates/scroll/node/src/builder/payload.rs b/crates/scroll/node/src/builder/payload.rs index 4ea061efebc..c4c2624d8d7 100644 --- a/crates/scroll/node/src/builder/payload.rs +++ b/crates/scroll/node/src/builder/payload.rs @@ -35,7 +35,7 @@ impl Default for ScrollPayloadBuilderBuilder { const SCROLL_GAS_LIMIT: u64 = 20_000_000; const SCROLL_PAYLOAD_BUILDING_DURATION: Duration = Duration::from_secs(1); -const SCROLL_DEFAULT_PAYLOAD_SIZE_LIMIT: u64 = 122_880; +pub(crate) const SCROLL_DEFAULT_PAYLOAD_SIZE_LIMIT: u64 = 122_880; impl ScrollPayloadBuilderBuilder { /// A helper method to initialize [`reth_scroll_payload::ScrollPayloadBuilder`] with the diff --git a/crates/scroll/node/src/lib.rs b/crates/scroll/node/src/lib.rs index 2d262086661..099a252bde4 100644 --- a/crates/scroll/node/src/lib.rs +++ b/crates/scroll/node/src/lib.rs @@ -1,4 +1,8 @@ //! Node specific implementations for Scroll. + +mod args; +pub use args::ScrollRollupArgs; + mod builder; pub use builder::{ consensus::ScrollConsensusBuilder, diff --git a/crates/scroll/node/src/node.rs b/crates/scroll/node/src/node.rs index 93f7a0901e7..17832c91290 100644 --- a/crates/scroll/node/src/node.rs +++ b/crates/scroll/node/src/node.rs @@ -1,10 +1,12 @@ //! Node specific implementations for Scroll. use crate::{ - ScrollAddOns, ScrollConsensusBuilder, ScrollExecutorBuilder, ScrollNetworkBuilder, - ScrollPayloadBuilderBuilder, ScrollPoolBuilder, ScrollStorage, + args::ScrollRollupArgs, ScrollAddOns, ScrollAddOnsBuilder, ScrollConsensusBuilder, + ScrollExecutorBuilder, ScrollNetworkBuilder, ScrollPayloadBuilderBuilder, ScrollPoolBuilder, + ScrollStorage, }; -use reth_node_api::FullNodeComponents; +use reth_engine_local::LocalPayloadAttributesBuilder; +use reth_node_api::{FullNodeComponents, PayloadAttributesBuilder, PayloadTypes}; use reth_node_builder::{ components::{BasicPayloadServiceBuilder, ComponentsBuilder}, node::{FullNodeTypes, NodeTypes}, @@ -14,12 +16,23 @@ use reth_scroll_chainspec::ScrollChainSpec; use reth_scroll_engine_primitives::ScrollEngineTypes; use reth_scroll_primitives::ScrollPrimitives; use reth_trie_db::MerklePatriciaTrie; +use scroll_alloy_network::Scroll; +use std::sync::Arc; /// The Scroll node implementation. #[derive(Clone, Debug, Default)] -pub struct ScrollNode; +#[non_exhaustive] +pub struct ScrollNode { + /// Additional Scroll args. + pub args: ScrollRollupArgs, +} impl ScrollNode { + /// Creates a new instance of the Scroll node type. + pub const fn new(args: ScrollRollupArgs) -> Self { + Self { args } + } + /// Returns a [`ComponentsBuilder`] configured for a regular Ethereum node. 
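// Hedged sketch: `ScrollNode` is no longer a unit struct, so construction now
// goes through the args (typically handed over by the CLI, as in main.rs
// above); the sequencer URL is illustrative.
fn node_from_args() -> ScrollNode {
    ScrollNode::new(ScrollRollupArgs {
        sequencer: Some("https://rpc.scroll.io".to_string()),
        ..Default::default()
    })
}
// `Default` still works for tests and local setups:
// let node = ScrollNode::default();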
pub fn components() -> ComponentsBuilder< Node, @@ -71,7 +84,11 @@ where } fn add_ons(&self) -> Self::AddOns { - ScrollAddOns::default() + ScrollAddOnsBuilder::::default() + .with_sequencer(self.args.sequencer.clone()) + .with_min_suggested_priority_fee(self.args.min_suggested_priority_fee) + .with_payload_size_limit(self.args.payload_size_limit) + .build() } } @@ -84,6 +101,13 @@ where fn rpc_to_primitive_block(rpc_block: Self::RpcBlock) -> reth_node_api::BlockTy { rpc_block.into_consensus() } + + fn local_payload_attributes_builder( + chain_spec: &Self::ChainSpec, + ) -> impl PayloadAttributesBuilder<<::Payload as PayloadTypes>::PayloadAttributes> + { + LocalPayloadAttributesBuilder::new(Arc::new(chain_spec.clone())) + } } impl NodeTypes for ScrollNode { diff --git a/crates/scroll/openvm-compat/Cargo.lock b/crates/scroll/openvm-compat/Cargo.lock index c5267e11ad0..92cac3d6642 100644 --- a/crates/scroll/openvm-compat/Cargo.lock +++ b/crates/scroll/openvm-compat/Cargo.lock @@ -35,9 +35,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "1.0.19" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74a694d8be621ee12b45ae23e7f18393b9a1e04f1ba47a0136767cb8c955f7f8" +checksum = "1b6093bc69509849435a2d68237a2e9fea79d27390c8e62f1e4012c460aabad8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -60,9 +60,9 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "1.0.19" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1647d47f59288584cc3b40eff3e7dde6af8c88a2fca8fe02c22de7b9ab218ffa" +checksum = "8d1cfed4fefd13b5620cb81cdb6ba397866ff0de514c1b24806e6e79cdff5570" dependencies = [ "alloy-consensus", "alloy-eips", @@ -112,9 +112,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "1.0.19" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "715ae25d525c567481ba2fc97000415624836d516958b9c3f189f1e267d1d90a" +checksum = "5937e2d544e9b71000942d875cbc57965b32859a666ea543cc57aae5a06d602d" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -132,9 +132,9 @@ dependencies = [ [[package]] name = "alloy-evm" -version = "0.12.3" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff5aae4c6dc600734b206b175f3200085ee82dcdaa388760358830a984ca9869" +checksum = "822fc12d28a75059f87ef03939679e775c0655e83c98589500f7b9ec41d63e95" dependencies = [ "alloy-consensus", "alloy-eips", @@ -149,9 +149,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "1.0.19" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "696a83af273bfc512e02693bd4b5056c8c57898328bd0ce594013fb864de4dcf" +checksum = "c51b4c13e02a8104170a4de02ccf006d7c233e6c10ab290ee16e7041e6ac221d" dependencies = [ "alloy-eips", "alloy-primitives", @@ -176,9 +176,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "1.0.19" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35648c318b4649d2d141d1ed4f6e32c69f4959bdc2f6e44d53c0a333ed615a37" +checksum = "793df1e3457573877fbde8872e4906638fde565ee2d3bd16d04aad17d43dbf0e" dependencies = [ "alloy-consensus", "alloy-eips", @@ -189,9 +189,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6177ed26655d4e84e00b65cb494d4e0b8830e7cae7ef5d63087d445a2600fb55" +checksum = "3cfebde8c581a5d37b678d0a48a32decb51efd7a63a08ce2517ddec26db705c8" dependencies = [ "alloy-rlp", "bytes", @@ -238,9 +238,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "1.0.19" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ed717902ec7e7e5b737cf416f29c21f43a4e86db90ff6fddde199f4ed6ea1ac" +checksum = "f2f9cbf5f781b9ee39cfdddea078fdef6015424f4c8282ef0e5416d15ca352c4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -252,9 +252,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.0.19" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8300d59b0126876a1914102c588f9a4792eb4c754d483a954dc29904ddf79d6" +checksum = "46586ec3c278639fc0e129f0eb73dbfa3d57f683c44b2ff5e066fab7ba63fa1f" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -267,14 +267,15 @@ dependencies = [ "itertools 0.14.0", "serde", "serde_json", + "serde_with", "thiserror", ] [[package]] name = "alloy-serde" -version = "1.0.19" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8070bc2af2d48969e3aa709ea3ebf1f8316176b91c2132efe33d113f74383a9e" +checksum = "1e1722bc30feef87cc0fa824e43c9013f9639cc6c037be7be28a31361c788be2" dependencies = [ "alloy-primitives", "serde", @@ -283,9 +284,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a14f21d053aea4c6630687c2f4ad614bed4c81e14737a9b904798b24f30ea849" +checksum = "aedac07a10d4c2027817a43cc1f038313fc53c7ac866f7363239971fd01f9f18" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", @@ -297,9 +298,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-expander" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34d99282e7c9ef14eb62727981a985a01869e586d1dec729d3bb33679094c100" +checksum = "24f9a598f010f048d8b8226492b6401104f5a5c1273c2869b72af29b48bb4ba9" dependencies = [ "alloy-sol-macro-input", "const-hex", @@ -315,9 +316,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-input" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eda029f955b78e493360ee1d7bd11e1ab9f2a220a5715449babc79d6d0a01105" +checksum = "f494adf9d60e49aa6ce26dfd42c7417aa6d4343cf2ae621f20e4d92a5ad07d85" dependencies = [ "const-hex", "dunce", @@ -331,9 +332,9 @@ dependencies = [ [[package]] name = "alloy-sol-types" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58377025a47d8b8426b3e4846a251f2c1991033b27f517aade368146f6ab1dfe" +checksum = "a285b46e3e0c177887028278f04cc8262b76fd3b8e0e20e93cea0a58c35f5ac5" dependencies = [ "alloy-primitives", "alloy-sol-macro", @@ -357,9 +358,9 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "1.0.19" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "472e12600c46b766110edd8382b4804d70188870f064531ee8fd61a35ed18686" +checksum = "9f916ff6d52f219c44a9684aea764ce2c7e1d53bd4a724c9b127863aeacc30bb" dependencies = [ "alloy-primitives", "darling", @@ -643,6 +644,12 @@ dependencies = [ "rand 0.8.5", ] +[[package]] +name = "arrayref" +version = "0.3.9" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" + [[package]] name = "arrayvec" version = "0.7.6" @@ -821,9 +828,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.29" +version = "1.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c1599538de2394445747c8cf7935946e3cc27e9625f889d979bfb2aaf569362" +checksum = "c736e259eea577f443d5c86c304f9f4ae0295c43f3ba05c21f1d66b5f06001af" dependencies = [ "jobserver", "libc", @@ -1891,9 +1898,9 @@ dependencies = [ [[package]] name = "op-alloy-consensus" -version = "0.18.9" +version = "0.18.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8719d9b783b29cfa1cf8d591b894805786b9ab4940adc700a57fd0d5b721cf5" +checksum = "d3c719b26da6d9cac18c3a35634d6ab27a74a304ed9b403b43749c22e57a389f" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2342,7 +2349,7 @@ checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reth-chainspec" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -2361,7 +2368,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2378,7 +2385,7 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "1.5.0" +version = "1.6.0" dependencies = [ "convert_case", "proc-macro2", @@ -2388,7 +2395,7 @@ dependencies = [ [[package]] name = "reth-db-models" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -2397,7 +2404,7 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-eip2124", "alloy-hardforks", @@ -2408,7 +2415,7 @@ dependencies = [ [[package]] name = "reth-ethereum-primitives" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2422,7 +2429,7 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2443,7 +2450,7 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2460,7 +2467,7 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-evm", "alloy-primitives", @@ -2472,7 +2479,7 @@ dependencies = [ [[package]] name = "reth-execution-types" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2487,7 +2494,7 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -2498,7 +2505,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "once_cell", @@ -2510,7 +2517,7 @@ dependencies = [ [[package]] name = "reth-primitives-traits" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2537,7 +2544,7 @@ dependencies = [ [[package]] name = "reth-prune-types" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-primitives", "derive_more", @@ -2546,7 +2553,7 @@ dependencies = [ [[package]] name = "reth-scroll-chainspec" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -2554,6 +2561,7 @@ dependencies = [ 
"alloy-genesis", "alloy-primitives", "alloy-serde", + "auto_impl", "derive_more", "once_cell", "reth-chainspec", @@ -2569,7 +2577,7 @@ dependencies = [ [[package]] name = "reth-scroll-evm" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2584,6 +2592,7 @@ dependencies = [ "reth-scroll-chainspec", "reth-scroll-forks", "reth-scroll-primitives", + "reth-storage-api", "revm", "revm-primitives", "revm-scroll", @@ -2596,7 +2605,7 @@ dependencies = [ [[package]] name = "reth-scroll-forks" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-chains", "alloy-primitives", @@ -2608,7 +2617,7 @@ dependencies = [ [[package]] name = "reth-scroll-primitives" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2624,7 +2633,7 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-primitives", "reth-trie-common", @@ -2632,7 +2641,7 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-primitives", "derive_more", @@ -2642,7 +2651,7 @@ dependencies = [ [[package]] name = "reth-storage-api" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2663,7 +2672,7 @@ dependencies = [ [[package]] name = "reth-storage-errors" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -2678,7 +2687,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2699,7 +2708,7 @@ dependencies = [ [[package]] name = "reth-trie-common" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -2714,7 +2723,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -2729,15 +2738,15 @@ dependencies = [ [[package]] name = "reth-zstd-compressors" -version = "1.5.0" +version = "1.6.0" dependencies = [ "zstd", ] [[package]] name = "revm" -version = "26.0.1" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "27.1.0" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "revm-bytecode", "revm-context", @@ -2754,8 +2763,8 @@ dependencies = [ [[package]] name = "revm-bytecode" -version = "5.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "6.1.0" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "bitvec", "once_cell", @@ -2766,8 +2775,8 @@ dependencies = [ [[package]] name = "revm-context" -version = "7.0.1" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "8.0.4" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "cfg-if", "derive-where", @@ -2780,8 +2789,8 @@ dependencies = [ [[package]] name = "revm-context-interface" -version = "7.0.1" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "9.0.0" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ 
-2794,8 +2803,8 @@ dependencies = [ [[package]] name = "revm-database" -version = "6.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "7.0.2" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "revm-bytecode", "revm-database-interface", @@ -2805,18 +2814,19 @@ dependencies = [ [[package]] name = "revm-database-interface" -version = "6.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "7.0.2" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "auto_impl", + "either", "revm-primitives", "revm-state", ] [[package]] name = "revm-handler" -version = "7.0.1" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "8.1.0" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "auto_impl", "derive-where", @@ -2832,8 +2842,8 @@ dependencies = [ [[package]] name = "revm-inspector" -version = "7.0.1" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "8.1.0" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "auto_impl", "either", @@ -2847,8 +2857,8 @@ dependencies = [ [[package]] name = "revm-interpreter" -version = "22.0.1" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "24.0.0" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "revm-bytecode", "revm-context-interface", @@ -2857,14 +2867,15 @@ dependencies = [ [[package]] name = "revm-precompile" -version = "23.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "25.0.0" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "ark-bls12-381", "ark-bn254", "ark-ec", "ark-ff 0.5.0", "ark-serialize 0.5.0", + "arrayref", "aurora-engine-modexp", "cfg-if", "k256", @@ -2877,8 +2888,8 @@ dependencies = [ [[package]] name = "revm-primitives" -version = "20.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "20.1.0" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "alloy-primitives", "num_enum", @@ -2888,7 +2899,7 @@ dependencies = [ [[package]] name = "revm-scroll" version = "0.1.0" -source = "git+https://github.com/scroll-tech/scroll-revm?branch=feat%2Fv78#c0609bc9e8cb23aba8f560a82e040a49726cf760" +source = "git+https://github.com/scroll-tech/scroll-revm#08e6281f419517527d77a66fae19075e8bfeac3e" dependencies = [ "auto_impl", "enumn", @@ -2900,8 +2911,8 @@ dependencies = [ [[package]] name = "revm-state" -version = "6.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "7.0.2" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "bitflags", "revm-bytecode", @@ -3064,7 +3075,7 @@ dependencies = [ [[package]] name = "scroll-alloy-consensus" -version = "1.5.0" +version = "1.6.0" 
dependencies = [ "alloy-consensus", "alloy-eips", @@ -3079,7 +3090,7 @@ dependencies = [ [[package]] name = "scroll-alloy-evm" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -3094,7 +3105,7 @@ dependencies = [ [[package]] name = "scroll-alloy-hardforks" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-hardforks", "auto_impl", @@ -3102,7 +3113,7 @@ dependencies = [ [[package]] name = "scroll-alloy-rpc-types" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -3405,9 +3416,9 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ac494e7266fcdd2ad80bf4375d55d27a117ea5c866c26d0e97fe5b3caeeb75" +checksum = "a7a985ff4ffd7373e10e0fb048110fb11a162e5a4c47f92ddb8787a6f766b769" dependencies = [ "paste", "proc-macro2", diff --git a/crates/scroll/openvm-compat/Cargo.toml b/crates/scroll/openvm-compat/Cargo.toml index 5f136c1fca4..e244598819b 100644 --- a/crates/scroll/openvm-compat/Cargo.toml +++ b/crates/scroll/openvm-compat/Cargo.toml @@ -28,4 +28,4 @@ scroll-alloy-consensus = { path = "../alloy/consensus", default-features = false scroll-alloy-rpc-types = { path = "../alloy/rpc-types", default-features = false } [patch.crates-io] -revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" } +revm = { git = "https://github.com/scroll-tech/revm" } diff --git a/crates/scroll/rpc/Cargo.toml b/crates/scroll/rpc/Cargo.toml index 414953296ea..5bf30ccaa36 100644 --- a/crates/scroll/rpc/Cargo.toml +++ b/crates/scroll/rpc/Cargo.toml @@ -13,11 +13,11 @@ workspace = true [dependencies] # reth +reth-chainspec.workspace = true reth-evm.workspace = true -reth-primitives.workspace = true reth-primitives-traits.workspace = true reth-provider.workspace = true -reth-rpc-eth-api.workspace = true +reth-rpc-eth-api = { workspace = true, features = ["scroll"] } reth-rpc-eth-types.workspace = true reth-tasks = { workspace = true, features = ["rayon"] } reth-transaction-pool.workspace = true @@ -25,15 +25,12 @@ reth-rpc.workspace = true reth-rpc-convert = { workspace = true, features = ["scroll"] } reth-node-api.workspace = true reth-node-builder.workspace = true -reth-network-api.workspace = true -reth-chainspec.workspace = true # scroll reth-scroll-chainspec.workspace = true reth-scroll-evm.workspace = true reth-scroll-primitives = { workspace = true, features = ["serde", "serde-bincode-compat", "reth-codec"] } scroll-alloy-consensus.workspace = true -scroll-alloy-evm.workspace = true scroll-alloy-hardforks.workspace = true scroll-alloy-network.workspace = true scroll-alloy-rpc-types.workspace = true @@ -56,7 +53,6 @@ reqwest = { workspace = true, default-features = false, features = ["rustls-tls- tracing.workspace = true # async -parking_lot.workspace = true tokio.workspace = true # rpc diff --git a/crates/scroll/rpc/src/eth/block.rs b/crates/scroll/rpc/src/eth/block.rs index 49bcd5b29e2..7dc8ec9951a 100644 --- a/crates/scroll/rpc/src/eth/block.rs +++ b/crates/scroll/rpc/src/eth/block.rs @@ -1,81 +1,26 @@ //! Loads and formats Scroll block RPC response. 
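// The impls below lose their hand-written bodies. Reconstructed with the
// generic parameters that this diff rendering strips, the new shape is an
// empty impl driven entirely by the `RpcConvert` bound; treat this as a
// sketch rather than the verbatim source:
impl<N, Rpc> EthBlocks for ScrollEthApi<N, Rpc>
where
    N: RpcNodeCore,
    ScrollEthApiError: FromEvmError<N::Evm>,
    Rpc: RpcConvert<Primitives = N::Primitives, Error = ScrollEthApiError>,
{
}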
-use crate::{eth::ScrollNodeCore, ScrollEthApi, ScrollEthApiError, ScrollReceiptBuilder}; +use crate::{ScrollEthApi, ScrollEthApiError}; -use alloy_consensus::BlockHeader; -use alloy_rpc_types_eth::BlockId; -use reth_chainspec::ChainSpecProvider; -use reth_node_api::BlockBody; -use reth_primitives::TransactionMeta; -use reth_primitives_traits::SignedTransaction; -use reth_provider::{BlockReader, HeaderProvider, ProviderTx}; +use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ - helpers::{EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt, SpawnBlocking}, - types::RpcTypes, - RpcReceipt, + helpers::{EthBlocks, LoadBlock}, + RpcNodeCore, }; -use reth_scroll_chainspec::ScrollChainSpec; -use reth_scroll_primitives::{ScrollReceipt, ScrollTransactionSigned}; -use reth_transaction_pool::{PoolTransaction, TransactionPool}; -use scroll_alloy_rpc_types::ScrollTransactionReceipt; +use reth_rpc_eth_types::error::FromEvmError; -impl EthBlocks for ScrollEthApi +impl EthBlocks for ScrollEthApi where - Self: LoadBlock< - Error = ScrollEthApiError, - NetworkTypes: RpcTypes, - Provider: BlockReader, - >, - N: ScrollNodeCore + HeaderProvider>, + N: RpcNodeCore, + ScrollEthApiError: FromEvmError, + Rpc: RpcConvert, { - async fn block_receipts( - &self, - block_id: BlockId, - ) -> Result>>, Self::Error> - where - Self: LoadReceipt, - { - if let Some((block, receipts)) = self.load_block_and_receipts(block_id).await? { - let block_number = block.number(); - let base_fee = block.base_fee_per_gas(); - let block_hash = block.hash(); - let excess_blob_gas = block.excess_blob_gas(); - let timestamp = block.timestamp(); - - return block - .body() - .transactions() - .iter() - .zip(receipts.iter()) - .enumerate() - .map(|(idx, (tx, receipt))| -> Result<_, _> { - let meta = TransactionMeta { - tx_hash: *tx.tx_hash(), - index: idx as u64, - block_hash, - block_number, - base_fee, - excess_blob_gas, - timestamp, - }; - ScrollReceiptBuilder::new(tx, meta, receipt, &receipts) - .map(|builder| builder.build()) - }) - .collect::, Self::Error>>() - .map(Some) - } - - Ok(None) - } } -impl LoadBlock for ScrollEthApi +impl LoadBlock for ScrollEthApi where - Self: LoadPendingBlock< - Pool: TransactionPool< - Transaction: PoolTransaction>, - >, - > + SpawnBlocking, - N: ScrollNodeCore, + N: RpcNodeCore, + ScrollEthApiError: FromEvmError, + Rpc: RpcConvert, { } diff --git a/crates/scroll/rpc/src/eth/call.rs b/crates/scroll/rpc/src/eth/call.rs index 2181ed0f01f..99249412db1 100644 --- a/crates/scroll/rpc/src/eth/call.rs +++ b/crates/scroll/rpc/src/eth/call.rs @@ -1,53 +1,36 @@ -use super::ScrollNodeCore; use crate::{ScrollEthApi, ScrollEthApiError}; -use alloy_rpc_types_eth::transaction::TransactionRequest; -use reth_evm::{block::BlockExecutorFactory, ConfigureEvm, EvmFactory, TxEnvFor}; -use reth_primitives_traits::NodePrimitives; -use reth_provider::{errors::ProviderError, ProviderHeader, ProviderTx}; +use reth_evm::TxEnvFor; use reth_rpc_eth_api::{ - helpers::{estimate::EstimateCall, Call, EthCall, LoadBlock, LoadState, SpawnBlocking}, - FullEthApiTypes, RpcConvert, RpcTypes, + helpers::{estimate::EstimateCall, Call, EthCall}, + RpcConvert, RpcNodeCore, }; use reth_rpc_eth_types::error::FromEvmError; -use revm::context::TxEnv; -use scroll_alloy_evm::ScrollTransactionIntoTxEnv; -impl EthCall for ScrollEthApi +impl EthCall for ScrollEthApi where - Self: EstimateCall + LoadBlock + FullEthApiTypes, - N: ScrollNodeCore, + N: RpcNodeCore, + ScrollEthApiError: FromEvmError, + Rpc: + RpcConvert>, { } -impl EstimateCall for 
ScrollEthApi +impl EstimateCall for ScrollEthApi where - Self: Call, - Self::Error: From, - N: ScrollNodeCore, + N: RpcNodeCore, + ScrollEthApiError: FromEvmError, + Rpc: + RpcConvert>, { } -impl Call for ScrollEthApi +impl Call for ScrollEthApi where - Self: LoadState< - Evm: ConfigureEvm< - Primitives: NodePrimitives< - BlockHeader = ProviderHeader, - SignedTx = ProviderTx, - >, - BlockExecutorFactory: BlockExecutorFactory< - EvmFactory: EvmFactory>, - >, - >, - RpcConvert: RpcConvert, Network = Self::NetworkTypes>, - NetworkTypes: RpcTypes>, - Error: FromEvmError - + From<::Error> - + From, - > + SpawnBlocking, - Self::Error: From, - N: ScrollNodeCore, + N: RpcNodeCore, + ScrollEthApiError: FromEvmError, + Rpc: + RpcConvert>, { #[inline] fn call_gas_limit(&self) -> u64 { diff --git a/crates/scroll/rpc/src/eth/fee.rs b/crates/scroll/rpc/src/eth/fee.rs index 200559eaa3a..dd8803b114f 100644 --- a/crates/scroll/rpc/src/eth/fee.rs +++ b/crates/scroll/rpc/src/eth/fee.rs @@ -1,4 +1,5 @@ -use crate::{eth::ScrollNodeCore, ScrollEthApi}; +use crate::{ScrollEthApi, ScrollEthApiError}; + use alloy_consensus::BlockHeader; use alloy_eips::eip7840::BlobParams; use alloy_primitives::{Sealable, U256}; @@ -9,18 +10,21 @@ use reth_provider::{ BaseFeeProvider, BlockIdReader, ChainSpecProvider, HeaderProvider, ProviderHeader, StateProviderFactory, }; +use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ helpers::{EthFees, LoadFee}, FromEthApiError, RpcNodeCore, RpcNodeCoreExt, }; -use reth_rpc_eth_types::{fee_history::calculate_reward_percentiles_for_block, EthApiError}; +use reth_rpc_eth_types::{ + error::FromEvmError, fee_history::calculate_reward_percentiles_for_block, EthApiError, +}; use reth_scroll_chainspec::{ChainConfig, ScrollChainConfig}; use reth_scroll_evm::ScrollBaseFeeProvider; use scroll_alloy_hardforks::ScrollHardforks; use std::future::Future; use tracing::debug; -impl EthFees for ScrollEthApi +impl EthFees for ScrollEthApi where Self: LoadFee< Provider: StateProviderFactory @@ -30,7 +34,9 @@ where + ChainConfig, >, >, - N: ScrollNodeCore, + N: RpcNodeCore, + ScrollEthApiError: FromEvmError, + Rpc: RpcConvert, { #[allow(clippy::manual_async_fn)] fn fee_history( diff --git a/crates/scroll/rpc/src/eth/mod.rs b/crates/scroll/rpc/src/eth/mod.rs index 4fe7198a5b3..6deb09f7dc3 100644 --- a/crates/scroll/rpc/src/eth/mod.rs +++ b/crates/scroll/rpc/src/eth/mod.rs @@ -1,40 +1,33 @@ //! Scroll-Reth `eth_` endpoint implementation. 
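// Reconstructed sketch (this rendering drops generic parameters) of the API
// type reshaped in this module:
pub struct ScrollEthApi<N: RpcNodeCore, Rpc: RpcConvert> {
    /// Gateway to node's core components.
    inner: Arc<ScrollEthApiInner<N, Rpc>>,
}
// i.e. the network marker and response converter no longer live as separate
// fields next to the backend.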
+use crate::{ + eth::{receipt::ScrollReceiptConverter, transaction::ScrollTxInfoMapper}, + ScrollEthApiError, SequencerClient, +}; use alloy_primitives::U256; use eyre::WrapErr; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; +pub use receipt::ScrollReceiptBuilder; use reth_evm::ConfigureEvm; -use reth_network_api::NetworkInfo; -use reth_node_api::FullNodeComponents; -use reth_provider::{ - BlockNumReader, BlockReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, - ProviderBlock, ProviderHeader, ProviderReceipt, ProviderTx, StageCheckpointReader, - StateProviderFactory, -}; +use reth_node_api::{FullNodeComponents, FullNodeTypes, HeaderTy}; +use reth_node_builder::rpc::{EthApiBuilder, EthApiCtx}; +use reth_provider::{BlockReader, ProviderHeader, ProviderTx}; use reth_rpc::eth::{core::EthApiInner, DevSigner}; +use reth_rpc_convert::{RpcConvert, RpcConverter, RpcTypes, SignableTxRequest}; use reth_rpc_eth_api::{ helpers::{ - AddDevSigners, EthApiSpec, EthSigner, EthState, LoadBlock, LoadFee, LoadState, - SpawnBlocking, Trace, + pending_block::BuildPendingEnv, spec::SignersForApi, AddDevSigners, EthApiSpec, EthState, + LoadFee, LoadState, SpawnBlocking, Trace, }, - EthApiTypes, FullEthApiServer, RpcConverter, RpcNodeCore, RpcNodeCoreExt, + EthApiTypes, FullEthApiServer, RpcNodeCore, RpcNodeCoreExt, }; -use reth_rpc_eth_types::{EthStateCache, FeeHistoryCache, GasPriceOracle}; +use reth_rpc_eth_types::{error::FromEvmError, EthStateCache, FeeHistoryCache, GasPriceOracle}; use reth_tasks::{ pool::{BlockingTaskGuard, BlockingTaskPool}, TaskSpawner, }; -use reth_transaction_pool::TransactionPool; +use scroll_alloy_network::Scroll; use std::{fmt, marker::PhantomData, sync::Arc}; -use crate::{eth::transaction::ScrollTxInfoMapper, ScrollEthApiError}; -pub use receipt::ScrollReceiptBuilder; -use reth_node_builder::rpc::{EthApiBuilder, EthApiCtx}; -use reth_primitives_traits::NodePrimitives; -use reth_rpc_eth_types::error::FromEvmError; -use reth_scroll_primitives::ScrollPrimitives; -use scroll_alloy_network::{Network, Scroll}; - mod block; mod call; mod fee; @@ -42,15 +35,8 @@ mod pending_block; pub mod receipt; pub mod transaction; -use crate::SequencerClient; - /// Adapter for [`EthApiInner`], which holds all the data required to serve core `eth_` API. -pub type EthApiNodeBackend = EthApiInner< - ::Provider, - ::Pool, - ::Network, - ::Evm, ->; +pub type EthApiNodeBackend = EthApiInner; /// A helper trait with requirements for [`RpcNodeCore`] to be used in [`ScrollEthApi`]. pub trait ScrollNodeCore: RpcNodeCore {} @@ -67,21 +53,18 @@ impl ScrollNodeCore for T where T: RpcNodeCore {} /// This type implements the [`FullEthApi`](reth_rpc_eth_api::helpers::FullEthApi) by implemented /// all the `Eth` helper traits and prerequisite traits. #[derive(Clone)] -pub struct ScrollEthApi { +pub struct ScrollEthApi { /// Gateway to node's core components. - inner: Arc>, - /// Marker for the network types. - _nt: PhantomData, - tx_resp_builder: RpcConverter>, + inner: Arc>, } -impl ScrollEthApi { +impl ScrollEthApi { /// Creates a new [`ScrollEthApi`]. 
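// Hedged sketch of the reordered constructor that follows: the sequencer
// client now sits second, immediately after the backend (values illustrative).
fn build_scroll_eth_api<N: RpcNodeCore, Rpc: RpcConvert>(
    eth_api: EthApiNodeBackend<N, Rpc>,
) -> ScrollEthApi<N, Rpc> {
    ScrollEthApi::new(
        eth_api,
        None,            // Option<SequencerClient>
        U256::from(100), // minimum suggested priority fee
        122_880,         // payload size limit in bytes
    )
}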
pub fn new( - eth_api: EthApiNodeBackend, + eth_api: EthApiNodeBackend, + sequencer_client: Option, min_suggested_priority_fee: U256, payload_size_limit: u64, - sequencer_client: Option, ) -> Self { let inner = Arc::new(ScrollEthApiInner { eth_api, @@ -89,26 +72,17 @@ impl ScrollEthApi { payload_size_limit, sequencer_client, }); - Self { - inner: inner.clone(), - _nt: PhantomData, - tx_resp_builder: RpcConverter::with_mapper(ScrollTxInfoMapper::new(inner)), - } + Self { inner } } } -impl ScrollEthApi +impl ScrollEthApi where - N: ScrollNodeCore< - Provider: BlockReaderIdExt - + ChainSpecProvider - + CanonStateSubscriptions - + Clone - + 'static, - >, + N: RpcNodeCore, + Rpc: RpcConvert, { /// Returns a reference to the [`EthApiNodeBackend`]. - pub fn eth_api(&self) -> &EthApiNodeBackend { + pub fn eth_api(&self) -> &EthApiNodeBackend { self.inner.eth_api() } @@ -123,34 +97,30 @@ where } } -impl EthApiTypes for ScrollEthApi +impl EthApiTypes for ScrollEthApi where - Self: Send + Sync + fmt::Debug, - N: ScrollNodeCore, - NetworkT: Network + Clone + fmt::Debug, - ::Evm: fmt::Debug, - ::Primitives: fmt::Debug, + N: RpcNodeCore, + Rpc: RpcConvert, { type Error = ScrollEthApiError; - type NetworkTypes = Scroll; - type RpcConvert = RpcConverter>; + type NetworkTypes = Rpc::Network; + type RpcConvert = Rpc; fn tx_resp_builder(&self) -> &Self::RpcConvert { - &self.tx_resp_builder + self.inner.eth_api.tx_resp_builder() } } -impl RpcNodeCore for ScrollEthApi +impl RpcNodeCore for ScrollEthApi where - N: ScrollNodeCore, - NetworkT: Network, + N: RpcNodeCore, + Rpc: RpcConvert, { type Primitives = N::Primitives; type Provider = N::Provider; type Pool = N::Pool; type Evm = ::Evm; type Network = ::Network; - type PayloadBuilder = (); #[inline] fn pool(&self) -> &Self::Pool { @@ -167,39 +137,30 @@ where self.inner.eth_api.network() } - #[inline] - fn payload_builder(&self) -> &Self::PayloadBuilder { - &() - } - #[inline] fn provider(&self) -> &Self::Provider { self.inner.eth_api.provider() } } -impl RpcNodeCoreExt for ScrollEthApi +impl RpcNodeCoreExt for ScrollEthApi where - N: ScrollNodeCore, - NetworkT: Network, + N: RpcNodeCore, + Rpc: RpcConvert, { #[inline] - fn cache(&self) -> &EthStateCache, ProviderReceipt> { + fn cache(&self) -> &EthStateCache { self.inner.eth_api.cache() } } -impl EthApiSpec for ScrollEthApi +impl EthApiSpec for ScrollEthApi where - N: ScrollNodeCore< - Provider: ChainSpecProvider - + BlockNumReader - + StageCheckpointReader, - Network: NetworkInfo, - >, - NetworkT: Network, + N: RpcNodeCore, + Rpc: RpcConvert, { type Transaction = ProviderTx; + type Rpc = Rpc::Network; #[inline] fn starting_block(&self) -> U256 { @@ -207,18 +168,15 @@ where } #[inline] - fn signers(&self) -> &parking_lot::RwLock>>>> { + fn signers(&self) -> &SignersForApi { self.inner.eth_api.signers() } } -impl SpawnBlocking for ScrollEthApi +impl SpawnBlocking for ScrollEthApi where - Self: Send + Sync + Clone + 'static, - N: ScrollNodeCore, - NetworkT: Network, - ::Evm: fmt::Debug, - ::Primitives: fmt::Debug, + N: RpcNodeCore, + Rpc: RpcConvert, { #[inline] fn io_task_spawner(&self) -> impl TaskSpawner { @@ -236,14 +194,11 @@ where } } -impl LoadFee for ScrollEthApi +impl LoadFee for ScrollEthApi where - Self: LoadBlock, - N: ScrollNodeCore< - Provider: BlockReaderIdExt - + ChainSpecProvider - + StateProviderFactory, - >, + N: RpcNodeCore, + ScrollEthApiError: FromEvmError, + Rpc: RpcConvert, { #[inline] fn gas_oracle(&self) -> &GasPriceOracle { @@ -266,22 +221,17 @@ where } } -impl LoadState for 
ScrollEthApi +impl LoadState for ScrollEthApi where - N: ScrollNodeCore< - Provider: StateProviderFactory + ChainSpecProvider, - Pool: TransactionPool, - >, - NetworkT: Network, - ::Evm: fmt::Debug, - ::Primitives: fmt::Debug, + N: RpcNodeCore, + Rpc: RpcConvert, { } -impl EthState for ScrollEthApi +impl EthState for ScrollEthApi where - Self: LoadState + SpawnBlocking, - N: ScrollNodeCore, + N: RpcNodeCore, + Rpc: RpcConvert, { #[inline] fn max_proof_window(&self) -> u64 { @@ -289,32 +239,27 @@ where } } -impl Trace for ScrollEthApi +impl Trace for ScrollEthApi where - Self: RpcNodeCore - + LoadState< - Evm: ConfigureEvm< - Primitives: NodePrimitives< - BlockHeader = ProviderHeader, - SignedTx = ProviderTx, - >, - >, - Error: FromEvmError, - >, - N: ScrollNodeCore, + N: RpcNodeCore, + ScrollEthApiError: FromEvmError, + Rpc: RpcConvert, { } -impl AddDevSigners for ScrollEthApi +impl AddDevSigners for ScrollEthApi where - N: ScrollNodeCore, + N: RpcNodeCore, + Rpc: RpcConvert< + Network: RpcTypes>>, + >, { fn with_dev_accounts(&self) { *self.inner.eth_api.signers().write() = DevSigner::random_signers(20) } } -impl fmt::Debug for ScrollEthApi { +impl fmt::Debug for ScrollEthApi { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("ScrollEthApi").finish_non_exhaustive() } @@ -322,21 +267,21 @@ impl fmt::Debug for ScrollEthApi { /// Container type `ScrollEthApi` #[allow(missing_debug_implementations)] -pub struct ScrollEthApiInner { +pub struct ScrollEthApiInner { /// Gateway to node's core components. - pub eth_api: EthApiNodeBackend, + pub eth_api: EthApiNodeBackend, + /// Sequencer client, configured to forward submitted transactions to sequencer of given Scroll + /// network. + sequencer_client: Option, /// Minimum priority fee min_suggested_priority_fee: U256, /// Maximum payload size payload_size_limit: u64, - /// Sequencer client, configured to forward submitted transactions to sequencer of given Scroll - /// network. - sequencer_client: Option, } -impl ScrollEthApiInner { +impl ScrollEthApiInner { /// Returns a reference to the [`EthApiNodeBackend`]. - const fn eth_api(&self) -> &EthApiNodeBackend { + const fn eth_api(&self) -> &EthApiNodeBackend { &self.eth_api } @@ -346,40 +291,58 @@ impl ScrollEthApiInner { } } +/// Converter for Scroll RPC types. +pub type ScrollRpcConvert = RpcConverter< + NetworkT, + ::Evm, + ScrollReceiptConverter, + (), + ScrollTxInfoMapper<::Provider>, +>; + /// The default suggested priority fee for the gas price oracle. -const DEFAULT_MIN_SUGGESTED_PRIORITY_FEE: u64 = 100; +pub const DEFAULT_MIN_SUGGESTED_PRIORITY_FEE: u64 = 100; /// The default payload size limit in bytes for the sequencer. -const DEFAULT_PAYLOAD_SIZE_LIMIT: u64 = 122_880; +pub const DEFAULT_PAYLOAD_SIZE_LIMIT: u64 = 122_880; /// A type that knows how to build a [`ScrollEthApi`]. #[derive(Debug)] -pub struct ScrollEthApiBuilder { +pub struct ScrollEthApiBuilder { + /// Sequencer client, configured to forward submitted transactions to sequencer of given Scroll + /// network. + sequencer_url: Option, /// Minimum suggested priority fee (tip) min_suggested_priority_fee: u64, /// Maximum payload size payload_size_limit: u64, - /// Sequencer client, configured to forward submitted transactions to sequencer of given Scroll - /// network. - sequencer_url: Option, + /// Marker for network types. 
+ _nt: PhantomData, } -impl Default for ScrollEthApiBuilder { +impl Default for ScrollEthApiBuilder { fn default() -> Self { Self { + sequencer_url: None, min_suggested_priority_fee: DEFAULT_MIN_SUGGESTED_PRIORITY_FEE, payload_size_limit: DEFAULT_PAYLOAD_SIZE_LIMIT, - sequencer_url: None, + _nt: PhantomData, } } } -impl ScrollEthApiBuilder { +impl ScrollEthApiBuilder { /// Creates a [`ScrollEthApiBuilder`] instance. pub fn new() -> Self { Self::default() } + /// With a [`SequencerClient`]. + pub fn with_sequencer(mut self, sequencer_url: Option) -> Self { + self.sequencer_url = sequencer_url; + self + } + /// With minimum suggested priority fee (tip) pub const fn with_min_suggested_priority_fee(mut self, min: u64) -> Self { self.min_suggested_priority_fee = min; @@ -391,38 +354,24 @@ impl ScrollEthApiBuilder { self.payload_size_limit = limit; self } - - /// With a [`SequencerClient`]. - pub fn with_sequencer(mut self, sequencer_url: Option) -> Self { - self.sequencer_url = sequencer_url; - self - } } -impl EthApiBuilder for ScrollEthApiBuilder +impl EthApiBuilder for ScrollEthApiBuilder where - N: FullNodeComponents, - ScrollEthApi: FullEthApiServer, + N: FullNodeComponents>>>, + NetworkT: RpcTypes, + ScrollRpcConvert: RpcConvert, + ScrollEthApi>: + FullEthApiServer + AddDevSigners, { - type EthApi = ScrollEthApi; + type EthApi = ScrollEthApi>; async fn build_eth_api(self, ctx: EthApiCtx<'_, N>) -> eyre::Result { - let Self { min_suggested_priority_fee, payload_size_limit, sequencer_url } = self; - - let eth_api = reth_rpc::EthApiBuilder::new( - ctx.components.provider().clone(), - ctx.components.pool().clone(), - ctx.components.network().clone(), - ctx.components.evm_config().clone(), - ) - .eth_cache(ctx.cache) - .task_spawner(ctx.components.task_executor().clone()) - .gas_cap(ctx.config.rpc_gas_cap.into()) - .max_simulate_blocks(ctx.config.rpc_max_simulate_blocks) - .eth_proof_window(ctx.config.eth_proof_window) - .fee_history_cache_config(ctx.config.fee_history_cache) - .proof_permits(ctx.config.proof_permits) - .build_inner(); + let Self { min_suggested_priority_fee, payload_size_limit, sequencer_url, .. } = self; + let rpc_converter = RpcConverter::new(ScrollReceiptConverter::default()) + .with_mapper(ScrollTxInfoMapper::new(ctx.components.provider().clone())); + + let eth_api = ctx.eth_api_builder().with_rpc_converter(rpc_converter).build_inner(); let sequencer_client = if let Some(url) = sequencer_url { Some( @@ -436,9 +385,9 @@ where Ok(ScrollEthApi::new( eth_api, + sequencer_client, U256::from(min_suggested_priority_fee), payload_size_limit, - sequencer_client, )) } } diff --git a/crates/scroll/rpc/src/eth/pending_block.rs b/crates/scroll/rpc/src/eth/pending_block.rs index 7b0323288ea..21075c77f87 100644 --- a/crates/scroll/rpc/src/eth/pending_block.rs +++ b/crates/scroll/rpc/src/eth/pending_block.rs @@ -1,75 +1,25 @@ //! Loads Scroll pending block for an RPC response. 
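For orientation, the `ScrollEthApiBuilder` rework above now carries a `PhantomData` network marker and makes its two default constants public. A minimal sketch of the new configuration flow, assuming the usual `reth_scroll_rpc` re-exports (the generic parameters are elided in this diff, and the sequencer URL is a placeholder):

```rust
use reth_scroll_rpc::{
    ScrollEthApiBuilder, DEFAULT_MIN_SUGGESTED_PRIORITY_FEE, DEFAULT_PAYLOAD_SIZE_LIMIT,
};

// Sketch only: set an optional sequencer endpoint and the (now public)
// defaults; `build_eth_api` is driven later by the node's RPC setup.
let builder = ScrollEthApiBuilder::new()
    .with_sequencer(Some("http://sequencer.example".to_string()))
    .with_min_suggested_priority_fee(DEFAULT_MIN_SUGGESTED_PRIORITY_FEE)
    .with_payload_size_limit(DEFAULT_PAYLOAD_SIZE_LIMIT);
```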
-use crate::ScrollEthApi; - -use alloy_consensus::{BlockHeader, Header}; -use reth_chainspec::EthChainSpec; -use reth_evm::ConfigureEvm; -use reth_primitives_traits::{NodePrimitives, SealedHeader}; -use reth_provider::{ - BlockReaderIdExt, ChainSpecProvider, ProviderBlock, ProviderHeader, ProviderReceipt, - ProviderTx, StateProviderFactory, -}; +use crate::{ScrollEthApi, ScrollEthApiError}; use reth_rpc_eth_api::{ - helpers::{LoadPendingBlock, SpawnBlocking}, - types::RpcTypes, - EthApiTypes, RpcConvert, RpcNodeCore, + helpers::{pending_block::PendingEnvBuilder, LoadPendingBlock}, + RpcConvert, RpcNodeCore, }; use reth_rpc_eth_types::{error::FromEvmError, PendingBlock}; -use reth_scroll_evm::ScrollNextBlockEnvAttributes; -use reth_scroll_primitives::{ScrollBlock, ScrollReceipt, ScrollTransactionSigned}; -use reth_transaction_pool::{PoolTransaction, TransactionPool}; -use scroll_alloy_hardforks::ScrollHardforks; -impl LoadPendingBlock for ScrollEthApi +impl LoadPendingBlock for ScrollEthApi where - Self: SpawnBlocking - + EthApiTypes< - NetworkTypes: RpcTypes< - Header = alloy_rpc_types_eth::Header>, - >, - Error: FromEvmError, - RpcConvert: RpcConvert, - >, - N: RpcNodeCore< - Provider: BlockReaderIdExt< - Transaction = ScrollTransactionSigned, - Block = ScrollBlock, - Receipt = ScrollReceipt, - Header = Header, - > + ChainSpecProvider - + StateProviderFactory, - Pool: TransactionPool>>, - Evm: ConfigureEvm< - Primitives = ::Primitives, - NextBlockEnvCtx = ScrollNextBlockEnvAttributes, - >, - Primitives: NodePrimitives< - BlockHeader = ProviderHeader, - SignedTx = ProviderTx, - Receipt = ProviderReceipt, - Block = ProviderBlock, - >, - >, + N: RpcNodeCore, + ScrollEthApiError: FromEvmError, + Rpc: RpcConvert, { #[inline] - fn pending_block( - &self, - ) -> &tokio::sync::Mutex< - Option, ProviderReceipt>>, - > { + fn pending_block(&self) -> &tokio::sync::Mutex>> { self.inner.eth_api.pending_block() } - fn next_env_attributes( - &self, - parent: &SealedHeader>, - ) -> Result<::NextBlockEnvCtx, Self::Error> { - Ok(ScrollNextBlockEnvAttributes { - timestamp: parent.timestamp().saturating_add(3), - suggested_fee_recipient: parent.beneficiary(), - gas_limit: parent.gas_limit(), - base_fee: parent.base_fee_per_gas().unwrap_or_default(), - }) + #[inline] + fn pending_env_builder(&self) -> &dyn PendingEnvBuilder { + self.inner.eth_api.pending_env_builder() } } diff --git a/crates/scroll/rpc/src/eth/receipt.rs b/crates/scroll/rpc/src/eth/receipt.rs index 254fb1f546b..16d5ac93d60 100644 --- a/crates/scroll/rpc/src/eth/receipt.rs +++ b/crates/scroll/rpc/src/eth/receipt.rs @@ -2,42 +2,48 @@ use crate::{ScrollEthApi, ScrollEthApiError}; use alloy_rpc_types_eth::{Log, TransactionReceipt}; -use reth_node_api::{FullNodeComponents, NodeTypes}; -use reth_primitives::TransactionMeta; -use reth_provider::{ReceiptProvider, TransactionsProvider}; -use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcReceipt}; -use reth_rpc_eth_types::{receipt::build_receipt, EthApiError}; - -use reth_scroll_chainspec::ScrollChainSpec; +use reth_primitives_traits::NodePrimitives; +use reth_rpc_convert::{ + transaction::{ConvertReceiptInput, ReceiptConverter}, + RpcConvert, +}; +use reth_rpc_eth_api::{helpers::LoadReceipt, RpcNodeCore}; +use reth_rpc_eth_types::receipt::build_receipt; use reth_scroll_primitives::{ScrollReceipt, ScrollTransactionSigned}; use scroll_alloy_consensus::ScrollReceiptEnvelope; use scroll_alloy_rpc_types::{ScrollTransactionReceipt, ScrollTransactionReceiptFields}; +use std::fmt::Debug; + +impl 
LoadReceipt for ScrollEthApi +where + N: RpcNodeCore, + Rpc: RpcConvert, +{ +} + +/// Converter for Scroll receipts. +#[derive(Debug, Default, Clone)] +#[non_exhaustive] +pub struct ScrollReceiptConverter; -impl LoadReceipt for ScrollEthApi +impl ReceiptConverter for ScrollReceiptConverter where - Self: Send + Sync, - N: FullNodeComponents>, - Self::Provider: TransactionsProvider - + ReceiptProvider, + N: NodePrimitives, { - async fn build_transaction_receipt( + type RpcReceipt = ScrollTransactionReceipt; + type Error = ScrollEthApiError; + + fn convert_receipts( &self, - tx: ScrollTransactionSigned, - meta: TransactionMeta, - receipt: ScrollReceipt, - ) -> Result, Self::Error> { - let all_receipts = self - .inner - .eth_api - .cache() - .get_receipts(meta.block_hash) - .await - .map_err(Self::Error::from_eth_err)? - .ok_or(Self::Error::from_eth_err(EthApiError::HeaderNotFound( - meta.block_hash.into(), - )))?; + inputs: Vec>, + ) -> Result, Self::Error> { + let mut receipts = Vec::with_capacity(inputs.len()); + + for input in inputs { + receipts.push(ScrollReceiptBuilder::new(input)?.build()); + } - Ok(ScrollReceiptBuilder::new(&tx, meta, &receipt, &all_receipts)?.build()) + Ok(receipts) } } @@ -52,35 +58,31 @@ pub struct ScrollReceiptBuilder { impl ScrollReceiptBuilder { /// Returns a new builder. - pub fn new( - transaction: &ScrollTransactionSigned, - meta: TransactionMeta, - receipt: &ScrollReceipt, - all_receipts: &[ScrollReceipt], - ) -> Result { + pub fn new(input: ConvertReceiptInput<'_, N>) -> Result + where + N: NodePrimitives, + { let core_receipt = - build_receipt(transaction, meta, receipt, all_receipts, None, |receipt_with_bloom| { - match receipt { - ScrollReceipt::Legacy(_) => { - ScrollReceiptEnvelope::::Legacy(receipt_with_bloom) - } - ScrollReceipt::Eip2930(_) => { - ScrollReceiptEnvelope::::Eip2930(receipt_with_bloom) - } - ScrollReceipt::Eip1559(_) => { - ScrollReceiptEnvelope::::Eip1559(receipt_with_bloom) - } - ScrollReceipt::Eip7702(_) => { - ScrollReceiptEnvelope::::Eip7702(receipt_with_bloom) - } - ScrollReceipt::L1Message(_) => { - ScrollReceiptEnvelope::::L1Message(receipt_with_bloom) - } + build_receipt(&input, None, |receipt_with_bloom| match input.receipt.as_ref() { + ScrollReceipt::Legacy(_) => { + ScrollReceiptEnvelope::::Legacy(receipt_with_bloom) + } + ScrollReceipt::Eip2930(_) => { + ScrollReceiptEnvelope::::Eip2930(receipt_with_bloom) + } + ScrollReceipt::Eip1559(_) => { + ScrollReceiptEnvelope::::Eip1559(receipt_with_bloom) + } + ScrollReceipt::Eip7702(_) => { + ScrollReceiptEnvelope::::Eip7702(receipt_with_bloom) + } + ScrollReceipt::L1Message(_) => { + ScrollReceiptEnvelope::::L1Message(receipt_with_bloom) } - })?; + }); let scroll_receipt_fields = - ScrollTransactionReceiptFields { l1_fee: Some(receipt.l1_fee().saturating_to()) }; + ScrollTransactionReceiptFields { l1_fee: Some(input.receipt.l1_fee().saturating_to()) }; Ok(Self { core_receipt, scroll_receipt_fields }) } diff --git a/crates/scroll/rpc/src/eth/transaction.rs b/crates/scroll/rpc/src/eth/transaction.rs index 7a749f891da..1364b1ee642 100644 --- a/crates/scroll/rpc/src/eth/transaction.rs +++ b/crates/scroll/rpc/src/eth/transaction.rs @@ -1,36 +1,29 @@ //! Loads and formats Scroll transaction RPC response. 
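Because angle-bracketed generics are flattened throughout this diff, the `ReceiptConverter` impl above is easier to follow with its type parameters restored. Roughly, reusing the module's imports (the exact `NodePrimitives` bound is an assumption inferred from the receipt matching):

```rust
impl<N> ReceiptConverter<N> for ScrollReceiptConverter
where
    N: NodePrimitives<Receipt = ScrollReceipt>,
{
    type RpcReceipt = ScrollTransactionReceipt;
    type Error = ScrollEthApiError;

    fn convert_receipts(
        &self,
        inputs: Vec<ConvertReceiptInput<'_, N>>,
    ) -> Result<Vec<Self::RpcReceipt>, Self::Error> {
        // Build each Scroll receipt from its borrowed input in a single pass.
        let mut receipts = Vec::with_capacity(inputs.len());
        for input in inputs {
            receipts.push(ScrollReceiptBuilder::new(input)?.build());
        }
        Ok(receipts)
    }
}
```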
-use crate::{ - eth::{ScrollEthApiInner, ScrollNodeCore}, - ScrollEthApi, ScrollEthApiError, SequencerClient, -}; +use crate::{ScrollEthApi, ScrollEthApiError, SequencerClient}; use alloy_consensus::transaction::TransactionInfo; use alloy_primitives::{Bytes, B256}; use reth_evm::execute::ProviderError; -use reth_node_api::FullNodeComponents; -use reth_provider::{ - BlockReader, BlockReaderIdExt, ProviderTx, ReceiptProvider, TransactionsProvider, -}; +use reth_provider::ReceiptProvider; +use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ - helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, - try_into_scroll_tx_info, EthApiTypes, FromEthApiError, FullEthApiTypes, RpcNodeCore, - RpcNodeCoreExt, TxInfoMapper, + helpers::{spec::SignersForRpc, EthTransactions, LoadTransaction}, + try_into_scroll_tx_info, FromEthApiError, RpcNodeCore, TxInfoMapper, }; use reth_rpc_eth_types::utils::recover_raw_transaction; use reth_scroll_primitives::ScrollReceipt; -use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; -use scroll_alloy_consensus::{ScrollTransactionInfo, ScrollTxEnvelope}; -use std::{ - fmt::{Debug, Formatter}, - sync::Arc, +use reth_transaction_pool::{ + AddedTransactionOutcome, PoolTransaction, TransactionOrigin, TransactionPool, }; +use scroll_alloy_consensus::{ScrollTransactionInfo, ScrollTxEnvelope}; +use std::fmt::{Debug, Formatter}; -impl EthTransactions for ScrollEthApi +impl EthTransactions for ScrollEthApi where - Self: LoadTransaction + EthApiTypes, - N: ScrollNodeCore>>, + N: RpcNodeCore, + Rpc: RpcConvert, { - fn signers(&self) -> &parking_lot::RwLock>>>> { + fn signers(&self) -> &SignersForRpc { self.inner.eth_api.signers() } @@ -47,7 +40,7 @@ where tracing::debug!(target: "scroll::rpc::eth", hash = %pool_transaction.hash(), "forwarding raw transaction to sequencer"); // Retain tx in local tx pool before forwarding to sequencer rpc, for local RPC usage. - let hash = self + let AddedTransactionOutcome { hash, .. } = self .pool() .add_transaction(TransactionOrigin::Local, pool_transaction.clone()) .await @@ -69,7 +62,7 @@ where } // submit the transaction to the pool with a `Local` origin - let hash = self + let AddedTransactionOutcome { hash, .. } = self .pool() .add_transaction(TransactionOrigin::Local, pool_transaction) .await @@ -79,17 +72,17 @@ where } } -impl LoadTransaction for ScrollEthApi +impl LoadTransaction for ScrollEthApi where - Self: SpawnBlocking + FullEthApiTypes + RpcNodeCoreExt, - N: ScrollNodeCore, - Self::Pool: TransactionPool, + N: RpcNodeCore, + Rpc: RpcConvert, { } -impl ScrollEthApi +impl ScrollEthApi where - N: ScrollNodeCore, + N: RpcNodeCore, + Rpc: RpcConvert, { /// Returns the [`SequencerClient`] if one is set. pub fn raw_tx_forwarder(&self) -> Option { @@ -100,26 +93,30 @@ where /// Scroll implementation of [`TxInfoMapper`]. /// /// Receipt is fetched to extract the `l1_fee` for all transactions but L1 messages. -#[derive(Clone)] -pub struct ScrollTxInfoMapper(Arc>); +pub struct ScrollTxInfoMapper(Provider); + +impl Clone for ScrollTxInfoMapper { + fn clone(&self) -> Self { + Self(self.0.clone()) + } +} -impl Debug for ScrollTxInfoMapper { +impl Debug for ScrollTxInfoMapper { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { f.debug_struct("ScrollTxInfoMapper").finish() } } -impl ScrollTxInfoMapper { +impl ScrollTxInfoMapper { /// Creates [`ScrollTxInfoMapper`] that uses [`ReceiptProvider`] borrowed from given `eth_api`. 
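One behavioral detail in the transaction hunks above: `Pool::add_transaction` now resolves to an `AddedTransactionOutcome` instead of a bare hash, so both submission paths destructure it. A hedged sketch of the call-site pattern (`pool` and `pool_transaction` stand in for the surrounding method context, and the error mapping is elided):

```rust
use reth_transaction_pool::{AddedTransactionOutcome, TransactionOrigin, TransactionPool};

// Sketch only: submit with a Local origin, then pull the hash out of the
// richer outcome struct rather than receiving it directly.
let AddedTransactionOutcome { hash, .. } =
    pool.add_transaction(TransactionOrigin::Local, pool_transaction).await?;
```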
- pub const fn new(eth_api: Arc>) -> Self { - Self(eth_api) + pub const fn new(provider: Provider) -> Self { + Self(provider) } } -impl TxInfoMapper<&ScrollTxEnvelope> for ScrollTxInfoMapper +impl TxInfoMapper<&ScrollTxEnvelope> for ScrollTxInfoMapper where - N: FullNodeComponents, - N::Provider: ReceiptProvider, + Provider: ReceiptProvider, { type Out = ScrollTransactionInfo; type Err = ProviderError; @@ -129,6 +126,6 @@ where tx: &ScrollTxEnvelope, tx_info: TransactionInfo, ) -> Result { - try_into_scroll_tx_info(self.0.eth_api.provider(), tx, tx_info) + try_into_scroll_tx_info(&self.0, tx, tx_info) } } diff --git a/crates/stages/api/Cargo.toml b/crates/stages/api/Cargo.toml index 6d230b34731..c8eb81289d0 100644 --- a/crates/stages/api/Cargo.toml +++ b/crates/stages/api/Cargo.toml @@ -44,6 +44,7 @@ auto_impl.workspace = true [dev-dependencies] assert_matches.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } +tokio = { workspace = true, features = ["sync", "rt-multi-thread"] } tokio-stream.workspace = true reth-testing-utils.workspace = true diff --git a/crates/stages/api/src/metrics/listener.rs b/crates/stages/api/src/metrics/listener.rs index aba001a92f1..8c0707d1bea 100644 --- a/crates/stages/api/src/metrics/listener.rs +++ b/crates/stages/api/src/metrics/listener.rs @@ -4,6 +4,7 @@ use std::{ future::Future, pin::Pin, task::{ready, Context, Poll}, + time::Duration, }; use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender}; use tracing::trace; @@ -28,6 +29,8 @@ pub enum MetricEvent { /// Maximum known block number reachable by this stage. /// If specified, `entities_total` metric is updated. max_block_number: Option, + /// The duration of stage iteration including database commit. + elapsed: Duration, }, } @@ -57,12 +60,14 @@ impl MetricsListener { stage_checkpoint: None, }, max_block_number: Some(height), + elapsed: Duration::default(), }); } } - MetricEvent::StageCheckpoint { stage_id, checkpoint, max_block_number } => { + MetricEvent::StageCheckpoint { stage_id, checkpoint, max_block_number, elapsed } => { let stage_metrics = self.sync_metrics.get_stage_metrics(stage_id); + stage_metrics.total_elapsed.increment(elapsed.as_secs_f64()); stage_metrics.checkpoint.set(checkpoint.block_number as f64); let (processed, total) = match checkpoint.entities() { diff --git a/crates/stages/api/src/metrics/sync_metrics.rs b/crates/stages/api/src/metrics/sync_metrics.rs index b89d7b8822e..754a2b22fcc 100644 --- a/crates/stages/api/src/metrics/sync_metrics.rs +++ b/crates/stages/api/src/metrics/sync_metrics.rs @@ -4,6 +4,7 @@ use std::collections::HashMap; #[derive(Debug, Default)] pub(crate) struct SyncMetrics { + /// Stage metrics by stage. pub(crate) stages: HashMap, } @@ -26,4 +27,6 @@ pub(crate) struct StageMetrics { pub(crate) entities_processed: Gauge, /// The number of total entities of the last commit for a stage, if applicable. pub(crate) entities_total: Gauge, + /// The number of seconds spent executing the stage and committing the data. 
+ pub(crate) total_elapsed: Gauge, } diff --git a/crates/stages/api/src/pipeline/mod.rs b/crates/stages/api/src/pipeline/mod.rs index b8d41e9e552..61c6755be9f 100644 --- a/crates/stages/api/src/pipeline/mod.rs +++ b/crates/stages/api/src/pipeline/mod.rs @@ -14,7 +14,10 @@ use reth_provider::{ use reth_prune::PrunerBuilder; use reth_static_file::StaticFileProducer; use reth_tokio_util::{EventSender, EventStream}; -use std::pin::Pin; +use std::{ + pin::Pin, + time::{Duration, Instant}, +}; use tokio::sync::watch; use tracing::*; @@ -138,6 +141,7 @@ impl Pipeline { stage_id, checkpoint: provider.get_stage_checkpoint(stage_id)?.unwrap_or_default(), max_block_number: None, + elapsed: Duration::default(), }); } Ok(()) @@ -338,6 +342,7 @@ impl Pipeline { "Starting unwind" ); while checkpoint.block_number > to { + let unwind_started_at = Instant::now(); let input = UnwindInput { checkpoint, unwind_to: to, bad_block }; self.event_sender.notify(PipelineEvent::Unwind { stage_id, input }); @@ -353,6 +358,13 @@ impl Pipeline { done = checkpoint.block_number == to, "Stage unwound" ); + + provider_rw.save_stage_checkpoint(stage_id, checkpoint)?; + + // Notify event listeners and update metrics. + self.event_sender + .notify(PipelineEvent::Unwound { stage_id, result: unwind_output }); + if let Some(metrics_tx) = &mut self.metrics_tx { let _ = metrics_tx.send(MetricEvent::StageCheckpoint { stage_id, @@ -360,12 +372,9 @@ impl Pipeline { // We assume it was set in the previous execute iteration, so it // doesn't change when we unwind. max_block_number: None, + elapsed: unwind_started_at.elapsed(), }); } - provider_rw.save_stage_checkpoint(stage_id, checkpoint)?; - - self.event_sender - .notify(PipelineEvent::Unwound { stage_id, result: unwind_output }); // update finalized block if needed let last_saved_finalized_block_number = @@ -452,6 +461,7 @@ impl Pipeline { }; } + let stage_started_at = Instant::now(); let provider_rw = self.provider_factory.database_provider_rw()?; self.event_sender.notify(PipelineEvent::Run { @@ -466,18 +476,16 @@ impl Pipeline { match self.stage(stage_index).execute(&provider_rw, exec_input) { Ok(out @ ExecOutput { checkpoint, done }) => { - made_progress |= - checkpoint.block_number != prev_checkpoint.unwrap_or_default().block_number; - - if let Some(metrics_tx) = &mut self.metrics_tx { - let _ = metrics_tx.send(MetricEvent::StageCheckpoint { - stage_id, - checkpoint, - max_block_number: target, - }); - } + // Update stage checkpoint. provider_rw.save_stage_checkpoint(stage_id, checkpoint)?; + // Commit processed data to the database. + UnifiedStorageWriter::commit(provider_rw)?; + + // Invoke stage post commit hook. + self.stage(stage_index).post_execute_commit()?; + + // Notify event listeners and update metrics. 
self.event_sender.notify(PipelineEvent::Ran { pipeline_stages_progress: PipelineStagesProgress { current: stage_index + 1, @@ -486,13 +494,19 @@ impl Pipeline { stage_id, result: out.clone(), }); + if let Some(metrics_tx) = &mut self.metrics_tx { + let _ = metrics_tx.send(MetricEvent::StageCheckpoint { + stage_id, + checkpoint, + max_block_number: target, + elapsed: stage_started_at.elapsed(), + }); + } - UnifiedStorageWriter::commit(provider_rw)?; - - self.stage(stage_index).post_execute_commit()?; - + let block_number = checkpoint.block_number; + let prev_block_number = prev_checkpoint.unwrap_or_default().block_number; + made_progress |= block_number != prev_block_number; if done { - let block_number = checkpoint.block_number; return Ok(if made_progress { ControlFlow::Continue { block_number } } else { diff --git a/crates/stages/stages/Cargo.toml b/crates/stages/stages/Cargo.toml index 68e1f99d7e7..532888ca27a 100644 --- a/crates/stages/stages/Cargo.toml +++ b/crates/stages/stages/Cargo.toml @@ -76,7 +76,6 @@ reth-execution-errors.workspace = true reth-consensus = { workspace = true, features = ["test-utils"] } reth-network-p2p = { workspace = true, features = ["test-utils"] } reth-downloaders.workspace = true -reth-revm.workspace = true reth-static-file.workspace = true reth-stages-api = { workspace = true, features = ["test-utils"] } reth-testing-utils.workspace = true @@ -87,7 +86,7 @@ reth-tracing.workspace = true alloy-primitives = { workspace = true, features = ["getrandom", "rand"] } alloy-rlp.workspace = true -itertools.workspace = true + tokio = { workspace = true, features = ["rt", "sync", "macros"] } assert_matches.workspace = true rand.workspace = true diff --git a/crates/stages/stages/benches/README.md b/crates/stages/stages/benches/README.md index 7c482c59c60..c3d3268e318 100644 --- a/crates/stages/stages/benches/README.md +++ b/crates/stages/stages/benches/README.md @@ -13,10 +13,10 @@ It will generate a flamegraph report without running any criterion analysis. ``` cargo bench --package reth-stages --bench criterion --features test-utils -- --profile-time=2 ``` -Flamegraph reports can be find at `target/criterion/Stages/$STAGE_LABEL/profile/flamegraph.svg` +Flamegraph reports can be found at `target/criterion/Stages/$STAGE_LABEL/profile/flamegraph.svg` ## External DB support To choose an external DB, just pass an environment variable to the `cargo bench` command. 
-* Account Hashing Stage: `ACCOUNT_HASHING_DB=` \ No newline at end of file +* Account Hashing Stage: `ACCOUNT_HASHING_DB=` diff --git a/crates/stages/stages/src/sets.rs b/crates/stages/stages/src/sets.rs index 776b00ef420..30dbd9281dd 100644 --- a/crates/stages/stages/src/sets.rs +++ b/crates/stages/stages/src/sets.rs @@ -451,12 +451,12 @@ where .add_stage(IndexStorageHistoryStage::new( self.stages_config.index_storage_history, self.stages_config.etl.clone(), - self.prune_modes.account_history, + self.prune_modes.storage_history, )) .add_stage(IndexAccountHistoryStage::new( self.stages_config.index_account_history, self.stages_config.etl.clone(), - self.prune_modes.storage_history, + self.prune_modes.account_history, )) } } diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index e5592cd8dec..50313f24d42 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -627,7 +627,8 @@ fn execution_checkpoint( }) } -fn calculate_gas_used_from_headers( +/// Calculates the total amount of gas used from the headers in the given range. +pub fn calculate_gas_used_from_headers( provider: &StaticFileProvider, range: RangeInclusive, ) -> Result { diff --git a/crates/stages/stages/src/stages/mod.rs b/crates/stages/stages/src/stages/mod.rs index 726609b2350..b73136d0922 100644 --- a/crates/stages/stages/src/stages/mod.rs +++ b/crates/stages/stages/src/stages/mod.rs @@ -213,7 +213,7 @@ mod tests { if prune_modes.storage_history == Some(PruneMode::Full) { // Full is not supported - assert!(acc_indexing_stage.execute(&provider, input).is_err()); + assert!(storage_indexing_stage.execute(&provider, input).is_err()); } else { storage_indexing_stage.execute(&provider, input).unwrap(); diff --git a/crates/stages/stages/src/stages/prune.rs b/crates/stages/stages/src/stages/prune.rs index 6671c4a4139..f62259dcfdd 100644 --- a/crates/stages/stages/src/stages/prune.rs +++ b/crates/stages/stages/src/stages/prune.rs @@ -42,7 +42,9 @@ where + PruneCheckpointReader + PruneCheckpointWriter + BlockReader - + StaticFileProviderFactory>, + + StaticFileProviderFactory< + Primitives: NodePrimitives, + >, { fn id(&self) -> StageId { StageId::Prune } @@ -131,7 +133,9 @@ where + PruneCheckpointReader + PruneCheckpointWriter + BlockReader - + StaticFileProviderFactory>, + + StaticFileProviderFactory< + Primitives: NodePrimitives, + >, { fn id(&self) -> StageId { StageId::PruneSenderRecovery diff --git a/crates/stages/stages/src/stages/s3/downloader/fetch.rs b/crates/stages/stages/src/stages/s3/downloader/fetch.rs index 7f82552bda2..1715186bffc 100644 --- a/crates/stages/stages/src/stages/s3/downloader/fetch.rs +++ b/crates/stages/stages/src/stages/s3/downloader/fetch.rs @@ -176,11 +176,18 @@ mod tests { reth_tracing::init_test_tracing(); let b3sum = b256!("0xe9908f4992ae39c4d1fe9984dd743ae3f8e9a84a4a5af768128833605ff72723"); - let url = "https://link.testfile.org/15MB"; + let url = "https://link.testfile.org/5MB"; let file = tempfile::NamedTempFile::new().unwrap(); let filename = file.path().file_name().unwrap().to_str().unwrap(); let target_dir = file.path().parent().unwrap(); - fetch(filename, target_dir, url, 4, Some(b3sum)).await.unwrap(); + match fetch(filename, target_dir, url, 4, Some(b3sum)).await { + Ok(_) | Err(DownloaderError::EmptyContentLength) => { + // the testfile API can be flaky, so we ignore this error + } + Err(error) => { + panic!("Unexpected download error: {error:?}"); + } + } } } diff --git 
a/crates/stages/stages/src/stages/sender_recovery.rs b/crates/stages/stages/src/stages/sender_recovery.rs index e6bdb92cf20..2a2870f07ca 100644 --- a/crates/stages/stages/src/stages/sender_recovery.rs +++ b/crates/stages/stages/src/stages/sender_recovery.rs @@ -315,7 +315,7 @@ fn recover_sender( // value is greater than `secp256k1n / 2` if past EIP-2. There are transactions // pre-homestead which have large `s` values, so using [Signature::recover_signer] here // would not be backwards-compatible. - let sender = tx.recover_signer_unchecked_with_buf(rlp_buf).map_err(|_| { + let sender = tx.recover_unchecked_with_buf(rlp_buf).map_err(|_| { SenderRecoveryStageError::FailedRecovery(FailedSenderRecoveryError { tx: tx_id }) })?; diff --git a/crates/stateless/src/trie.rs b/crates/stateless/src/trie.rs index 5a35e52a7f3..f5c570b425d 100644 --- a/crates/stateless/src/trie.rs +++ b/crates/stateless/src/trie.rs @@ -9,8 +9,9 @@ use reth_errors::ProviderError; use reth_revm::state::Bytecode; use reth_trie_common::{HashedPostState, Nibbles, TRIE_ACCOUNT_RLP_MAX_SIZE}; use reth_trie_sparse::{ - blinded::DefaultBlindedProviderFactory, errors::SparseStateTrieResult, SparseStateTrie, - SparseTrie, + errors::SparseStateTrieResult, + provider::{DefaultTrieNodeProvider, DefaultTrieNodeProviderFactory}, + SparseStateTrie, SparseTrie, SparseTrieInterface, }; /// Trait for stateless trie implementations that can be used for stateless validation. @@ -174,7 +175,8 @@ fn verify_execution_witness( witness: &ExecutionWitness, pre_state_root: B256, ) -> Result<(SparseStateTrie, B256Map), StatelessValidationError> { - let mut trie = SparseStateTrie::new(DefaultBlindedProviderFactory); + let provider_factory = DefaultTrieNodeProviderFactory; + let mut trie = SparseStateTrie::new(); let mut state_witness = B256Map::default(); let mut bytecode = B256Map::default(); @@ -200,7 +202,7 @@ fn verify_execution_witness( // Calculate the root let computed_root = trie - .root() + .root(&provider_factory) .map_err(|_e| StatelessValidationError::StatelessPreStateRootCalculationFailed)?; if computed_root == pre_state_root { @@ -235,6 +237,11 @@ fn calculate_state_root( // borrowing issues. let mut storage_results = Vec::with_capacity(state.storages.len()); + // In `verify_execution_witness` a `DefaultTrieNodeProviderFactory` is used, so we use the same + // again in here. 
+ let provider_factory = DefaultTrieNodeProviderFactory; + let storage_provider = DefaultTrieNodeProvider; + for (address, storage) in state.storages.into_iter().sorted_unstable_by_key(|(addr, _)| *addr) { // Take the existing storage trie (or create an empty, “revealed” one) let mut storage_trie = @@ -250,9 +257,13 @@ fn calculate_state_root( { let nibbles = Nibbles::unpack(hashed_slot); if value.is_zero() { - storage_trie.remove_leaf(&nibbles)?; + storage_trie.remove_leaf(&nibbles, &storage_provider)?; } else { - storage_trie.update_leaf(nibbles, alloy_rlp::encode_fixed_size(&value).to_vec())?; + storage_trie.update_leaf( + nibbles, + alloy_rlp::encode_fixed_size(&value).to_vec(), + &storage_provider, + )?; } } @@ -288,14 +299,14 @@ fn calculate_state_root( // Decide whether to remove or update the account leaf if account.is_empty() && storage_root == EMPTY_ROOT_HASH { - trie.remove_account_leaf(&nibbles)?; + trie.remove_account_leaf(&nibbles, &provider_factory)?; } else { account_rlp_buf.clear(); account.into_trie_account(storage_root).encode(&mut account_rlp_buf); - trie.update_account_leaf(nibbles, account_rlp_buf.clone())?; + trie.update_account_leaf(nibbles, account_rlp_buf.clone(), &provider_factory)?; } } // Return new state root - trie.root() + trie.root(&provider_factory) } diff --git a/crates/stateless/src/validation.rs b/crates/stateless/src/validation.rs index a2a93f38e26..165deac1bb3 100644 --- a/crates/stateless/src/validation.rs +++ b/crates/stateless/src/validation.rs @@ -71,7 +71,7 @@ pub enum StatelessValidationError { HeaderDeserializationFailed, /// Error when the computed state root does not match the one in the block header. - #[error("mismatched post- state root: {got}\n {expected}")] + #[error("mismatched post-state root: {got}\n {expected}")] PostStateRootMismatch { /// The computed post-state root got: B256, diff --git a/crates/static-file/types/src/lib.rs b/crates/static-file/types/src/lib.rs index 5d638493643..caa0bd39e9e 100644 --- a/crates/static-file/types/src/lib.rs +++ b/crates/static-file/types/src/lib.rs @@ -110,12 +110,11 @@ impl StaticFileTargets { (self.block_meta.as_ref(), static_files.block_meta), ] .iter() - .all(|(target_block_range, highest_static_fileted_block)| { + .all(|(target_block_range, highest_static_file_block)| { target_block_range.is_none_or(|target_block_range| { *target_block_range.start() == - highest_static_fileted_block.map_or(0, |highest_static_fileted_block| { - highest_static_fileted_block + 1 - }) + highest_static_file_block + .map_or(0, |highest_static_file_block| highest_static_file_block + 1) }) }) } diff --git a/crates/storage/codecs/src/alloy/transaction/ethereum.rs b/crates/storage/codecs/src/alloy/transaction/ethereum.rs index 799fcf7861e..7824f60301a 100644 --- a/crates/storage/codecs/src/alloy/transaction/ethereum.rs +++ b/crates/storage/codecs/src/alloy/transaction/ethereum.rs @@ -112,7 +112,7 @@ impl Envelope } } -/// Wraps the [`Compact`] trait. +/// Compact serialization for transaction envelopes with compression and bitfield packing. pub trait CompactEnvelope: Sized { /// Takes a buffer which can be written to. *Ideally*, it returns the length written to. 
fn to_compact(&self, buf: &mut B) -> usize diff --git a/crates/storage/db-api/src/mock.rs b/crates/storage/db-api/src/mock.rs index ece47f81ee5..d37ffa289b9 100644 --- a/crates/storage/db-api/src/mock.rs +++ b/crates/storage/db-api/src/mock.rs @@ -7,6 +7,7 @@ use crate::{ ReverseWalker, Walker, }, database::Database, + database_metrics::DatabaseMetrics, table::{DupSort, Encode, Table, TableImporter}, transaction::{DbTx, DbTxMut}, DatabaseError, @@ -34,6 +35,8 @@ impl Database for DatabaseMock { } } +impl DatabaseMetrics for DatabaseMock {} + /// Mock read only tx #[derive(Debug, Clone, Default)] pub struct TxMock { diff --git a/crates/storage/db-api/src/models/mod.rs b/crates/storage/db-api/src/models/mod.rs index 7edf2987ee4..f593662ee4a 100644 --- a/crates/storage/db-api/src/models/mod.rs +++ b/crates/storage/db-api/src/models/mod.rs @@ -215,7 +215,7 @@ impl_compression_for_compact!( Header, Account, Log, - Receipt, + Receipt, TxType, StorageEntry, BranchNodeCompact, diff --git a/crates/storage/db-common/Cargo.toml b/crates/storage/db-common/Cargo.toml index 7d05bc9815f..7ddcaaa01b8 100644 --- a/crates/storage/db-common/Cargo.toml +++ b/crates/storage/db-common/Cargo.toml @@ -43,7 +43,6 @@ tracing.workspace = true [dev-dependencies] reth-db = { workspace = true, features = ["mdbx"] } reth-provider = { workspace = true, features = ["test-utils"] } -alloy-consensus.workspace = true [lints] workspace = true diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index d536e69a270..3234666e7c7 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -146,12 +146,20 @@ impl DatabaseArguments { self } + /// Set the maximum duration of a read transaction. + pub const fn max_read_transaction_duration( + &mut self, + max_read_transaction_duration: Option, + ) { + self.max_read_transaction_duration = max_read_transaction_duration; + } + /// Set the maximum duration of a read transaction. pub const fn with_max_read_transaction_duration( mut self, max_read_transaction_duration: Option, ) -> Self { - self.max_read_transaction_duration = max_read_transaction_duration; + self.max_read_transaction_duration(max_read_transaction_duration); self } diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs index 9be857796f1..f4a639cb341 100644 --- a/crates/storage/libmdbx-rs/src/environment.rs +++ b/crates/storage/libmdbx-rs/src/environment.rs @@ -515,7 +515,7 @@ impl Default for Geometry { /// Read transactions prevent reuse of pages freed by newer write transactions, thus the database /// can grow quickly. This callback will be called when there is not enough space in the database /// (i.e. before increasing the database size or before `MDBX_MAP_FULL` error) and thus can be -/// used to resolve issues with a "long-lived" read transacttions. +/// used to resolve issues with "long-lived" read transactions. /// /// Depending on the arguments and needs, your implementation may wait, /// terminate a process or thread that is performing a long read, or perform @@ -782,7 +782,7 @@ impl EnvironmentBuilder { /// Sets the maximum number of threads or reader slots for the environment. /// /// This defines the number of slots in the lock table that is used to track readers in the - /// the environment. The default is 126. Starting a read-only transaction normally ties a lock + /// environment. The default is 126. 
Starting a read-only transaction normally ties a lock /// table slot to the [Transaction] object until it or the [Environment] object is destroyed. pub const fn set_max_readers(&mut self, max_readers: u64) -> &mut Self { self.max_readers = Some(max_readers); diff --git a/crates/storage/nippy-jar/src/writer.rs b/crates/storage/nippy-jar/src/writer.rs index b20d9d65070..52bdd8b2e64 100644 --- a/crates/storage/nippy-jar/src/writer.rs +++ b/crates/storage/nippy-jar/src/writer.rs @@ -48,7 +48,7 @@ pub struct NippyJarWriter { impl NippyJarWriter { /// Creates a [`NippyJarWriter`] from [`NippyJar`]. /// - /// If will **always** attempt to heal any inconsistent state when called. + /// It will **always** attempt to heal any inconsistent state when called. pub fn new(jar: NippyJar) -> Result { let (data_file, offsets_file, is_created) = Self::create_or_open_files(jar.data_path(), &jar.offsets_path())?; diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index c45fde7729c..82a3726c43e 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -74,14 +74,12 @@ reth-ethereum-primitives.workspace = true revm-database-interface.workspace = true revm-state.workspace = true -parking_lot.workspace = true + tempfile.workspace = true assert_matches.workspace = true rand.workspace = true -eyre.workspace = true tokio = { workspace = true, features = ["sync", "macros", "rt-multi-thread"] } -alloy-consensus.workspace = true [features] test-utils = [ diff --git a/crates/storage/provider/src/bundle_state/state_reverts.rs b/crates/storage/provider/src/bundle_state/state_reverts.rs index a44e038d49b..7ffdc153b22 100644 --- a/crates/storage/provider/src/bundle_state/state_reverts.rs +++ b/crates/storage/provider/src/bundle_state/state_reverts.rs @@ -173,7 +173,7 @@ mod tests { (B256::from_slice(&[8; 32]), U256::from(70)), // Revert takes priority. (B256::from_slice(&[9; 32]), U256::from(80)), // Only revert present. (B256::from_slice(&[10; 32]), U256::from(85)), // Wiped entry. - (B256::from_slice(&[15; 32]), U256::from(90)), // WGreater revert entry + (B256::from_slice(&[15; 32]), U256::from(90)), // Greater revert entry ] ); } diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 5bc5e707153..f372d0c0c09 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -305,7 +305,7 @@ impl BlockReader for BlockchainProvider { fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { Ok(self.canonical_in_memory_state.pending_block_and_receipts()) } @@ -1205,7 +1205,10 @@ mod tests { Some(RecoveredBlock::new_sealed(block.clone(), block.senders().unwrap())) ); - assert_eq!(provider.pending_block_and_receipts()?, Some((block, vec![]))); + assert_eq!( + provider.pending_block_and_receipts()?, + Some((RecoveredBlock::new_sealed(block.clone(), block.senders().unwrap()), vec![])) + ); Ok(()) } @@ -2049,7 +2052,7 @@ mod tests { // Test range that spans database and in-memory { - // This block will be persisted to disk and removed from memory AFTER the firsk database query. This ensures that we query the in-memory state before the database avoiding any race condition. + // This block will be persisted to disk and removed from memory AFTER the first database query. 
This ensures that we query the in-memory state before the database avoiding any race condition. persist_block_after_db_tx_creation(provider.clone(), in_memory_blocks[0].number); assert_eq!( @@ -2141,7 +2144,7 @@ mod tests { // Test range that spans database and in-memory { - // This block will be persisted to disk and removed from memory AFTER the firsk database query. This ensures that we query the in-memory state before the database avoiding any race condition. + // This block will be persisted to disk and removed from memory AFTER the first database query. This ensures that we query the in-memory state before the database avoiding any race condition. persist_block_after_db_tx_creation(provider.clone(), in_memory_blocks[0].number); assert_eq!( @@ -2258,7 +2261,7 @@ mod tests { // Ensure that the first generated in-memory block exists { - // This block will be persisted to disk and removed from memory AFTER the firsk database query. This ensures that we query the in-memory state before the database avoiding any race condition. + // This block will be persisted to disk and removed from memory AFTER the first database query. This ensures that we query the in-memory state before the database avoiding any race condition. persist_block_after_db_tx_creation(provider.clone(), in_memory_blocks[0].number); call_method!($arg_count, provider, $method, $item_extractor, tx_num, tx_hash, &in_memory_blocks[0], &receipts); diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index 3922e286c29..f617c3f6fa4 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -20,9 +20,7 @@ use reth_chainspec::ChainInfo; use reth_db_api::models::{AccountBeforeTx, BlockNumberAddress, StoredBlockBodyIndices}; use reth_execution_types::{BundleStateInit, ExecutionOutcome, RevertsInit}; use reth_node_types::{BlockTy, HeaderTy, ReceiptTy, TxTy}; -use reth_primitives_traits::{ - Account, BlockBody, RecoveredBlock, SealedBlock, SealedHeader, StorageEntry, -}; +use reth_primitives_traits::{Account, BlockBody, RecoveredBlock, SealedHeader, StorageEntry}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ @@ -853,7 +851,7 @@ impl BlockReader for ConsistentProvider { fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { Ok(self.canonical_in_memory_state.pending_block_and_receipts()) } diff --git a/crates/storage/provider/src/providers/database/builder.rs b/crates/storage/provider/src/providers/database/builder.rs index 2f25c806945..4bc8569432e 100644 --- a/crates/storage/provider/src/providers/database/builder.rs +++ b/crates/storage/provider/src/providers/database/builder.rs @@ -4,7 +4,10 @@ //! up to the intended build target. 
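The recurring storage-side change above is that `pending_block_and_receipts` now yields a `RecoveredBlock` (senders already attached) rather than a `SealedBlock`. A hedged sketch of the adjusted caller expectation, mirroring the updated test (`provider` and `block` are placeholders):

```rust
use reth_primitives_traits::RecoveredBlock;

// Sketch only: pending blocks arrive with recovered senders, so the expected
// value pairs the sealed block with its senders up front.
let expected = RecoveredBlock::new_sealed(block.clone(), block.senders().unwrap());
assert_eq!(provider.pending_block_and_receipts()?, Some((expected, vec![])));
```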
use crate::{providers::StaticFileProvider, ProviderFactory}; -use reth_db::{mdbx::DatabaseArguments, open_db_read_only, DatabaseEnv}; +use reth_db::{ + mdbx::{DatabaseArguments, MaxReadTransactionDuration}, + open_db_read_only, DatabaseEnv, +}; use reth_db_api::{database_metrics::DatabaseMetrics, Database}; use reth_node_types::{NodeTypes, NodeTypesWithDBAdapter}; use std::{ @@ -62,7 +65,7 @@ impl ProviderFactoryBuilder { /// ```no_run /// use reth_chainspec::MAINNET; /// use reth_node_types::NodeTypes; - /// /// + /// /// use reth_provider::providers::{ProviderFactoryBuilder, ReadOnlyConfig}; /// /// fn demo>() { @@ -71,6 +74,29 @@ impl ProviderFactoryBuilder { /// .unwrap(); /// } /// ``` + /// + /// # Open an instance with disabled read-transaction timeout + /// + /// By default, read transactions are automatically terminated after a timeout to prevent + /// database free list growth. However, if the database is static (no writes occurring), this + /// safety mechanism can be disabled using + /// [`ReadOnlyConfig::disable_long_read_transaction_safety`]. + /// + /// ```no_run + /// use reth_chainspec::MAINNET; + /// use reth_node_types::NodeTypes; + /// + /// use reth_provider::providers::{ProviderFactoryBuilder, ReadOnlyConfig}; + /// + /// fn demo>() { + /// let provider_factory = ProviderFactoryBuilder::::default() + /// .open_read_only( + /// MAINNET.clone(), + /// ReadOnlyConfig::from_datadir("datadir").disable_long_read_transaction_safety(), + /// ) + /// .unwrap(); + /// } + /// ``` pub fn open_read_only( self, chainspec: Arc, @@ -129,6 +155,15 @@ impl ReadOnlyConfig { Self::from_dirs(datadir.join("db"), datadir.join("static_files")) } + /// Disables long-lived read transaction safety guarantees. + /// + /// Caution: Keeping database transaction open indefinitely can cause the free list to grow if + /// changes to the database are made. + pub const fn disable_long_read_transaction_safety(mut self) -> Self { + self.db_args.max_read_transaction_duration(Some(MaxReadTransactionDuration::Unbounded)); + self + } + /// Derives the [`ReadOnlyConfig`] from the database dir. /// /// By default this assumes the following datadir layout: diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 6fdff7bfa88..a172fda90da 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -18,7 +18,7 @@ use reth_errors::{RethError, RethResult}; use reth_node_types::{ BlockTy, HeaderTy, NodeTypes, NodeTypesWithDB, NodeTypesWithDBAdapter, ReceiptTy, TxTy, }; -use reth_primitives_traits::{RecoveredBlock, SealedBlock, SealedHeader}; +use reth_primitives_traits::{RecoveredBlock, SealedHeader}; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; @@ -347,7 +347,7 @@ impl BlockNumReader for ProviderFactory { fn earliest_block_number(&self) -> ProviderResult { // earliest history height tracks the lowest block number that has __not__ been expired, in - // other words, the first/earlierst available block. + // other words, the first/earliest available block. 
Ok(self.static_file_provider.earliest_history_height()) } @@ -377,7 +377,7 @@ impl BlockReader for ProviderFactory { fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { self.provider()?.pending_block_and_receipts() } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 8178a8c3133..646b81cf859 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -47,7 +47,7 @@ use reth_execution_types::{Chain, ExecutionOutcome}; use reth_node_types::{BlockTy, BodyTy, HeaderTy, NodeTypes, ReceiptTy, TxTy}; use reth_primitives_traits::{ Account, Block as _, BlockBody as _, Bytecode, GotExpected, NodePrimitives, RecoveredBlock, - SealedBlock, SealedHeader, SignedTransaction, StorageEntry, + SealedHeader, SignedTransaction, StorageEntry, }; use reth_prune_types::{ PruneCheckpoint, PruneMode, PruneModes, PruneSegment, MINIMUM_PRUNING_DISTANCE, @@ -1196,7 +1196,7 @@ impl BlockReader for DatabaseProvid fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { Ok(None) } diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index dee449c82d9..d66d2cdf2ce 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -36,7 +36,7 @@ use reth_db_api::{ use reth_ethereum_primitives::{Receipt, TransactionSigned}; use reth_nippy_jar::{NippyJar, NippyJarChecker, CONFIG_FILE_EXTENSION}; use reth_node_types::{FullNodePrimitives, NodePrimitives}; -use reth_primitives_traits::{RecoveredBlock, SealedBlock, SealedHeader, SignedTransaction}; +use reth_primitives_traits::{RecoveredBlock, SealedHeader, SignedTransaction}; use reth_stages_types::{PipelineTarget, StageId}; use reth_static_file_types::{ find_fixed_range, HighestStaticFiles, SegmentHeader, SegmentRangeInclusive, StaticFileSegment, @@ -1763,7 +1763,7 @@ impl> fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index 3fd2828faad..356d46c85bb 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -702,7 +702,7 @@ impl StaticFileProviderRW { Ok(Some(tx_number)) } - /// Adds an instruction to prune `to_delete`transactions during commit. + /// Adds an instruction to prune `to_delete` transactions during commit. /// /// Note: `last_block` refers to the block the unwinds ends at. pub fn prune_transactions( @@ -732,7 +732,7 @@ impl StaticFileProviderRW { self.queue_prune(to_delete, None) } - /// Adds an instruction to prune `to_delete` bloc_ meta rows during commit. + /// Adds an instruction to prune `to_delete` block meta rows during commit. 
pub fn prune_block_meta(&mut self, to_delete: u64) -> ProviderResult<()> { debug_assert_eq!(self.writer.user_header().segment(), StaticFileSegment::BlockMeta); self.queue_prune(to_delete, None) diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 2d0cfb665df..68f8c38e59d 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -23,8 +23,7 @@ use reth_ethereum_primitives::{EthPrimitives, Receipt}; use reth_execution_types::ExecutionOutcome; use reth_node_types::NodeTypes; use reth_primitives_traits::{ - Account, Bytecode, GotExpected, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader, - SignerRecoverable, + Account, Bytecode, GotExpected, NodePrimitives, RecoveredBlock, SealedHeader, SignerRecoverable, }; use reth_prune_types::PruneModes; use reth_stages_types::{StageCheckpoint, StageId}; @@ -65,6 +64,8 @@ pub struct MockEthProvider< pub chain_spec: Arc, /// Local state roots pub state_roots: Arc>>, + /// Local block body indices store + pub block_body_indices: Arc>>, tx: TxMock, prune_modes: Arc, } @@ -81,6 +82,7 @@ where accounts: self.accounts.clone(), chain_spec: self.chain_spec.clone(), state_roots: self.state_roots.clone(), + block_body_indices: self.block_body_indices.clone(), tx: self.tx.clone(), prune_modes: self.prune_modes.clone(), } @@ -97,6 +99,7 @@ impl MockEthProvider { accounts: Default::default(), chain_spec: Arc::new(reth_chainspec::ChainSpecBuilder::mainnet().build()), state_roots: Default::default(), + block_body_indices: Default::default(), tx: Default::default(), prune_modes: Default::default(), } @@ -157,6 +160,15 @@ impl MockEthProvider MockEthProvider BlockReader fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { Ok(None) } @@ -955,8 +968,8 @@ impl StatePr impl BlockBodyIndicesProvider for MockEthProvider { - fn block_body_indices(&self, _num: u64) -> ProviderResult> { - Ok(None) + fn block_body_indices(&self, num: u64) -> ProviderResult> { + Ok(self.block_body_indices.lock().get(&num).copied()) } fn block_body_indices_range( &self, diff --git a/crates/alloy-provider/Cargo.toml b/crates/storage/rpc-provider/Cargo.toml similarity index 82% rename from crates/alloy-provider/Cargo.toml rename to crates/storage/rpc-provider/Cargo.toml index 22a8e724890..a47bf7ea218 100644 --- a/crates/alloy-provider/Cargo.toml +++ b/crates/storage/rpc-provider/Cargo.toml @@ -1,12 +1,12 @@ [package] -name = "reth-alloy-provider" +name = "reth-storage-rpc-provider" version.workspace = true edition.workspace = true rust-version.workspace = true license.workspace = true homepage.workspace = true repository.workspace = true -description = "Alloy provider implementation for reth that fetches state via RPC" +description = "RPC-based blockchain provider for reth that fetches data via RPC calls" [lints] workspace = true @@ -27,7 +27,7 @@ reth-db-api.workspace = true reth-rpc-convert.workspace = true # alloy -alloy-provider.workspace = true +alloy-provider = { workspace = true, features = ["debug-api"] } alloy-network.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true @@ -40,10 +40,10 @@ tokio = { workspace = true, features = ["sync", "macros", "rt-multi-thread"] } # other tracing.workspace = true +parking_lot.workspace = true # revm revm.workspace = true -revm-primitives.workspace = true [dev-dependencies] tokio = { workspace = true, features = ["rt", "macros"] } diff --git 
a/crates/storage/rpc-provider/README.md b/crates/storage/rpc-provider/README.md new file mode 100644 index 00000000000..7180d41840d --- /dev/null +++ b/crates/storage/rpc-provider/README.md @@ -0,0 +1,71 @@ +# RPC Blockchain Provider for Reth + +This crate provides an RPC-based implementation of reth's [`BlockchainProvider`](../provider/src/providers/blockchain_provider.rs): where `BlockchainProvider` serves local blockchain data, this crate offers the same functionality for remote blockchain access via RPC. + +Originally created by [cakevm](https://github.com/cakevm/alloy-reth-provider). + +## Features + +- Provides the same interface as `BlockchainProvider` but for remote nodes +- Implements `StateProviderFactory` for remote RPC state access +- Supports Ethereum networks +- Useful for testing without requiring a full database +- Can be used with reth ExEx (Execution Extensions) for testing + +## Usage + +```rust +use alloy_provider::ProviderBuilder; +use reth_storage_rpc_provider::RpcBlockchainProvider; + +// Initialize provider +let provider = ProviderBuilder::new() + .builtin("https://eth.merkle.io") + .await + .unwrap(); + +// Create RPC blockchain provider with NodeTypes +let rpc_provider = RpcBlockchainProvider::new(provider); + +// Get state at specific block - same interface as BlockchainProvider +let state = rpc_provider.state_by_block_id(BlockId::number(16148323)).unwrap(); +``` + +## Configuration + +The provider can be configured with custom settings: + +```rust +use reth_storage_rpc_provider::{RpcBlockchainProvider, RpcBlockchainProviderConfig}; + +let config = RpcBlockchainProviderConfig { + compute_state_root: true, // Enable state root computation + reth_rpc_support: true, // Use Reth-specific RPC methods (default: true) +}; + +let rpc_provider = RpcBlockchainProvider::new_with_config(provider, config); +``` + +## Configuration Options + +- `compute_state_root`: When enabled, computes state root and trie updates (requires Reth-specific RPC methods) +- `reth_rpc_support`: When enabled (default), uses Reth-specific RPC methods for better performance: + - `eth_getAccountInfo`: Fetches account balance, nonce, and code in a single call + - `debug_codeByHash`: Retrieves bytecode by hash without needing the address + + When disabled, falls back to standard RPC methods and caches bytecode locally for compatibility with non-Reth nodes. + +## Technical Details + +The `RpcBlockchainProvider` uses `alloy_network::AnyNetwork` for network operations, providing compatibility with various Ethereum-based networks while maintaining the expected block structure with headers. + +This provider implements the same traits as the local `BlockchainProvider`, making it a drop-in replacement for scenarios where remote RPC access is preferred over local database access. + +## License + +Licensed under either of: + +- Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) +- MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) + +at your option. diff --git a/crates/alloy-provider/src/lib.rs b/crates/storage/rpc-provider/src/lib.rs similarity index 69% rename from crates/alloy-provider/src/lib.rs rename to crates/storage/rpc-provider/src/lib.rs index ba4767006a4..1e3c288e8a4 100644 --- a/crates/alloy-provider/src/lib.rs +++ b/crates/storage/rpc-provider/src/lib.rs @@ -1,7 +1,11 @@ -//! # Alloy Provider for Reth +//! # RPC Blockchain Provider for Reth //! -//! 
This crate provides an implementation of reth's `StateProviderFactory` and related traits -//! that fetches state data via RPC instead of from a local database. +//! This crate provides an RPC-based implementation of reth's `StateProviderFactory` and related +//! traits that fetches blockchain data via RPC instead of from a local database. +//! +//! Similar to the [`BlockchainProvider`](../../provider/src/providers/blockchain_provider.rs) +//! which provides access to local blockchain data, this crate offers the same functionality but for +//! remote blockchain access via RPC. //! //! Originally created by [cakevm](https://github.com/cakevm/alloy-reth-provider). //! @@ -20,23 +24,26 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use alloy_consensus::BlockHeader; -use alloy_eips::BlockHashOrNumber; +use alloy_consensus::{constants::KECCAK_EMPTY, BlockHeader}; +use alloy_eips::{BlockHashOrNumber, BlockNumberOrTag}; use alloy_network::{primitives::HeaderResponse, BlockResponse}; -use alloy_primitives::{Address, BlockHash, BlockNumber, StorageKey, TxHash, TxNumber, B256, U256}; -use alloy_provider::{network::Network, Provider}; -use alloy_rpc_types::BlockId; +use alloy_primitives::{ + map::HashMap, Address, BlockHash, BlockNumber, StorageKey, TxHash, TxNumber, B256, U256, +}; +use alloy_provider::{ext::DebugApi, network::Network, Provider}; +use alloy_rpc_types::{AccountInfo, BlockId}; use alloy_rpc_types_engine::ForkchoiceState; +use parking_lot::RwLock; use reth_chainspec::{ChainInfo, ChainSpecProvider}; use reth_db_api::{ mock::{DatabaseMock, TxMock}, models::StoredBlockBodyIndices, }; use reth_errors::{ProviderError, ProviderResult}; -use reth_node_types::{BlockTy, HeaderTy, NodeTypes, PrimitivesTy, ReceiptTy, TxTy}; -use reth_primitives::{ - Account, Bytecode, RecoveredBlock, SealedBlock, SealedHeader, TransactionMeta, +use reth_node_types::{ + Block, BlockBody, BlockTy, HeaderTy, NodeTypes, PrimitivesTy, ReceiptTy, TxTy, }; +use reth_primitives::{Account, Bytecode, RecoveredBlock, SealedHeader, TransactionMeta}; use reth_provider::{ AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BytecodeReader, CanonChainTracker, CanonStateNotification, CanonStateNotifications, CanonStateSubscriptions, @@ -46,7 +53,7 @@ use reth_provider::{ TransactionVariant, TransactionsProvider, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; -use reth_rpc_convert::TryFromBlockResponse; +use reth_rpc_convert::{TryFromBlockResponse, TryFromReceiptResponse, TryFromTransactionResponse}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ BlockBodyIndicesProvider, BlockReaderIdExt, BlockSource, DBProvider, NodePrimitivesProvider, @@ -55,39 +62,63 @@ use reth_storage_api::{ use reth_trie::{updates::TrieUpdates, AccountProof, HashedPostState, MultiProof, TrieInput}; use std::{ collections::BTreeMap, - future::Future, + future::{Future, IntoFuture}, ops::{RangeBounds, RangeInclusive}, sync::Arc, }; use tokio::{runtime::Handle, sync::broadcast}; -use tracing::trace; +use tracing::{trace, warn}; -/// Configuration for `AlloyRethProvider` -#[derive(Debug, Clone, Default)] -pub struct AlloyRethProviderConfig { +/// Configuration for `RpcBlockchainProvider` +#[derive(Debug, Clone)] +pub struct RpcBlockchainProviderConfig { /// Whether to compute state root when creating execution outcomes pub compute_state_root: bool, + /// Whether to use Reth-specific RPC methods for better performance + /// + /// If 
enabled, the node will use Reth's RPC methods (`debug_codeByHash` and + /// `eth_getAccountInfo`) to speed up account information retrieval. When disabled, it will + /// use multiple standard RPC calls to get account information. + pub reth_rpc_support: bool, +} + +impl Default for RpcBlockchainProviderConfig { + fn default() -> Self { + Self { compute_state_root: false, reth_rpc_support: true } + } } -impl AlloyRethProviderConfig { +impl RpcBlockchainProviderConfig { /// Sets whether to compute state root when creating execution outcomes pub const fn with_compute_state_root(mut self, compute: bool) -> Self { self.compute_state_root = compute; self } + + /// Sets whether to use Reth-specific RPC methods for better performance + pub const fn with_reth_rpc_support(mut self, support: bool) -> Self { + self.reth_rpc_support = support; + self + } } -/// A provider implementation that uses Alloy RPC to fetch state data +/// An RPC-based blockchain provider that fetches blockchain data via remote RPC calls. +/// +/// This is the RPC equivalent of +/// [`BlockchainProvider`](../../provider/src/providers/blockchain_provider.rs), implementing +/// the same `StateProviderFactory` and related traits but fetching data from a remote node instead +/// of local storage. /// -/// This provider implements reth's `StateProviderFactory` and related traits, -/// allowing it to be used as a drop-in replacement for database-backed providers -/// in scenarios where RPC access is preferred (e.g., testing). +/// This provider is useful for: +/// - Testing without requiring a full local database +/// - Accessing blockchain state from remote nodes +/// - Building light clients or tools that don't need full node storage /// /// The provider type is generic over the network type N (defaulting to `AnyNetwork`), /// but the current implementation is specialized for `alloy_network::AnyNetwork` /// as it needs to access block header fields directly. 
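A quick aside on the configuration API introduced above: the builder-style setters compose with `Default` into the same settings as the README's struct-literal example. A minimal sketch (construction of the underlying alloy provider is elided and assumed to exist as `provider`):

```rust
use reth_storage_rpc_provider::{RpcBlockchainProvider, RpcBlockchainProviderConfig};

// Builder-style equivalent of the struct-literal config in the README.
// `reth_rpc_support` keeps its default (`true`), which state root computation
// relies on (`debug_stateRootWithUpdates` is a Reth-specific RPC method).
let config = RpcBlockchainProviderConfig::default().with_compute_state_root(true);

// `provider` is an already-connected alloy provider (construction elided).
let rpc_provider = RpcBlockchainProvider::new_with_config(provider, config);
```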
#[derive(Clone)] -pub struct AlloyRethProvider +pub struct RpcBlockchainProvider where Node: NodeTypes, { @@ -100,28 +131,28 @@ where /// Broadcast channel for canon state notifications canon_state_notification: broadcast::Sender>>, /// Configuration for the provider - config: AlloyRethProviderConfig, + config: RpcBlockchainProviderConfig, /// Cached chain spec chain_spec: Arc, } -impl std::fmt::Debug for AlloyRethProvider { +impl std::fmt::Debug for RpcBlockchainProvider { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("AlloyRethProvider").field("config", &self.config).finish() + f.debug_struct("RpcBlockchainProvider").field("config", &self.config).finish() } } -impl AlloyRethProvider { - /// Creates a new `AlloyRethProvider` with default configuration +impl RpcBlockchainProvider { + /// Creates a new `RpcBlockchainProvider` with default configuration pub fn new(provider: P) -> Self where Node::ChainSpec: Default, { - Self::new_with_config(provider, AlloyRethProviderConfig::default()) + Self::new_with_config(provider, RpcBlockchainProviderConfig::default()) } - /// Creates a new `AlloyRethProvider` with custom configuration - pub fn new_with_config(provider: P, config: AlloyRethProviderConfig) -> Self + /// Creates a new `RpcBlockchainProvider` with custom configuration + pub fn new_with_config(provider: P, config: RpcBlockchainProviderConfig) -> Self where Node::ChainSpec: Default, { @@ -136,6 +167,18 @@ impl AlloyRethProvider { } } + /// Use a custom chain spec for the provider + pub fn with_chain_spec(self, chain_spec: Arc) -> Self { + Self { + provider: self.provider, + node_types: std::marker::PhantomData, + network: std::marker::PhantomData, + canon_state_notification: self.canon_state_notification, + config: self.config, + chain_spec, + } + } + /// Helper function to execute async operations in a blocking context fn block_on_async(&self, fut: F) -> T where @@ -152,19 +195,21 @@ impl AlloyRethProvider { } } -impl AlloyRethProvider +impl RpcBlockchainProvider where P: Provider + Clone + 'static, N: Network, Node: NodeTypes, { /// Helper function to create a state provider for a given block ID - fn create_state_provider(&self, block_id: BlockId) -> AlloyRethStateProvider { - AlloyRethStateProvider::with_chain_spec( + fn create_state_provider(&self, block_id: BlockId) -> RpcBlockchainStateProvider { + RpcBlockchainStateProvider::with_chain_spec( self.provider.clone(), block_id, self.chain_spec.clone(), ) + .with_compute_state_root(self.config.compute_state_root) + .with_reth_rpc_support(self.config.reth_rpc_support) } /// Helper function to get state provider by block number @@ -181,7 +226,7 @@ where // This allows the types to be instantiated with any network while the actual functionality // requires AnyNetwork. Future improvements could add trait bounds for networks with // compatible block structures. -impl BlockHashReader for AlloyRethProvider +impl BlockHashReader for RpcBlockchainProvider where P: Provider + Clone + 'static, N: Network, @@ -204,15 +249,23 @@ where } } -impl BlockNumReader for AlloyRethProvider +impl BlockNumReader for RpcBlockchainProvider where P: Provider + Clone + 'static, N: Network, Node: NodeTypes, { fn chain_info(&self) -> Result { - // For RPC provider, we can't get full chain info - Err(ProviderError::UnsupportedProvider) + self.block_on_async(async { + let block = self + .provider + .get_block(BlockId::Number(BlockNumberOrTag::Latest)) + .await + .map_err(ProviderError::other)? 
+ .ok_or(ProviderError::HeaderNotFound(0.into()))?; + + Ok(ChainInfo { best_hash: block.header().hash(), best_number: block.header().number() }) + }) } fn best_block_number(&self) -> Result { @@ -233,7 +286,7 @@ where } } -impl BlockIdReader for AlloyRethProvider +impl BlockIdReader for RpcBlockchainProvider where P: Provider + Clone + 'static, N: Network, @@ -276,28 +329,51 @@ where } } -impl HeaderProvider for AlloyRethProvider +impl HeaderProvider for RpcBlockchainProvider where P: Provider + Clone + 'static, N: Network, Node: NodeTypes, + BlockTy: TryFromBlockResponse, { type Header = HeaderTy; - fn header(&self, _block_hash: &BlockHash) -> ProviderResult> { - Err(ProviderError::UnsupportedProvider) + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { + let block_response = self.block_on_async(async { + self.provider.get_block_by_hash(*block_hash).await.map_err(ProviderError::other) + })?; + + let Some(block_response) = block_response else { + // If the block was not found, return None + return Ok(None); + }; + + // Convert the network block response to primitive block + let block = as TryFromBlockResponse>::from_block_response(block_response) + .map_err(ProviderError::other)?; + + Ok(Some(block.into_header())) } - fn header_by_number(&self, _num: u64) -> ProviderResult> { - Err(ProviderError::UnsupportedProvider) + fn header_by_number(&self, num: u64) -> ProviderResult> { + let Some(sealed_header) = self.sealed_header(num)? else { + // If the block was not found, return None + return Ok(None); + }; + + Ok(Some(sealed_header.into_header())) } - fn header_td(&self, _hash: &BlockHash) -> ProviderResult> { - Err(ProviderError::UnsupportedProvider) + fn header_td(&self, hash: &BlockHash) -> ProviderResult> { + let header = self.header(hash).map_err(ProviderError::other)?; + + Ok(header.map(|b| b.difficulty())) } - fn header_td_by_number(&self, _number: BlockNumber) -> ProviderResult> { - Err(ProviderError::UnsupportedProvider) + fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult> { + let header = self.header_by_number(number).map_err(ProviderError::other)?; + + Ok(header.map(|b| b.difficulty())) } fn headers_range( @@ -309,9 +385,23 @@ where fn sealed_header( &self, - _number: BlockNumber, + number: BlockNumber, ) -> ProviderResult>> { - Err(ProviderError::UnsupportedProvider) + let block_response = self.block_on_async(async { + self.provider.get_block_by_number(number.into()).await.map_err(ProviderError::other) + })?; + + let Some(block_response) = block_response else { + // If the block was not found, return None + return Ok(None); + }; + let block_hash = block_response.header().hash(); + + // Convert the network block response to primitive block + let block = as TryFromBlockResponse>::from_block_response(block_response) + .map_err(ProviderError::other)?; + + Ok(Some(SealedHeader::new(block.into_header(), block_hash))) } fn sealed_headers_while( @@ -323,7 +413,7 @@ where } } -impl BlockBodyIndicesProvider for AlloyRethProvider +impl BlockBodyIndicesProvider for RpcBlockchainProvider where P: Provider + Clone + 'static, N: Network, @@ -341,12 +431,14 @@ where } } -impl BlockReader for AlloyRethProvider +impl BlockReader for RpcBlockchainProvider where P: Provider + Clone + 'static, N: Network, Node: NodeTypes, BlockTy: TryFromBlockResponse, + TxTy: TryFromTransactionResponse, + ReceiptTy: TryFromReceiptResponse, { type Block = BlockTy; @@ -381,7 +473,7 @@ where fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> 
{ Err(ProviderError::UnsupportedProvider) } @@ -420,37 +512,46 @@ where } } -impl BlockReaderIdExt for AlloyRethProvider +impl BlockReaderIdExt for RpcBlockchainProvider where P: Provider + Clone + 'static, N: Network, Node: NodeTypes, BlockTy: TryFromBlockResponse, + TxTy: TryFromTransactionResponse, + ReceiptTy: TryFromReceiptResponse, { fn block_by_id(&self, id: BlockId) -> ProviderResult> { match id { - BlockId::Number(number_or_tag) => self.block_by_number_or_tag(number_or_tag), BlockId::Hash(hash) => self.block_by_hash(hash.block_hash), + BlockId::Number(number_or_tag) => self.block_by_number_or_tag(number_or_tag), } } fn sealed_header_by_id( &self, - _id: BlockId, + id: BlockId, ) -> ProviderResult>> { - Err(ProviderError::UnsupportedProvider) + match id { + BlockId::Hash(hash) => self.sealed_header_by_hash(hash.block_hash), + BlockId::Number(number_or_tag) => self.sealed_header_by_number_or_tag(number_or_tag), + } } - fn header_by_id(&self, _id: BlockId) -> ProviderResult> { - Err(ProviderError::UnsupportedProvider) + fn header_by_id(&self, id: BlockId) -> ProviderResult> { + match id { + BlockId::Hash(hash) => self.header_by_hash_or_number(hash.block_hash.into()), + BlockId::Number(number_or_tag) => self.header_by_number_or_tag(number_or_tag), + } } } -impl ReceiptProvider for AlloyRethProvider +impl ReceiptProvider for RpcBlockchainProvider where P: Provider + Clone + 'static, N: Network, Node: NodeTypes, + ReceiptTy: TryFromReceiptResponse, { type Receipt = ReceiptTy; @@ -458,15 +559,53 @@ where Err(ProviderError::UnsupportedProvider) } - fn receipt_by_hash(&self, _hash: TxHash) -> ProviderResult> { - Err(ProviderError::UnsupportedProvider) + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + let receipt_response = self.block_on_async(async { + self.provider.get_transaction_receipt(hash).await.map_err(ProviderError::other) + })?; + + let Some(receipt_response) = receipt_response else { + // If the receipt was not found, return None + return Ok(None); + }; + + // Convert the network receipt response to primitive receipt + let receipt = + as TryFromReceiptResponse>::from_receipt_response(receipt_response) + .map_err(ProviderError::other)?; + + Ok(Some(receipt)) } fn receipts_by_block( &self, - _block: BlockHashOrNumber, + block: BlockHashOrNumber, ) -> ProviderResult>> { - Err(ProviderError::UnsupportedProvider) + self.block_on_async(async { + let receipts_response = self + .provider + .get_block_receipts(block.into()) + .await + .map_err(ProviderError::other)?; + + let Some(receipts) = receipts_response else { + // If the receipts were not found, return None + return Ok(None); + }; + + // Convert the network receipts response to primitive receipts + let receipts = receipts + .into_iter() + .map(|receipt_response| { + as TryFromReceiptResponse>::from_receipt_response( + receipt_response, + ) + .map_err(ProviderError::other) + }) + .collect::, _>>()?; + + Ok(Some(receipts)) + }) } fn receipts_by_tx_range( @@ -484,19 +623,22 @@ where } } -impl ReceiptProviderIdExt for AlloyRethProvider +impl ReceiptProviderIdExt for RpcBlockchainProvider where P: Provider + Clone + 'static, N: Network, Node: NodeTypes, + ReceiptTy: TryFromReceiptResponse, { } -impl TransactionsProvider for AlloyRethProvider +impl TransactionsProvider for RpcBlockchainProvider where P: Provider + Clone + 'static, N: Network, Node: NodeTypes, + BlockTy: TryFromBlockResponse, + TxTy: TryFromTransactionResponse, { type Transaction = TxTy; @@ -515,8 +657,23 @@ where Err(ProviderError::UnsupportedProvider) } - 
fn transaction_by_hash(&self, _hash: TxHash) -> ProviderResult> { - Err(ProviderError::UnsupportedProvider) + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + let transaction_response = self.block_on_async(async { + self.provider.get_transaction_by_hash(hash).await.map_err(ProviderError::other) + })?; + + let Some(transaction_response) = transaction_response else { + // If the transaction was not found, return None + return Ok(None); + }; + + // Convert the network transaction response to primitive transaction + let transaction = as TryFromTransactionResponse>::from_transaction_response( + transaction_response, + ) + .map_err(ProviderError::other)?; + + Ok(Some(transaction)) } fn transaction_by_hash_with_meta( @@ -532,9 +689,22 @@ where fn transactions_by_block( &self, - _block: BlockHashOrNumber, + block: BlockHashOrNumber, ) -> ProviderResult>> { - Err(ProviderError::UnsupportedProvider) + let block_response = self.block_on_async(async { + self.provider.get_block(block.into()).full().await.map_err(ProviderError::other) + })?; + + let Some(block_response) = block_response else { + // If the block was not found, return None + return Ok(None); + }; + + // Convert the network block response to primitive block + let block = as TryFromBlockResponse>::from_block_response(block_response) + .map_err(ProviderError::other)?; + + Ok(Some(block.into_body().into_transactions())) } fn transactions_by_block_range( @@ -563,20 +733,14 @@ where } } -impl StateProviderFactory for AlloyRethProvider +impl StateProviderFactory for RpcBlockchainProvider where P: Provider + Clone + 'static, N: Network, Node: NodeTypes, { fn latest(&self) -> Result { - trace!(target: "alloy-provider", "Getting latest state provider"); - - let block_number = self.block_on_async(async { - self.provider.get_block_number().await.map_err(ProviderError::other) - })?; - - self.state_by_block_number(block_number) + Ok(Box::new(self.create_state_provider(self.best_block_number()?.into()))) } fn state_by_block_id(&self, block_id: BlockId) -> Result { @@ -641,15 +805,15 @@ where } } -impl DatabaseProviderFactory for AlloyRethProvider +impl DatabaseProviderFactory for RpcBlockchainProvider where P: Provider + Clone + 'static, N: Network, Node: NodeTypes, { type DB = DatabaseMock; - type ProviderRW = AlloyRethStateProvider; - type Provider = AlloyRethStateProvider; + type ProviderRW = RpcBlockchainStateProvider; + type Provider = RpcBlockchainStateProvider; fn database_provider_ro(&self) -> Result { // RPC provider returns a new state provider @@ -670,7 +834,7 @@ where } } -impl CanonChainTracker for AlloyRethProvider +impl CanonChainTracker for RpcBlockchainProvider where P: Provider + Clone + 'static, N: Network, @@ -698,7 +862,7 @@ where } } -impl NodePrimitivesProvider for AlloyRethProvider +impl NodePrimitivesProvider for RpcBlockchainProvider where P: Send + Sync, N: Send + Sync, @@ -707,7 +871,7 @@ where type Primitives = PrimitivesTy; } -impl CanonStateSubscriptions for AlloyRethProvider +impl CanonStateSubscriptions for RpcBlockchainProvider where P: Provider + Clone + 'static, N: Network, @@ -719,7 +883,7 @@ where } } -impl ChainSpecProvider for AlloyRethProvider +impl ChainSpecProvider for RpcBlockchainProvider where P: Send + Sync, N: Send + Sync, @@ -733,9 +897,11 @@ where } } -/// State provider implementation that fetches state via RPC -#[derive(Clone)] -pub struct AlloyRethStateProvider +/// RPC-based state provider implementation that fetches blockchain state via remote RPC calls. 
+/// +/// This is the state provider counterpart to `RpcBlockchainProvider`, handling state queries +/// at specific block heights via RPC instead of local database access. +pub struct RpcBlockchainStateProvider where Node: NodeTypes, { @@ -749,22 +915,30 @@ where network: std::marker::PhantomData, /// Cached chain spec (shared with parent provider) chain_spec: Option>, + /// Whether to enable state root calculation + compute_state_root: bool, + /// Cached bytecode for accounts + /// + /// Since the state provider is short-lived, we don't worry about memory leaks. + code_store: RwLock>, + /// Whether to use Reth-specific RPC methods for better performance + reth_rpc_support: bool, } impl std::fmt::Debug - for AlloyRethStateProvider + for RpcBlockchainStateProvider { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("AlloyRethStateProvider") + f.debug_struct("RpcBlockchainStateProvider") .field("provider", &self.provider) .field("block_id", &self.block_id) .finish() } } -impl AlloyRethStateProvider { +impl RpcBlockchainStateProvider { /// Creates a new state provider for the given block - pub const fn new( + pub fn new( provider: P, block_id: BlockId, _primitives: std::marker::PhantomData, @@ -775,11 +949,14 @@ impl AlloyRethStateProvider { node_types: std::marker::PhantomData, network: std::marker::PhantomData, chain_spec: None, + compute_state_root: false, + code_store: RwLock::new(HashMap::default()), + reth_rpc_support: true, } } /// Creates a new state provider with a cached chain spec - pub const fn with_chain_spec( + pub fn with_chain_spec( provider: P, block_id: BlockId, chain_spec: Arc, @@ -790,6 +967,9 @@ impl AlloyRethStateProvider { node_types: std::marker::PhantomData, network: std::marker::PhantomData, chain_spec: Some(chain_spec), + compute_state_root: false, + code_store: RwLock::new(HashMap::default()), + reth_rpc_support: true, } } @@ -809,48 +989,92 @@ impl AlloyRethStateProvider { node_types: self.node_types, network: self.network, chain_spec: self.chain_spec.clone(), + compute_state_root: self.compute_state_root, + code_store: RwLock::new(HashMap::default()), + reth_rpc_support: self.reth_rpc_support, } } + /// Helper function to enable state root calculation + /// + /// If enabled, the node will compute the state root and updates. + /// When disabled, it will return zero for state root and no updates. + pub const fn with_compute_state_root(mut self, is_enable: bool) -> Self { + self.compute_state_root = is_enable; + self + } + + /// Sets whether to use Reth-specific RPC methods for better performance + /// + /// If enabled, the node will use Reth's RPC methods (`debug_codeByHash` and + /// `eth_getAccountInfo`) to speed up account information retrieval. When disabled, it will + /// use multiple standard RPC calls to get account information. 
+ pub const fn with_reth_rpc_support(mut self, is_enable: bool) -> Self { + self.reth_rpc_support = is_enable; + self + } + /// Get account information from RPC fn get_account(&self, address: Address) -> Result, ProviderError> where P: Provider + Clone + 'static, N: Network, { - self.block_on_async(async { - // Get account info in a single RPC call - let account_info = self - .provider - .get_account_info(address) - .block_id(self.block_id) - .await - .map_err(ProviderError::other)?; + let account_info = self.block_on_async(async { + // Get account info in a single RPC call using `eth_getAccountInfo` + if self.reth_rpc_support { + return self + .provider + .get_account_info(address) + .block_id(self.block_id) + .await + .map_err(ProviderError::other); + } + // Get account info in multiple RPC calls + let nonce = self.provider.get_transaction_count(address).block_id(self.block_id); + let balance = self.provider.get_balance(address).block_id(self.block_id); + let code = self.provider.get_code_at(address).block_id(self.block_id); + + let (nonce, balance, code) = tokio::join!(nonce, balance, code,); + + let account_info = AccountInfo { + balance: balance.map_err(ProviderError::other)?, + nonce: nonce.map_err(ProviderError::other)?, + code: code.map_err(ProviderError::other)?, + }; + + let code_hash = account_info.code_hash(); + if code_hash != KECCAK_EMPTY { + // Insert code into the cache + self.code_store + .write() + .insert(code_hash, Bytecode::new_raw(account_info.code.clone())); + } - // Only return account if it exists (has balance, nonce, or code) - if account_info.balance.is_zero() && - account_info.nonce == 0 && - account_info.code.is_empty() - { - Ok(None) + Ok(account_info) + })?; + + // Only return account if it exists (has balance, nonce, or code) + if account_info.balance.is_zero() && account_info.nonce == 0 && account_info.code.is_empty() + { + Ok(None) + } else { + let bytecode = if account_info.code.is_empty() { + None } else { - let bytecode = if account_info.code.is_empty() { - None - } else { - Some(Bytecode::new_raw(account_info.code)) - }; - - Ok(Some(Account { - balance: account_info.balance, - nonce: account_info.nonce, - bytecode_hash: bytecode.as_ref().map(|b| b.hash_slow()), - })) - } - }) + Some(Bytecode::new_raw(account_info.code)) + }; + + Ok(Some(Account { + balance: account_info.balance, + nonce: account_info.nonce, + bytecode_hash: bytecode.as_ref().map(|b| b.hash_slow()), + })) + } } } -impl StateProvider for AlloyRethStateProvider +impl StateProvider for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -862,18 +1086,13 @@ where storage_key: StorageKey, ) -> Result, ProviderError> { self.block_on_async(async { - let value = self - .provider - .get_storage_at(address, storage_key.into()) - .block_id(self.block_id) - .await - .map_err(ProviderError::other)?; - - if value.is_zero() { - Ok(None) - } else { - Ok(Some(value)) - } + Ok(Some( + self.provider + .get_storage_at(address, storage_key.into()) + .block_id(self.block_id) + .await + .map_err(ProviderError::other)?, + )) }) } @@ -903,19 +1122,36 @@ where } } -impl BytecodeReader for AlloyRethStateProvider +impl BytecodeReader for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, Node: NodeTypes, { - fn bytecode_by_hash(&self, _code_hash: &B256) -> Result, ProviderError> { - // Cannot fetch bytecode by hash via RPC - Err(ProviderError::UnsupportedProvider) + fn bytecode_by_hash(&self, code_hash: &B256) -> Result, ProviderError> { + if 
!self.reth_rpc_support { + return Ok(self.code_store.read().get(code_hash).cloned()); + } + + self.block_on_async(async { + // The method `debug_codeByHash` is currently only available on a Reth node + let code = self + .provider + .debug_code_by_hash(*code_hash, None) + .await + .map_err(ProviderError::other)?; + + let Some(code) = code else { + // If the code was not found, return None + return Ok(None); + }; + + Ok(Some(Bytecode::new_raw(code))) + }) } } -impl AccountReader for AlloyRethStateProvider +impl AccountReader for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -926,46 +1162,51 @@ where } } -impl StateRootProvider for AlloyRethStateProvider +impl StateRootProvider for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, Node: NodeTypes, { - fn state_root(&self, _state: HashedPostState) -> Result { - // Return the state root from the block - self.block_on_async(async { - let block = self - .provider - .get_block(self.block_id) - .await - .map_err(ProviderError::other)? - .ok_or(ProviderError::HeaderNotFound(0.into()))?; - - Ok(block.header().state_root()) - }) + fn state_root(&self, hashed_state: HashedPostState) -> Result { + self.state_root_from_nodes(TrieInput::from_state(hashed_state)) } fn state_root_from_nodes(&self, _input: TrieInput) -> Result { - Err(ProviderError::UnsupportedProvider) + warn!("state_root_from_nodes is not implemented and will return zero"); + Ok(B256::ZERO) } fn state_root_with_updates( &self, - _state: HashedPostState, + hashed_state: HashedPostState, ) -> Result<(B256, TrieUpdates), ProviderError> { - Err(ProviderError::UnsupportedProvider) + if !self.compute_state_root { + return Ok((B256::ZERO, TrieUpdates::default())); + } + + self.block_on_async(async { + self.provider + .raw_request::<(HashedPostState, BlockId), (B256, TrieUpdates)>( + "debug_stateRootWithUpdates".into(), + (hashed_state, self.block_id), + ) + .into_future() + .await + .map_err(ProviderError::other) + }) } fn state_root_from_nodes_with_updates( &self, _input: TrieInput, ) -> Result<(B256, TrieUpdates), ProviderError> { - Err(ProviderError::UnsupportedProvider) + warn!("state_root_from_nodes_with_updates is not implemented and will return zero"); + Ok((B256::ZERO, TrieUpdates::default())) } } -impl StorageReader for AlloyRethStateProvider +impl StorageReader for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1004,7 +1245,7 @@ where } } -impl reth_storage_api::StorageRootProvider for AlloyRethStateProvider +impl reth_storage_api::StorageRootProvider for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1038,7 +1279,7 @@ where } } -impl reth_storage_api::StateProofProvider for AlloyRethStateProvider +impl reth_storage_api::StateProofProvider for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1070,7 +1311,8 @@ where } } -impl reth_storage_api::HashedPostStateProvider for AlloyRethStateProvider +impl reth_storage_api::HashedPostStateProvider + for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1082,7 +1324,7 @@ where } } -impl StateReader for AlloyRethStateProvider +impl StateReader for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1099,7 +1341,7 @@ where } } -impl DBProvider for AlloyRethStateProvider +impl DBProvider for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1131,7 +1373,7 @@ where } } -impl BlockNumReader for 
AlloyRethStateProvider +impl BlockNumReader for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1170,7 +1412,7 @@ where } } -impl BlockHashReader for AlloyRethStateProvider +impl BlockHashReader for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1197,7 +1439,7 @@ where } } -impl BlockIdReader for AlloyRethStateProvider +impl BlockIdReader for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1223,7 +1465,7 @@ where } } -impl BlockReader for AlloyRethStateProvider +impl BlockReader for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1252,7 +1494,7 @@ where fn pending_block_and_receipts( &self, - ) -> Result, Vec)>, ProviderError> { + ) -> Result, Vec)>, ProviderError> { Err(ProviderError::UnsupportedProvider) } @@ -1294,7 +1536,7 @@ where } } -impl TransactionsProvider for AlloyRethStateProvider +impl TransactionsProvider for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1365,7 +1607,7 @@ where } } -impl ReceiptProvider for AlloyRethStateProvider +impl ReceiptProvider for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1403,7 +1645,7 @@ where } } -impl HeaderProvider for AlloyRethStateProvider +impl HeaderProvider for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1457,7 +1699,7 @@ where } } -impl PruneCheckpointReader for AlloyRethStateProvider +impl PruneCheckpointReader for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1475,7 +1717,7 @@ where } } -impl StageCheckpointReader for AlloyRethStateProvider +impl StageCheckpointReader for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1497,7 +1739,7 @@ where } } -impl ChangeSetReader for AlloyRethStateProvider +impl ChangeSetReader for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1511,7 +1753,7 @@ where } } -impl StateProviderFactory for AlloyRethStateProvider +impl StateProviderFactory for RpcBlockchainStateProvider where P: Provider + Clone + 'static + Send + Sync, Node: NodeTypes + 'static, @@ -1520,7 +1762,7 @@ where Self: Clone + 'static, { fn latest(&self) -> Result { - Ok(Box::new(self.clone()) as StateProviderBox) + Ok(Box::new(self.with_block_id(self.best_block_number()?.into()))) } fn state_by_block_id(&self, block_id: BlockId) -> Result { @@ -1577,7 +1819,7 @@ where } } -impl ChainSpecProvider for AlloyRethStateProvider +impl ChainSpecProvider for RpcBlockchainStateProvider where P: Send + Sync + std::fmt::Debug, N: Send + Sync, @@ -1599,7 +1841,7 @@ where // Note: FullExecutionDataProvider is already implemented via the blanket implementation // for types that implement both ExecutionDataProvider and BlockExecutionForkProvider -impl StatsReader for AlloyRethStateProvider +impl StatsReader for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1610,7 +1852,7 @@ where } } -impl BlockBodyIndicesProvider for AlloyRethStateProvider +impl BlockBodyIndicesProvider for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1631,7 +1873,7 @@ where } } -impl NodePrimitivesProvider for AlloyRethStateProvider +impl NodePrimitivesProvider for RpcBlockchainStateProvider where P: Send + Sync + std::fmt::Debug, N: Send + Sync, @@ -1640,7 +1882,7 @@ where type Primitives = PrimitivesTy; } -impl ChainStateBlockReader for AlloyRethStateProvider +impl ChainStateBlockReader for 
RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1655,7 +1897,7 @@ where } } -impl ChainStateBlockWriter for AlloyRethStateProvider +impl ChainStateBlockWriter for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1669,101 +1911,3 @@ where Err(ProviderError::UnsupportedProvider) } } - -// Async database wrapper for revm compatibility -#[allow(dead_code)] -#[derive(Debug, Clone)] -struct AsyncDbWrapper { - provider: P, - block_id: BlockId, - network: std::marker::PhantomData, -} - -#[allow(dead_code)] -impl AsyncDbWrapper { - const fn new(provider: P, block_id: BlockId) -> Self { - Self { provider, block_id, network: std::marker::PhantomData } - } - - /// Helper function to execute async operations in a blocking context - fn block_on_async(&self, fut: F) -> T - where - F: Future, - { - tokio::task::block_in_place(move || Handle::current().block_on(fut)) - } -} - -impl revm::Database for AsyncDbWrapper -where - P: Provider + Clone + 'static, - N: Network, -{ - type Error = ProviderError; - - fn basic(&mut self, address: Address) -> Result, Self::Error> { - self.block_on_async(async { - let account_info = self - .provider - .get_account_info(address) - .block_id(self.block_id) - .await - .map_err(ProviderError::other)?; - - // Only return account if it exists - if account_info.balance.is_zero() && - account_info.nonce == 0 && - account_info.code.is_empty() - { - Ok(None) - } else { - let code_hash = if account_info.code.is_empty() { - revm_primitives::KECCAK_EMPTY - } else { - revm_primitives::keccak256(&account_info.code) - }; - - Ok(Some(revm::state::AccountInfo { - balance: account_info.balance, - nonce: account_info.nonce, - code_hash, - code: if account_info.code.is_empty() { - None - } else { - Some(revm::bytecode::Bytecode::new_raw(account_info.code)) - }, - })) - } - }) - } - - fn code_by_hash(&mut self, _code_hash: B256) -> Result { - // Cannot fetch bytecode by hash via RPC - Ok(revm::bytecode::Bytecode::default()) - } - - fn storage(&mut self, address: Address, index: U256) -> Result { - let index = B256::from(index); - - self.block_on_async(async { - self.provider - .get_storage_at(address, index.into()) - .block_id(self.block_id) - .await - .map_err(ProviderError::other) - }) - } - - fn block_hash(&mut self, number: u64) -> Result { - self.block_on_async(async { - let block = self - .provider - .get_block_by_number(number.into()) - .await - .map_err(ProviderError::other)? - .ok_or(ProviderError::HeaderNotFound(number.into()))?; - - Ok(block.header().hash()) - }) - } -} diff --git a/crates/storage/storage-api/src/block.rs b/crates/storage/storage-api/src/block.rs index 4316e5af673..40a009935ca 100644 --- a/crates/storage/storage-api/src/block.rs +++ b/crates/storage/storage-api/src/block.rs @@ -6,7 +6,7 @@ use alloc::{sync::Arc, vec::Vec}; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; use alloy_primitives::{BlockNumber, B256}; use core::ops::RangeInclusive; -use reth_primitives_traits::{RecoveredBlock, SealedBlock, SealedHeader}; +use reth_primitives_traits::{RecoveredBlock, SealedHeader}; use reth_storage_errors::provider::ProviderResult; /// A helper enum that represents the origin of the requested block. @@ -88,7 +88,7 @@ pub trait BlockReader: #[expect(clippy::type_complexity)] fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>>; + ) -> ProviderResult, Vec)>>; /// Returns the block with matching hash from the database. 
/// @@ -164,7 +164,7 @@ impl BlockReader for Arc { } fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { T::pending_block_and_receipts(self) } fn block_by_hash(&self, hash: B256) -> ProviderResult> { @@ -222,7 +222,7 @@ impl BlockReader for &T { } fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { T::pending_block_and_receipts(self) } fn block_by_hash(&self, hash: B256) -> ProviderResult> { diff --git a/crates/storage/storage-api/src/noop.rs b/crates/storage/storage-api/src/noop.rs index 2afa4b616f5..0409bfad62b 100644 --- a/crates/storage/storage-api/src/noop.rs +++ b/crates/storage/storage-api/src/noop.rs @@ -8,6 +8,9 @@ use crate::{ StateProvider, StateProviderBox, StateProviderFactory, StateRootProvider, StorageRootProvider, TransactionVariant, TransactionsProvider, }; + +#[cfg(feature = "db-api")] +use crate::{DBProvider, DatabaseProviderFactory}; use alloc::{boxed::Box, string::String, sync::Arc, vec::Vec}; use alloy_consensus::transaction::TransactionMeta; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; @@ -20,11 +23,13 @@ use core::{ ops::{RangeBounds, RangeInclusive}, }; use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec, MAINNET}; +#[cfg(feature = "db-api")] +use reth_db_api::mock::{DatabaseMock, TxMock}; use reth_db_models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_ethereum_primitives::EthPrimitives; -use reth_primitives_traits::{ - Account, Bytecode, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader, -}; +use reth_primitives_traits::{Account, Bytecode, NodePrimitives, RecoveredBlock, SealedHeader}; +#[cfg(feature = "db-api")] +use reth_prune_types::PruneModes; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; @@ -38,20 +43,38 @@ use reth_trie_common::{ #[non_exhaustive] pub struct NoopProvider { chain_spec: Arc, + #[cfg(feature = "db-api")] + tx: TxMock, + #[cfg(feature = "db-api")] + prune_modes: PruneModes, _phantom: PhantomData, } impl NoopProvider { /// Create a new instance for specific primitive types. pub fn new(chain_spec: Arc) -> Self { - Self { chain_spec, _phantom: Default::default() } + Self { + chain_spec, + #[cfg(feature = "db-api")] + tx: TxMock::default(), + #[cfg(feature = "db-api")] + prune_modes: PruneModes::none(), + _phantom: Default::default(), + } } } impl NoopProvider { /// Create a new instance of the `NoopBlockReader`. 
pub fn eth(chain_spec: Arc) -> Self { - Self { chain_spec, _phantom: Default::default() } + Self { + chain_spec, + #[cfg(feature = "db-api")] + tx: TxMock::default(), + #[cfg(feature = "db-api")] + prune_modes: PruneModes::none(), + _phantom: Default::default(), + } } } @@ -70,7 +93,14 @@ impl Default for NoopProvider { impl Clone for NoopProvider { fn clone(&self) -> Self { - Self { chain_spec: Arc::clone(&self.chain_spec), _phantom: Default::default() } + Self { + chain_spec: Arc::clone(&self.chain_spec), + #[cfg(feature = "db-api")] + tx: self.tx.clone(), + #[cfg(feature = "db-api")] + prune_modes: self.prune_modes.clone(), + _phantom: Default::default(), + } } } @@ -169,7 +199,7 @@ impl BlockReader for NoopProvider { fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { Ok(None) } @@ -560,3 +590,41 @@ impl BlockBodyIndicesProvider for NoopProvider DBProvider for NoopProvider { + type Tx = TxMock; + + fn tx_ref(&self) -> &Self::Tx { + &self.tx + } + + fn tx_mut(&mut self) -> &mut Self::Tx { + &mut self.tx + } + + fn into_tx(self) -> Self::Tx { + self.tx + } + + fn prune_modes_ref(&self) -> &PruneModes { + &self.prune_modes + } +} + +#[cfg(feature = "db-api")] +impl DatabaseProviderFactory + for NoopProvider +{ + type DB = DatabaseMock; + type Provider = Self; + type ProviderRW = Self; + + fn database_provider_ro(&self) -> ProviderResult { + Ok(self.clone()) + } + + fn database_provider_rw(&self) -> ProviderResult { + Ok(self.clone()) + } +} diff --git a/crates/transaction-pool/docs/mermaid/txpool.mmd b/crates/transaction-pool/docs/mermaid/txpool.mmd index 94f3abda3e6..e183d8f3c91 100644 --- a/crates/transaction-pool/docs/mermaid/txpool.mmd +++ b/crates/transaction-pool/docs/mermaid/txpool.mmd @@ -16,7 +16,7 @@ graph TB A[Incoming Tx] --> B[Validation] -->|insert| pool pool --> |if ready + blobfee too low| B4 pool --> |if ready| B1 - pool --> |if ready + basfee too low| B2 + pool --> |if ready + basefee too low| B2 pool --> |nonce gap or lack of funds| B3 pool --> |update| pool B1 --> |best| production diff --git a/crates/transaction-pool/src/blobstore/disk.rs b/crates/transaction-pool/src/blobstore/disk.rs index e738bfc6681..b550b085fb1 100644 --- a/crates/transaction-pool/src/blobstore/disk.rs +++ b/crates/transaction-pool/src/blobstore/disk.rs @@ -424,10 +424,9 @@ impl DiskFileBlobStoreInner { if let Some(blob) = self.blob_cache.lock().get(&tx) { return Ok(Some(blob.clone())) } - let blob = self.read_one(tx)?; - if let Some(blob) = &blob { - let blob_arc = Arc::new(blob.clone()); + if let Some(blob) = self.read_one(tx)? { + let blob_arc = Arc::new(blob); self.blob_cache.lock().insert(tx, blob_arc.clone()); return Ok(Some(blob_arc)) } @@ -542,11 +541,18 @@ impl DiskFileBlobStoreInner { if from_disk.is_empty() { return Ok(res) } + let from_disk = from_disk + .into_iter() + .map(|(tx, data)| { + let data = Arc::new(data); + res.push((tx, data.clone())); + (tx, data) + }) + .collect::>(); + let mut cache = self.blob_cache.lock(); for (tx, data) in from_disk { - let arc = Arc::new(data.clone()); - cache.insert(tx, arc.clone()); - res.push((tx, arc.clone())); + cache.insert(tx, data); } Ok(res) diff --git a/crates/transaction-pool/src/config.rs b/crates/transaction-pool/src/config.rs index 5263cd18344..a58b02bb327 100644 --- a/crates/transaction-pool/src/config.rs +++ b/crates/transaction-pool/src/config.rs @@ -50,6 +50,8 @@ pub struct PoolConfig { pub price_bumps: PriceBumpConfig, /// Minimum base fee required by the protocol. 
pub minimal_protocol_basefee: u64, + /// Minimum priority fee required for transaction acceptance into the pool. + pub minimum_priority_fee: Option, /// The max gas limit for transactions in the pool pub gas_limit: u64, /// How to handle locally received transactions: @@ -87,6 +89,7 @@ impl Default for PoolConfig { max_account_slots: TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, price_bumps: Default::default(), minimal_protocol_basefee: MIN_PROTOCOL_BASE_FEE, + minimum_priority_fee: None, gas_limit: ETHEREUM_BLOCK_GAS_LIMIT_30M, local_transactions_config: Default::default(), pending_tx_listener_buffer_size: PENDING_TX_LISTENER_BUFFER_SIZE, diff --git a/crates/transaction-pool/src/error.rs b/crates/transaction-pool/src/error.rs index 686c9456d39..b499c57aebd 100644 --- a/crates/transaction-pool/src/error.rs +++ b/crates/transaction-pool/src/error.rs @@ -218,6 +218,9 @@ pub enum InvalidPoolTransactionError { /// respect the size limits of the pool. #[error("transaction's gas limit {0} exceeds block's gas limit {1}")] ExceedsGasLimit(u64, u64), + /// Thrown when a transaction's gas limit exceeds the configured maximum per-transaction limit. + #[error("transaction's gas limit {0} exceeds maximum per-transaction gas limit {1}")] + MaxTxGasLimitExceeded(u64, u64), /// Thrown when a new transaction is added to the pool, but then immediately discarded to /// respect the tx fee exceeds the configured cap #[error("tx fee ({max_tx_fee_wei} wei) exceeds the configured cap ({tx_fee_cap_wei} wei)")] @@ -264,6 +267,12 @@ pub enum InvalidPoolTransactionError { /// invocation. #[error("intrinsic gas too low")] IntrinsicGasTooLow, + /// The transaction priority fee is below the minimum required priority fee. + #[error("transaction priority fee below minimum required priority fee {minimum_priority_fee}")] + PriorityFeeBelowMinimum { + /// Minimum required priority fee. + minimum_priority_fee: u128, + }, } // === impl InvalidPoolTransactionError === @@ -320,6 +329,10 @@ impl InvalidPoolTransactionError { } } Self::ExceedsGasLimit(_, _) => true, + Self::MaxTxGasLimitExceeded(_, _) => { + // local setting + false + } Self::ExceedsFeeCap { max_tx_fee_wei: _, tx_fee_cap_wei: _ } => true, Self::ExceedsMaxInitCodeSize(_, _) => true, Self::OversizedData(_, _) => true, @@ -374,9 +387,15 @@ impl InvalidPoolTransactionError { Eip7702PoolTransactionError::InflightTxLimitReached => false, Eip7702PoolTransactionError::AuthorityReserved => false, }, + Self::PriorityFeeBelowMinimum { .. } => false, } } + /// Returns `true` if an import failed due to an oversized transaction + pub const fn is_oversized(&self) -> bool { + matches!(self, Self::OversizedData(_, _)) + } + /// Returns `true` if an import failed due to nonce gap. pub const fn is_nonce_gap(&self) -> bool { matches!(self, Self::Consensus(InvalidTransactionError::NonceNotConsistent { .. })) || diff --git a/crates/transaction-pool/src/identifier.rs b/crates/transaction-pool/src/identifier.rs index 17320ecf930..96cfd1ef2df 100644 --- a/crates/transaction-pool/src/identifier.rs +++ b/crates/transaction-pool/src/identifier.rs @@ -1,7 +1,6 @@ //! Identifier types for transactions and senders. -use alloy_primitives::Address; +use alloy_primitives::{map::HashMap, Address}; use rustc_hash::FxHashMap; -use std::collections::HashMap; /// An internal mapping of addresses. 
/// diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 5063746aae2..340ddaae2c2 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -282,8 +282,9 @@ pub use crate::{ error::PoolResult, ordering::{CoinbaseTipOrdering, Priority, TransactionOrdering}, pool::{ - blob_tx_priority, fee_delta, state::SubPool, AllTransactionsEvents, FullTransactionEvent, - NewTransactionEvent, TransactionEvent, TransactionEvents, TransactionListenerKind, + blob_tx_priority, fee_delta, state::SubPool, AddedTransactionOutcome, + AllTransactionsEvents, FullTransactionEvent, NewTransactionEvent, TransactionEvent, + TransactionEvents, TransactionListenerKind, }, traits::*, validate::{ @@ -488,7 +489,7 @@ where &self, origin: TransactionOrigin, transaction: Self::Transaction, - ) -> PoolResult { + ) -> PoolResult { let (_, tx) = self.validate(origin, transaction).await; let mut results = self.pool.add_transactions(origin, std::iter::once(tx)); results.pop().expect("result length is the same as the input") @@ -498,7 +499,7 @@ where &self, origin: TransactionOrigin, transactions: Vec, - ) -> Vec> { + ) -> Vec> { if transactions.is_empty() { return Vec::new() } @@ -593,6 +594,13 @@ where self.pool.queued_transactions() } + fn pending_and_queued_txn_count(&self) -> (usize, usize) { + let data = self.pool.get_pool_data(); + let pending = data.pending_transactions_count(); + let queued = data.queued_transactions_count(); + (pending, queued) + } + fn all_transactions(&self) -> AllPoolTransactions { self.pool.all_transactions() } diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index 132854bb712..45851f31f88 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -9,9 +9,9 @@ use crate::{ pool::TransactionListenerKind, traits::{BestTransactionsAttributes, GetPooledTransactionLimit, NewBlobSidecar}, validate::ValidTransaction, - AllPoolTransactions, AllTransactionsEvents, BestTransactions, BlockInfo, EthPoolTransaction, - EthPooledTransaction, NewTransactionEvent, PoolResult, PoolSize, PoolTransaction, - PropagatedTransactions, TransactionEvents, TransactionOrigin, TransactionPool, + AddedTransactionOutcome, AllPoolTransactions, AllTransactionsEvents, BestTransactions, + BlockInfo, EthPoolTransaction, EthPooledTransaction, NewTransactionEvent, PoolResult, PoolSize, + PoolTransaction, PropagatedTransactions, TransactionEvents, TransactionOrigin, TransactionPool, TransactionValidationOutcome, TransactionValidator, ValidPoolTransaction, }; use alloy_eips::{ @@ -79,7 +79,7 @@ impl TransactionPool for NoopTransactionPool { &self, _origin: TransactionOrigin, transaction: Self::Transaction, - ) -> PoolResult { + ) -> PoolResult { let hash = *transaction.hash(); Err(PoolError::other(hash, Box::new(NoopInsertError::new(transaction)))) } @@ -88,7 +88,7 @@ impl TransactionPool for NoopTransactionPool { &self, _origin: TransactionOrigin, transactions: Vec, - ) -> Vec> { + ) -> Vec> { transactions .into_iter() .map(|transaction| { @@ -190,6 +190,10 @@ impl TransactionPool for NoopTransactionPool { vec![] } + fn pending_and_queued_txn_count(&self) -> (usize, usize) { + (0, 0) + } + fn all_transactions(&self) -> AllPoolTransactions { AllPoolTransactions::default() } diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index eba3c2c35d0..ecf28a519e2 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs 
@@ -91,7 +91,7 @@ pub struct BestTransactions { /// There might be the case where a yielded transactions is invalid, this will track it. pub(crate) invalid: HashSet, /// Used to receive any new pending transactions that have been added to the pool after this - /// iterator was static fileted + /// iterator was static filtered /// /// These new pending transactions are inserted into this iterator's pool before yielding the /// next value diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index bf96431f78a..008003c3c20 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -418,13 +418,30 @@ where let changed_senders = self.changed_senders(accounts.into_iter()); let UpdateOutcome { promoted, discarded } = self.pool.write().update_accounts(changed_senders); - let mut listener = self.event_listener.write(); - for tx in &promoted { - listener.pending(tx.hash(), None); + // Notify about promoted pending transactions (similar to notify_on_new_state) + if !promoted.is_empty() { + self.pending_transaction_listener.lock().retain_mut(|listener| { + let promoted_hashes = promoted.iter().filter_map(|tx| { + if listener.kind.is_propagate_only() && !tx.propagate { + None + } else { + Some(*tx.hash()) + } + }); + listener.send_all(promoted_hashes) + }); } - for tx in &discarded { - listener.discarded(tx.hash()); + + { + let mut listener = self.event_listener.write(); + + for tx in &promoted { + listener.pending(tx.hash(), None); + } + for tx in &discarded { + listener.discarded(tx.hash()); + } } // This deletes outdated blob txs from the blob store, based on the account's nonce. This is @@ -441,7 +458,7 @@ where pool: &mut RwLockWriteGuard<'_, TxPool>, origin: TransactionOrigin, tx: TransactionValidationOutcome, - ) -> PoolResult { + ) -> PoolResult { match tx { TransactionValidationOutcome::Valid { balance, @@ -477,6 +494,10 @@ where let added = pool.add_transaction(tx, balance, state_nonce, bytecode_hash)?; let hash = *added.hash(); + let state = match added.subpool() { + SubPool::Pending => AddedTransactionState::Pending, + _ => AddedTransactionState::Queued, + }; // transaction was successfully inserted into the pool if let Some(sidecar) = maybe_sidecar { @@ -507,7 +528,7 @@ where // Notify listeners for _all_ transactions self.on_new_transaction(added.into_new_transaction_event()); - Ok(hash) + Ok(AddedTransactionOutcome { hash, state }) } TransactionValidationOutcome::Invalid(tx, err) => { let mut listener = self.event_listener.write(); @@ -546,7 +567,7 @@ where &self, origin: TransactionOrigin, transactions: impl IntoIterator>, - ) -> Vec> { + ) -> Vec> { // Add the transactions and enforce the pool size limits in one write lock let (mut added, discarded) = { let mut pool = self.pool.write(); @@ -582,7 +603,7 @@ where // A newly added transaction may be immediately discarded, so we need to // adjust the result here for res in &mut added { - if let Ok(hash) = res { + if let Ok(AddedTransactionOutcome { hash, .. 
}) = res { if discarded_hashes.contains(hash) { *res = Err(PoolError::new(*hash, PoolErrorKind::DiscardedOnInsert)) } @@ -1152,7 +1173,6 @@ impl AddedTransaction { } /// Returns the subpool this transaction was added to - #[cfg(test)] pub(crate) const fn subpool(&self) -> SubPool { match self { Self::Pending(_) => SubPool::Pending, @@ -1171,6 +1191,24 @@ impl AddedTransaction { } } +/// The state of a transaction when it was added to the pool +#[derive(Debug)] +pub enum AddedTransactionState { + /// Ready for execution + Pending, + /// Not ready for execution due to a nonce gap or insufficient balance + Queued, // TODO: Break it down into missing nonce, insufficient balance, etc. +} + +/// The outcome of a successful transaction addition +#[derive(Debug)] +pub struct AddedTransactionOutcome { + /// The hash of the transaction + pub hash: TxHash, + /// The state of the transaction + pub state: AddedTransactionState, +} + /// Contains all state changes after a [`CanonicalStateUpdate`] was processed #[derive(Debug)] pub(crate) struct OnNewCanonicalStateOutcome { diff --git a/crates/transaction-pool/src/pool/parked.rs b/crates/transaction-pool/src/pool/parked.rs index 33056dd6ec5..d3e90b6e3c1 100644 --- a/crates/transaction-pool/src/pool/parked.rs +++ b/crates/transaction-pool/src/pool/parked.rs @@ -131,7 +131,7 @@ impl ParkedPool { /// Returns an iterator over all transactions in the pool pub(crate) fn all( &self, - ) -> impl Iterator>> + '_ { + ) -> impl ExactSizeIterator>> + '_ { self.by_id.values().map(|tx| tx.transaction.clone().into()) } diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index d3e65f711fa..162e3aa1979 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -158,7 +158,7 @@ impl PendingPool { /// Returns an iterator over all transactions in the pool pub(crate) fn all( &self, - ) -> impl Iterator>> + '_ { + ) -> impl ExactSizeIterator>> + '_ { self.by_id.values().map(|tx| tx.transaction.clone()) } @@ -292,7 +292,7 @@ impl PendingPool { tx: Arc>, base_fee: u64, ) { - assert!( + debug_assert!( !self.contains(tx.id()), "transaction already included {:?}", self.get(tx.id()).unwrap().transaction diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 66ba5368f00..1763e19cf0f 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -434,6 +434,11 @@ impl TxPool { self.pending_pool.all() } + /// Returns the number of transactions from the pending sub-pool + pub(crate) fn pending_transactions_count(&self) -> usize { + self.pending_pool.len() + } + /// Returns all pending transactions filtered by predicate pub(crate) fn pending_transactions_with_predicate( &self, @@ -462,6 +467,11 @@ impl TxPool { self.basefee_pool.all().chain(self.queued_pool.all()) } + /// Returns the number of transactions in parked pools + pub(crate) fn queued_transactions_count(&self) -> usize { + self.basefee_pool.len() + self.queued_pool.len() + } + /// Returns queued and pending transactions for the specified sender pub fn queued_and_pending_txs_by_sender( &self, @@ -832,11 +842,11 @@ impl TxPool { /// This will move/discard the given transaction according to the `PoolUpdate` fn process_updates(&mut self, updates: Vec) -> UpdateOutcome { let mut outcome = UpdateOutcome::default(); - for PoolUpdate { id, hash, current, destination } in updates { + for PoolUpdate { id, current, destination } in updates {
match destination { Destination::Discard => { // remove the transaction from the pool and subpool - if let Some(tx) = self.prune_transaction_by_hash(&hash) { + if let Some(tx) = self.prune_transaction_by_id(&id) { outcome.discarded.push(tx); } self.metrics.removed_transactions.increment(1); @@ -853,6 +863,9 @@ impl TxPool { } } } + + self.update_size_metrics(); + outcome } @@ -958,6 +971,17 @@ impl TxPool { let (tx, pool) = self.all_transactions.remove_transaction_by_hash(tx_hash)?; self.remove_from_subpool(pool, tx.id()) } + /// This removes the transaction from the pool and advances any descendant state inside the + /// subpool. + /// + /// This is intended to be used when we call [`Self::process_updates`]. + fn prune_transaction_by_id( + &mut self, + tx_id: &TransactionId, + ) -> Option>> { + let (tx, pool) = self.all_transactions.remove_transaction_by_id(tx_id)?; + self.remove_from_subpool(pool, tx.id()) + } /// Removes the transaction from the given pool. /// @@ -1354,6 +1378,7 @@ impl AllTransactions { } }; } + // track the balance if the sender was changed in the block // check if this is a changed account let changed_balance = if let Some(info) = changed_accounts.get(&id.sender) { @@ -1361,7 +1386,6 @@ impl AllTransactions { if id.nonce < info.state_nonce { updates.push(PoolUpdate { id: *tx.transaction.id(), - hash: *tx.transaction.hash(), current: tx.subpool, destination: Destination::Discard, }); @@ -1473,7 +1497,6 @@ impl AllTransactions { if current_pool != tx.subpool { updates.push(PoolUpdate { id: *tx.transaction.id(), - hash: *tx.transaction.hash(), current: current_pool, destination: tx.subpool.into(), }) @@ -1563,6 +1586,21 @@ impl AllTransactions { Some((tx, internal.subpool)) } + /// Removes a transaction from the set using its id. + /// + /// This is intended for processing updates after state changes. + pub(crate) fn remove_transaction_by_id( + &mut self, + tx_id: &TransactionId, + ) -> Option<(Arc>, SubPool)> { + let internal = self.txs.remove(tx_id)?; + let tx = self.by_hash.remove(internal.transaction.hash())?; + self.remove_auths(&internal); + // decrement the counter for the sender. + self.tx_decr(tx.sender_id()); + Some((tx, internal.subpool)) + } + /// If a tx is removed (_not_ mined), all descendants are set to parked due to the nonce gap pub(crate) fn park_descendant_transactions( &mut self, @@ -1582,7 +1620,6 @@ impl AllTransactions { if current_pool != tx.subpool { updates.push(PoolUpdate { id: *id, - hash: *tx.transaction.hash(), current: current_pool, destination: tx.subpool.into(), }) @@ -1942,7 +1979,6 @@ impl AllTransactions { if current_pool != tx.subpool { updates.push(PoolUpdate { id: *id, - hash: *tx.transaction.hash(), current: current_pool, destination: tx.subpool.into(), }) diff --git a/crates/transaction-pool/src/pool/update.rs b/crates/transaction-pool/src/pool/update.rs index ca2b3358201..2322ccf6e65 100644 --- a/crates/transaction-pool/src/pool/update.rs +++ b/crates/transaction-pool/src/pool/update.rs @@ -3,7 +3,6 @@ use crate::{ identifier::TransactionId, pool::state::SubPool, PoolTransaction, ValidPoolTransaction, }; -use alloy_primitives::TxHash; use std::sync::Arc; /// A change of the transaction's location @@ -13,8 +12,6 @@ use std::sync::Arc; pub(crate) struct PoolUpdate { /// Internal tx id. pub(crate) id: TransactionId, - /// Hash of the transaction. - pub(crate) hash: TxHash, /// Where the transaction is currently held. pub(crate) current: SubPool, /// Where to move the transaction to. 
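Net effect of the `txpool.rs` and `update.rs` hunks above: a `PoolUpdate` no longer carries the transaction hash, because the discard path can resolve everything it needs from the `TransactionId`. A condensed, paraphrased sketch of that path, not a verbatim excerpt (all items named below are `pub(crate)` internals of the pool; generics and metrics handling are elided where noted):

```rust
// Crate-internal sketch, paraphrasing the new discard path in `process_updates`.
fn apply_update<T: TransactionOrdering>(pool: &mut TxPool<T>, update: PoolUpdate) {
    if let Destination::Discard = update.destination {
        // `prune_transaction_by_id` looks the entry up by `TransactionId` and
        // recovers the hash from the internal entry, so `PoolUpdate` no longer
        // needs to duplicate it.
        if let Some(discarded) = pool.prune_transaction_by_id(&update.id) {
            let _ = discarded; // collected into `UpdateOutcome::discarded`
        }
    }
}
```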
diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index e9f58c27a32..090f59169b0 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -52,13 +52,13 @@ use crate::{ blobstore::BlobStoreError, - error::{InvalidPoolTransactionError, PoolResult}, + error::{InvalidPoolTransactionError, PoolError, PoolResult}, pool::{ state::SubPool, BestTransactionFilter, NewTransactionEvent, TransactionEvents, TransactionListenerKind, }, validate::ValidPoolTransaction, - AllTransactionsEvents, + AddedTransactionOutcome, AllTransactionsEvents, }; use alloy_consensus::{error::ValueError, BlockHeader, Signed, Typed2718}; use alloy_eips::{ @@ -130,7 +130,7 @@ pub trait TransactionPool: Clone + Debug + Send + Sync { fn add_external_transaction( &self, transaction: Self::Transaction, - ) -> impl Future> + Send { + ) -> impl Future> + Send { self.add_transaction(TransactionOrigin::External, transaction) } @@ -140,7 +140,7 @@ pub trait TransactionPool: Clone + Debug + Send + Sync { fn add_external_transactions( &self, transactions: Vec, - ) -> impl Future>> + Send { + ) -> impl Future>> + Send { self.add_transactions(TransactionOrigin::External, transactions) } @@ -163,7 +163,7 @@ pub trait TransactionPool: Clone + Debug + Send + Sync { &self, origin: TransactionOrigin, transaction: Self::Transaction, - ) -> impl Future> + Send; + ) -> impl Future> + Send; /// Adds the given _unvalidated_ transaction into the pool. /// @@ -174,7 +174,43 @@ pub trait TransactionPool: Clone + Debug + Send + Sync { &self, origin: TransactionOrigin, transactions: Vec, - ) -> impl Future>> + Send; + ) -> impl Future>> + Send; + + /// Submit a consensus transaction directly to the pool + fn add_consensus_transaction( + &self, + tx: Recovered<::Consensus>, + origin: TransactionOrigin, + ) -> impl Future> + Send { + async move { + let tx_hash = *tx.tx_hash(); + + let pool_transaction = match Self::Transaction::try_from_consensus(tx) { + Ok(tx) => tx, + Err(e) => return Err(PoolError::other(tx_hash, e.to_string())), + }; + + self.add_transaction(origin, pool_transaction).await + } + } + + /// Submit a consensus transaction and subscribe to event stream + fn add_consensus_transaction_and_subscribe( + &self, + tx: Recovered<::Consensus>, + origin: TransactionOrigin, + ) -> impl Future> + Send { + async move { + let tx_hash = *tx.tx_hash(); + + let pool_transaction = match Self::Transaction::try_from_consensus(tx) { + Ok(tx) => tx, + Err(e) => return Err(PoolError::other(tx_hash, e.to_string())), + }; + + self.add_transaction_and_subscribe(origin, pool_transaction).await + } + } /// Returns a new transaction change event stream for the given transaction. /// @@ -355,6 +391,10 @@ pub trait TransactionPool: Clone + Debug + Send + Sync { /// Consumer: RPC fn queued_transactions(&self) -> Vec>>; + /// Returns the number of transactions that are ready for inclusion in the next block and the + /// number of transactions that are ready for inclusion in future blocks: `(pending, queued)`. + fn pending_and_queued_txn_count(&self) -> (usize, usize); + /// Returns all transactions that are currently in the pool grouped by whether they are ready /// for inclusion in the next block or not. 
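A hedged usage sketch for the two additions above: `add_consensus_transaction` plus the `AddedTransactionOutcome` that insertions now resolve to. The pool handle and the `Recovered` import path are assumptions; error handling is illustrative:

```rust
use reth_transaction_pool::{
    AddedTransactionOutcome, PoolTransaction, TransactionOrigin, TransactionPool,
};
// `Recovered` pairs a consensus transaction with its recovered sender;
// the exact import path is assumed here.
use alloy_consensus::transaction::Recovered;

async fn submit<P: TransactionPool>(
    pool: &P,
    tx: Recovered<<P::Transaction as PoolTransaction>::Consensus>,
) {
    match pool.add_consensus_transaction(tx, TransactionOrigin::Local).await {
        Ok(AddedTransactionOutcome { hash, state }) => {
            // `state` is `Pending` (executable now) or `Queued` (nonce gap or
            // insufficient balance), per the enum added in `pool/mod.rs`.
            println!("inserted {hash}: {state:?}");
        }
        Err(err) => eprintln!("pool rejected the transaction: {err}"),
    }
}
```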
diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 90a61b86ec5..fe152b057b1 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -232,6 +232,8 @@ pub(crate) struct EthTransactionValidatorInner { local_transactions_config: LocalTransactionConfig, /// Maximum size in bytes a single transaction can have in order to be accepted into the pool. max_tx_input_bytes: usize, + /// Maximum gas limit for individual transactions. + max_tx_gas_limit: Option<u64>, /// Marker for the transaction type _marker: PhantomData, /// Metrics for tx pool validation @@ -361,12 +363,30 @@ where } // Reject transactions over the defined size to prevent DoS attacks - let tx_input_len = transaction.input().len(); - if tx_input_len > self.max_tx_input_bytes { - return Err(TransactionValidationOutcome::Invalid( - transaction, - InvalidPoolTransactionError::OversizedData(tx_input_len, self.max_tx_input_bytes), - )) + if transaction.is_eip4844() { + // Since blob transactions are pulled instead of pushed, and only the consensus data is + // kept in memory while the sidecar is cached on disk, there is no critical limit that + // should be enforced. Still, we enforce a cap on the input bytes. Blob txs must also + // be executable right away when they enter the pool. + let tx_input_len = transaction.input().len(); + if tx_input_len > self.max_tx_input_bytes { + return Err(TransactionValidationOutcome::Invalid( + transaction, + InvalidPoolTransactionError::OversizedData( + tx_input_len, + self.max_tx_input_bytes, + ), + )) + } + } else { + // For non-blob transactions the limit applies to the full encoded length + let tx_size = transaction.encoded_length(); + if tx_size > self.max_tx_input_bytes { + return Err(TransactionValidationOutcome::Invalid( + transaction, + InvalidPoolTransactionError::OversizedData(tx_size, self.max_tx_input_bytes), + )) + } } // Check whether the init code size has been exceeded. @@ -389,6 +409,19 @@ where )) } + // Check the individual transaction gas limit if configured + if let Some(max_tx_gas_limit) = self.max_tx_gas_limit { + if transaction_gas_limit > max_tx_gas_limit { + return Err(TransactionValidationOutcome::Invalid( + transaction, + InvalidPoolTransactionError::MaxTxGasLimitExceeded( + transaction_gas_limit, + max_tx_gas_limit, + ), + )) + } + } + // Ensure max_priority_fee_per_gas (if EIP1559) is less than max_fee_per_gas if any.
if transaction.max_priority_fee_per_gas() > Some(transaction.max_fee_per_gas()) { return Err(TransactionValidationOutcome::Invalid( @@ -431,7 +464,11 @@ where { return Err(TransactionValidationOutcome::Invalid( transaction, - InvalidPoolTransactionError::Underpriced, + InvalidPoolTransactionError::PriorityFeeBelowMinimum { + minimum_priority_fee: self + .minimum_priority_fee + .expect("minimum priority fee is expected inside if statement"), + }, )) } @@ -476,8 +513,7 @@ where )) } - let blob_count = - transaction.blob_versioned_hashes().map(|b| b.len() as u64).unwrap_or(0); + let blob_count = transaction.blob_count().unwrap_or(0); if blob_count == 0 { // no blobs return Err(TransactionValidationOutcome::Invalid( @@ -773,6 +809,8 @@ pub struct EthTransactionValidatorBuilder { local_transactions_config: LocalTransactionConfig, /// Max size in bytes of a single transaction allowed max_tx_input_bytes: usize, + /// Maximum gas limit for individual transactions. + max_tx_gas_limit: Option<u64>, } impl EthTransactionValidatorBuilder { @@ -795,6 +833,7 @@ impl EthTransactionValidatorBuilder { local_transactions_config: Default::default(), max_tx_input_bytes: DEFAULT_MAX_TX_INPUT_BYTES, tx_fee_cap: Some(1e18 as u128), + max_tx_gas_limit: None, // by default all transaction types are allowed eip2718: true, eip1559: true, @@ -911,8 +950,8 @@ impl EthTransactionValidatorBuilder { } /// Sets a minimum priority fee that's enforced for acceptance into the pool. - pub const fn with_minimum_priority_fee(mut self, minimum_priority_fee: u128) -> Self { - self.minimum_priority_fee = Some(minimum_priority_fee); + pub const fn with_minimum_priority_fee(mut self, minimum_priority_fee: Option<u128>) -> Self { + self.minimum_priority_fee = minimum_priority_fee; self } @@ -964,6 +1003,12 @@ impl EthTransactionValidatorBuilder { self } + /// Sets the maximum gas limit for individual transactions. + pub const fn with_max_tx_gas_limit(mut self, max_tx_gas_limit: Option<u64>) -> Self { + self.max_tx_gas_limit = max_tx_gas_limit; + self + } + /// Builds the [`EthTransactionValidator`] without spawning validator tasks. pub fn build(self, blob_store: S) -> EthTransactionValidator where @@ -985,6 +1030,7 @@ impl EthTransactionValidatorBuilder { kzg_settings, local_transactions_config, max_tx_input_bytes, + max_tx_gas_limit, .. } = self; @@ -1002,9 +1048,6 @@ impl EthTransactionValidatorBuilder { max_blob_count: AtomicU64::new(max_blob_count), }; - // Ensure the kzg setup is loaded right away.
- let _kzg_settings = kzg_settings.get(); - let inner = EthTransactionValidatorInner { client, eip2718, @@ -1019,6 +1062,7 @@ impl EthTransactionValidatorBuilder { kzg_settings, local_transactions_config, max_tx_input_bytes, + max_tx_gas_limit, _marker: Default::default(), validation_metrics: TxPoolValidationMetrics::default(), }; @@ -1323,4 +1367,255 @@ mod tests { let outcome = validator.validate_one(TransactionOrigin::Local, transaction); assert!(outcome.is_valid()); } + + #[tokio::test] + async fn invalid_on_max_tx_gas_limit_exceeded() { + let transaction = get_transaction(); + let provider = MockEthProvider::default(); + provider.add_account( + transaction.sender(), + ExtendedAccount::new(transaction.nonce(), U256::MAX), + ); + + let blob_store = InMemoryBlobStore::default(); + let validator = EthTransactionValidatorBuilder::new(provider) + .with_max_tx_gas_limit(Some(500_000)) // Set limit lower than transaction gas limit (1_015_288) + .build(blob_store.clone()); + + let outcome = validator.validate_one(TransactionOrigin::External, transaction.clone()); + assert!(outcome.is_invalid()); + + let pool = + Pool::new(validator, CoinbaseTipOrdering::default(), blob_store, Default::default()); + + let res = pool.add_external_transaction(transaction.clone()).await; + assert!(res.is_err()); + assert!(matches!( + res.unwrap_err().kind, + PoolErrorKind::InvalidTransaction(InvalidPoolTransactionError::MaxTxGasLimitExceeded( + 1_015_288, 500_000 + )) + )); + let tx = pool.get(transaction.hash()); + assert!(tx.is_none()); + } + + #[tokio::test] + async fn valid_on_max_tx_gas_limit_disabled() { + let transaction = get_transaction(); + let provider = MockEthProvider::default(); + provider.add_account( + transaction.sender(), + ExtendedAccount::new(transaction.nonce(), U256::MAX), + ); + + let blob_store = InMemoryBlobStore::default(); + let validator = EthTransactionValidatorBuilder::new(provider) + .with_max_tx_gas_limit(None) // disabled + .build(blob_store); + + let outcome = validator.validate_one(TransactionOrigin::External, transaction); + assert!(outcome.is_valid()); + } + + #[tokio::test] + async fn valid_on_max_tx_gas_limit_within_limit() { + let transaction = get_transaction(); + let provider = MockEthProvider::default(); + provider.add_account( + transaction.sender(), + ExtendedAccount::new(transaction.nonce(), U256::MAX), + ); + + let blob_store = InMemoryBlobStore::default(); + let validator = EthTransactionValidatorBuilder::new(provider) + .with_max_tx_gas_limit(Some(2_000_000)) // Set limit higher than transaction gas limit (1_015_288) + .build(blob_store); + + let outcome = validator.validate_one(TransactionOrigin::External, transaction); + assert!(outcome.is_valid()); + } + + // Helper function to set up common test infrastructure for priority fee tests + fn setup_priority_fee_test() -> (EthPooledTransaction, MockEthProvider) { + let transaction = get_transaction(); + let provider = MockEthProvider::default(); + provider.add_account( + transaction.sender(), + ExtendedAccount::new(transaction.nonce(), U256::MAX), + ); + (transaction, provider) + } + + // Helper function to create a validator with minimum priority fee + fn create_validator_with_minimum_fee( + provider: MockEthProvider, + minimum_priority_fee: Option, + local_config: Option, + ) -> EthTransactionValidator { + let blob_store = InMemoryBlobStore::default(); + let mut builder = EthTransactionValidatorBuilder::new(provider) + .with_minimum_priority_fee(minimum_priority_fee); + + if let Some(config) = local_config { + 
builder = builder.with_local_transactions_config(config); + } + + builder.build(blob_store) + } + + #[tokio::test] + async fn invalid_on_priority_fee_lower_than_configured_minimum() { + let (transaction, provider) = setup_priority_fee_test(); + + // Verify the test transaction is a dynamic fee transaction + assert!(transaction.is_dynamic_fee()); + + // Set minimum priority fee to be double the transaction's priority fee + let minimum_priority_fee = + transaction.max_priority_fee_per_gas().expect("priority fee is expected") * 2; + + let validator = + create_validator_with_minimum_fee(provider, Some(minimum_priority_fee), None); + + // External transaction should be rejected due to low priority fee + let outcome = validator.validate_one(TransactionOrigin::External, transaction.clone()); + assert!(outcome.is_invalid()); + + if let TransactionValidationOutcome::Invalid(_, err) = outcome { + assert!(matches!( + err, + InvalidPoolTransactionError::PriorityFeeBelowMinimum { minimum_priority_fee: min_fee } + if min_fee == minimum_priority_fee + )); + } + + // Test pool integration + let blob_store = InMemoryBlobStore::default(); + let pool = + Pool::new(validator, CoinbaseTipOrdering::default(), blob_store, Default::default()); + + let res = pool.add_external_transaction(transaction.clone()).await; + assert!(res.is_err()); + assert!(matches!( + res.unwrap_err().kind, + PoolErrorKind::InvalidTransaction( + InvalidPoolTransactionError::PriorityFeeBelowMinimum { .. } + ) + )); + let tx = pool.get(transaction.hash()); + assert!(tx.is_none()); + + // Local transactions should still be accepted regardless of minimum priority fee + let (_, local_provider) = setup_priority_fee_test(); + let validator_local = + create_validator_with_minimum_fee(local_provider, Some(minimum_priority_fee), None); + + let local_outcome = validator_local.validate_one(TransactionOrigin::Local, transaction); + assert!(local_outcome.is_valid()); + } + + #[tokio::test] + async fn valid_on_priority_fee_equal_to_minimum() { + let (transaction, provider) = setup_priority_fee_test(); + + // Set minimum priority fee equal to transaction's priority fee + let tx_priority_fee = + transaction.max_priority_fee_per_gas().expect("priority fee is expected"); + let validator = create_validator_with_minimum_fee(provider, Some(tx_priority_fee), None); + + let outcome = validator.validate_one(TransactionOrigin::External, transaction); + assert!(outcome.is_valid()); + } + + #[tokio::test] + async fn valid_on_priority_fee_above_minimum() { + let (transaction, provider) = setup_priority_fee_test(); + + // Set minimum priority fee below transaction's priority fee + let tx_priority_fee = + transaction.max_priority_fee_per_gas().expect("priority fee is expected"); + let minimum_priority_fee = tx_priority_fee / 2; // Half of transaction's priority fee + + let validator = + create_validator_with_minimum_fee(provider, Some(minimum_priority_fee), None); + + let outcome = validator.validate_one(TransactionOrigin::External, transaction); + assert!(outcome.is_valid()); + } + + #[tokio::test] + async fn valid_on_minimum_priority_fee_disabled() { + let (transaction, provider) = setup_priority_fee_test(); + + // No minimum priority fee set (default is None) + let validator = create_validator_with_minimum_fee(provider, None, None); + + let outcome = validator.validate_one(TransactionOrigin::External, transaction); + assert!(outcome.is_valid()); + } + + #[tokio::test] + async fn priority_fee_validation_applies_to_private_transactions() { + let (transaction, 
provider) = setup_priority_fee_test(); + + // Set minimum priority fee to be double the transaction's priority fee + let minimum_priority_fee = + transaction.max_priority_fee_per_gas().expect("priority fee is expected") * 2; + + let validator = + create_validator_with_minimum_fee(provider, Some(minimum_priority_fee), None); + + // Private transactions are also subject to minimum priority fee validation + // because they are not considered "local" by default unless specifically configured + let outcome = validator.validate_one(TransactionOrigin::Private, transaction); + assert!(outcome.is_invalid()); + + if let TransactionValidationOutcome::Invalid(_, err) = outcome { + assert!(matches!( + err, + InvalidPoolTransactionError::PriorityFeeBelowMinimum { minimum_priority_fee: min_fee } + if min_fee == minimum_priority_fee + )); + } + } + + #[tokio::test] + async fn valid_on_local_config_exempts_private_transactions() { + let (transaction, provider) = setup_priority_fee_test(); + + // Set minimum priority fee to be double the transaction's priority fee + let minimum_priority_fee = + transaction.max_priority_fee_per_gas().expect("priority fee is expected") * 2; + + // Configure a local transactions config; enabling propagation alone does not + // whitelist the sender of private transactions + let local_config = + LocalTransactionConfig { propagate_local_transactions: true, ..Default::default() }; + + let validator = create_validator_with_minimum_fee( + provider, + Some(minimum_priority_fee), + Some(local_config), + ); + + // With an appropriate local config, the behavior depends on the local transaction logic. + // This test documents the current behavior: private transactions are still validated + // unless the sender is specifically whitelisted in local_transactions_config + let outcome = validator.validate_one(TransactionOrigin::Private, transaction); + assert!(outcome.is_invalid()); // Still invalid because the sender is not in the whitelist + } + + #[test] + fn reject_oversized_tx() { + let mut transaction = get_transaction(); + transaction.encoded_length = DEFAULT_MAX_TX_INPUT_BYTES + 1; + let provider = MockEthProvider::default(); + + // No minimum priority fee set (default is None) + let validator = create_validator_with_minimum_fee(provider, None, None); + + let outcome = validator.validate_one(TransactionOrigin::External, transaction); + let invalid = outcome.as_invalid().unwrap(); + assert!(invalid.is_oversized()); + } } diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 36d9f14addb..bef1297aff1 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -66,6 +66,14 @@ impl TransactionValidationOutcome { } } + /// Returns the [`InvalidPoolTransactionError`] if this is an invalid variant. + pub const fn as_invalid(&self) -> Option<&InvalidPoolTransactionError> { + match self { + Self::Invalid(_, err) => Some(err), + _ => None, + } + } + /// Returns true if the transaction is valid. pub const fn is_valid(&self) -> bool { matches!(self, Self::Valid { .. }) diff --git a/crates/transaction-pool/src/validate/task.rs b/crates/transaction-pool/src/validate/task.rs index 7e417681fe8..93f16a585b0 100644 --- a/crates/transaction-pool/src/validate/task.rs +++ b/crates/transaction-pool/src/validate/task.rs @@ -106,6 +106,11 @@ impl TransactionValidationTaskExecutor { to_validation_task: self.to_validation_task, } } + + /// Returns the validator.
+ pub const fn validator(&self) -> &V { + &self.validator + } } impl TransactionValidationTaskExecutor> { diff --git a/crates/transaction-pool/tests/it/blobs.rs b/crates/transaction-pool/tests/it/blobs.rs index 9417c62278b..9f7e224a235 100644 --- a/crates/transaction-pool/tests/it/blobs.rs +++ b/crates/transaction-pool/tests/it/blobs.rs @@ -3,7 +3,7 @@ use reth_transaction_pool::{ error::PoolErrorKind, test_utils::{MockTransaction, MockTransactionFactory, TestPoolBuilder}, - PoolTransaction, TransactionOrigin, TransactionPool, + AddedTransactionOutcome, PoolTransaction, TransactionOrigin, TransactionPool, }; #[tokio::test(flavor = "multi_thread")] @@ -12,7 +12,7 @@ async fn blobs_exclusive() { let mut mock_tx_factory = MockTransactionFactory::default(); let blob_tx = mock_tx_factory.create_eip4844(); - let hash = txpool + let AddedTransactionOutcome { hash, .. } = txpool .add_transaction(TransactionOrigin::External, blob_tx.transaction.clone()) .await .unwrap(); diff --git a/crates/transaction-pool/tests/it/evict.rs b/crates/transaction-pool/tests/it/evict.rs index 721988888b3..5a869702457 100644 --- a/crates/transaction-pool/tests/it/evict.rs +++ b/crates/transaction-pool/tests/it/evict.rs @@ -9,7 +9,8 @@ use reth_transaction_pool::{ test_utils::{ MockFeeRange, MockTransactionDistribution, MockTransactionRatio, TestPool, TestPoolBuilder, }, - BlockInfo, PoolConfig, SubPoolLimit, TransactionOrigin, TransactionPool, TransactionPoolExt, + AddedTransactionOutcome, BlockInfo, PoolConfig, SubPoolLimit, TransactionOrigin, + TransactionPool, TransactionPoolExt, }; #[tokio::test(flavor = "multi_thread")] @@ -97,7 +98,7 @@ async fn only_blobs_eviction() { let results = pool.add_transactions(TransactionOrigin::External, set).await; for (i, result) in results.iter().enumerate() { match result { - Ok(hash) => { + Ok(AddedTransactionOutcome { hash, .. 
}) => { println!("✅ Inserted tx into pool with hash: {hash}"); } Err(e) => { diff --git a/crates/transaction-pool/tests/it/listeners.rs b/crates/transaction-pool/tests/it/listeners.rs index 5eb296e8ae7..d0a9c9c5aa8 100644 --- a/crates/transaction-pool/tests/it/listeners.rs +++ b/crates/transaction-pool/tests/it/listeners.rs @@ -113,7 +113,7 @@ async fn txpool_listener_all() { let added_result = txpool.add_transaction(TransactionOrigin::External, transaction.transaction.clone()).await; - assert_matches!(added_result, Ok(hash) if hash == *transaction.transaction.get_hash()); + assert_matches!(added_result, Ok(outcome) if outcome.hash == *transaction.transaction.get_hash()); assert_matches!( all_tx_events.next().await, diff --git a/crates/transaction-pool/tests/it/pending.rs b/crates/transaction-pool/tests/it/pending.rs index be559c71eec..095dcfe5085 100644 --- a/crates/transaction-pool/tests/it/pending.rs +++ b/crates/transaction-pool/tests/it/pending.rs @@ -12,7 +12,7 @@ async fn txpool_new_pending_txs() { let added_result = txpool.add_transaction(TransactionOrigin::External, transaction.transaction.clone()).await; - assert_matches!(added_result, Ok(hash) if hash == *transaction.transaction.get_hash()); + assert_matches!(added_result, Ok(outcome) if outcome.hash == *transaction.transaction.get_hash()); let mut best_txns = txpool.best_transactions(); assert_matches!(best_txns.next(), Some(tx) if tx.transaction.get_hash() == transaction.transaction.get_hash()); @@ -20,6 +20,6 @@ async fn txpool_new_pending_txs() { let transaction = mock_tx_factory.create_eip1559(); let added_result = txpool.add_transaction(TransactionOrigin::External, transaction.transaction.clone()).await; - assert_matches!(added_result, Ok(hash) if hash == *transaction.transaction.get_hash()); + assert_matches!(added_result, Ok(outcome) if outcome.hash == *transaction.transaction.get_hash()); assert_matches!(best_txns.next(), Some(tx) if tx.transaction.get_hash() == transaction.transaction.get_hash()); } diff --git a/crates/trie/common/Cargo.toml b/crates/trie/common/Cargo.toml index ff6c5a58539..0aa93adb598 100644 --- a/crates/trie/common/Cargo.toml +++ b/crates/trie/common/Cargo.toml @@ -104,6 +104,8 @@ serde-bincode-compat = [ "reth-primitives-traits/serde-bincode-compat", "alloy-consensus/serde-bincode-compat", "dep:serde_with", + "alloy-genesis/serde-bincode-compat", + "alloy-rpc-types-eth?/serde-bincode-compat", ] test-utils = [ "dep:plain_hasher", diff --git a/crates/trie/common/src/hashed_state.rs b/crates/trie/common/src/hashed_state.rs index b6f60e2b2a1..8e4ca75e808 100644 --- a/crates/trie/common/src/hashed_state.rs +++ b/crates/trie/common/src/hashed_state.rs @@ -334,6 +334,41 @@ impl HashedPostState { HashedPostStateSorted { accounts, storages } } + + /// Converts the hashed post state into a [`HashedPostStateSorted`], keeping the maps + /// allocated by draining them. + /// + /// This effectively clears all the fields of this [`HashedPostState`], allowing their + /// allocations to be reused. Like `into_sorted`, it allocates new space for the sorted hashed + /// post state.
+ pub fn drain_into_sorted(&mut self) -> HashedPostStateSorted { + let mut updated_accounts = Vec::new(); + let mut destroyed_accounts = HashSet::default(); + for (hashed_address, info) in self.accounts.drain() { + if let Some(info) = info { + updated_accounts.push((hashed_address, info)); + } else { + destroyed_accounts.insert(hashed_address); + } + } + updated_accounts.sort_unstable_by_key(|(address, _)| *address); + let accounts = HashedAccountsSorted { accounts: updated_accounts, destroyed_accounts }; + + let storages = self + .storages + .drain() + .map(|(hashed_address, storage)| (hashed_address, storage.into_sorted())) + .collect(); + + HashedPostStateSorted { accounts, storages } + } + + /// Clears the account and storage maps of this `HashedPostState`. + pub fn clear(&mut self) { + self.accounts.clear(); + self.storages.clear(); + } } /// Representation of in-memory hashed storage. diff --git a/crates/trie/common/src/updates.rs b/crates/trie/common/src/updates.rs index dd82f4e192c..de5ee3ef740 100644 --- a/crates/trie/common/src/updates.rs +++ b/crates/trie/common/src/updates.rs @@ -118,6 +118,30 @@ impl TrieUpdates { TrieUpdatesSorted { removed_nodes: self.removed_nodes, account_nodes, storage_tries } } + /// Converts the trie updates into a [`TrieUpdatesSorted`], keeping the maps allocated by + /// draining them. + /// + /// This effectively clears the account node and storage trie maps of this [`TrieUpdates`] + /// (the removed nodes are cloned rather than drained), allowing their allocations to be + /// reused. Like `into_sorted`, it allocates new space for the sorted updates. + pub fn drain_into_sorted(&mut self) -> TrieUpdatesSorted { + let mut account_nodes = self.account_nodes.drain().collect::<Vec<_>>(); + account_nodes.sort_unstable_by(|a, b| a.0.cmp(&b.0)); + + let storage_tries = self + .storage_tries + .drain() + .map(|(hashed_address, updates)| (hashed_address, updates.into_sorted())) + .collect(); + + TrieUpdatesSorted { + removed_nodes: self.removed_nodes.clone(), + account_nodes, + storage_tries, + } + } + /// Converts trie updates into [`TrieUpdatesSortedRef`]. pub fn into_sorted_ref<'a>(&'a self) -> TrieUpdatesSortedRef<'a> { let mut account_nodes = self.account_nodes.iter().collect::<Vec<_>>(); @@ -133,6 +157,13 @@ impl TrieUpdates { .collect(), } } + + /// Clears the nodes and storage trie maps in this `TrieUpdates`. + pub fn clear(&mut self) { + self.account_nodes.clear(); + self.removed_nodes.clear(); + self.storage_tries.clear(); + } } /// Trie updates for storage trie of a single account.
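A sketch of the allocation-reuse pattern that `drain_into_sorted` and `clear` enable follows, e.g. when sorting per-block state repeatedly. The accumulation step via `extend` is an assumption for illustration; the public `accounts`/`storages` fields follow the struct as shown in this file.

use reth_trie_common::HashedPostState; // import path approximate

// Reuse one `HashedPostState` as scratch space across iterations: draining
// empties its maps while keeping their capacity, so later iterations avoid
// re-allocating the storage that `into_sorted` would have consumed.
fn sort_batches(batches: impl IntoIterator<Item = HashedPostState>) {
    let mut scratch = HashedPostState::default();
    for batch in batches {
        scratch.extend(batch); // assumed accumulation helper on `HashedPostState`
        let sorted = scratch.drain_into_sorted();
        // After draining, the scratch value is empty but its maps retain
        // their allocated capacity for the next iteration.
        debug_assert!(scratch.accounts.is_empty() && scratch.storages.is_empty());
        let _ = sorted; // hand off to whatever consumes `HashedPostStateSorted`
    }
}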
diff --git a/crates/trie/parallel/Cargo.toml b/crates/trie/parallel/Cargo.toml index 3ee2c8b653d..a29268c2465 100644 --- a/crates/trie/parallel/Cargo.toml +++ b/crates/trie/parallel/Cargo.toml @@ -48,7 +48,6 @@ reth-trie = { workspace = true, features = ["test-utils"] } # misc rand.workspace = true -rayon.workspace = true criterion.workspace = true proptest.workspace = true proptest-arbitrary-interop.workspace = true diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index 4dc78106963..2e5813d55b0 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -19,14 +19,14 @@ use reth_provider::{ use reth_trie::{ hashed_cursor::HashedPostStateCursorFactory, prefix_set::TriePrefixSetsMut, - proof::{ProofBlindedProviderFactory, StorageProof}, + proof::{ProofTrieNodeProviderFactory, StorageProof}, trie_cursor::InMemoryTrieCursorFactory, updates::TrieUpdatesSorted, DecodedStorageMultiProof, HashedPostStateSorted, Nibbles, }; use reth_trie_common::prefix_set::{PrefixSet, PrefixSetMut}; use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; -use reth_trie_sparse::blinded::{BlindedProvider, BlindedProviderFactory, RevealedNode}; +use reth_trie_sparse::provider::{RevealedNode, TrieNodeProvider, TrieNodeProviderFactory}; use std::{ collections::VecDeque, sync::{ @@ -40,7 +40,7 @@ use tokio::runtime::Handle; use tracing::debug; type StorageProofResult = Result; -type BlindedNodeResult = Result, SparseTrieError>; +type TrieNodeProviderResult = Result, SparseTrieError>; /// A task that manages sending multiproof requests to a number of tasks that have longer-running /// database transactions @@ -291,7 +291,7 @@ where fn blinded_account_node( self, path: Nibbles, - result_sender: Sender, + result_sender: Sender, tx_sender: Sender>, ) { debug!( @@ -302,14 +302,14 @@ where let (trie_cursor_factory, hashed_cursor_factory) = self.create_factories(); - let blinded_provider_factory = ProofBlindedProviderFactory::new( + let blinded_provider_factory = ProofTrieNodeProviderFactory::new( trie_cursor_factory, hashed_cursor_factory, self.task_ctx.prefix_sets.clone(), ); let start = Instant::now(); - let result = blinded_provider_factory.account_node_provider().blinded_node(&path); + let result = blinded_provider_factory.account_node_provider().trie_node(&path); debug!( target: "trie::proof_task", ?path, @@ -335,7 +335,7 @@ where self, account: B256, path: Nibbles, - result_sender: Sender, + result_sender: Sender, tx_sender: Sender>, ) { debug!( @@ -347,14 +347,14 @@ where let (trie_cursor_factory, hashed_cursor_factory) = self.create_factories(); - let blinded_provider_factory = ProofBlindedProviderFactory::new( + let blinded_provider_factory = ProofTrieNodeProviderFactory::new( trie_cursor_factory, hashed_cursor_factory, self.task_ctx.prefix_sets.clone(), ); let start = Instant::now(); - let result = blinded_provider_factory.storage_node_provider(account).blinded_node(&path); + let result = blinded_provider_factory.storage_node_provider(account).trie_node(&path); debug!( target: "trie::proof_task", ?account, @@ -449,9 +449,9 @@ pub enum ProofTaskKind { /// A storage proof request. StorageProof(StorageProofInput, Sender), /// A blinded account node request. - BlindedAccountNode(Nibbles, Sender), + BlindedAccountNode(Nibbles, Sender), /// A blinded storage node request. 
- BlindedStorageNode(B256, Nibbles, Sender), + BlindedStorageNode(B256, Nibbles, Sender), } /// A handle that wraps a single proof task sender that sends a terminate message on `Drop` if the @@ -498,22 +498,22 @@ impl Drop for ProofTaskManagerHandle { } } -impl BlindedProviderFactory for ProofTaskManagerHandle { - type AccountNodeProvider = ProofTaskBlindedNodeProvider; - type StorageNodeProvider = ProofTaskBlindedNodeProvider; +impl TrieNodeProviderFactory for ProofTaskManagerHandle { + type AccountNodeProvider = ProofTaskTrieNodeProvider; + type StorageNodeProvider = ProofTaskTrieNodeProvider; fn account_node_provider(&self) -> Self::AccountNodeProvider { - ProofTaskBlindedNodeProvider::AccountNode { sender: self.sender.clone() } + ProofTaskTrieNodeProvider::AccountNode { sender: self.sender.clone() } } fn storage_node_provider(&self, account: B256) -> Self::StorageNodeProvider { - ProofTaskBlindedNodeProvider::StorageNode { account, sender: self.sender.clone() } + ProofTaskTrieNodeProvider::StorageNode { account, sender: self.sender.clone() } } } -/// Blinded node provider for retrieving trie nodes by path. +/// Trie node provider for retrieving trie nodes by path. #[derive(Debug)] -pub enum ProofTaskBlindedNodeProvider { +pub enum ProofTaskTrieNodeProvider { /// Blinded account trie node provider. AccountNode { /// Sender to the proof task. @@ -528,8 +528,8 @@ pub enum ProofTaskBlindedNodeProvider { }, } -impl BlindedProvider for ProofTaskBlindedNodeProvider { - fn blinded_node(&self, path: &Nibbles) -> Result, SparseTrieError> { +impl TrieNodeProvider for ProofTaskTrieNodeProvider { + fn trie_node(&self, path: &Nibbles) -> Result, SparseTrieError> { let (tx, rx) = channel(); match self { Self::AccountNode { sender } => { diff --git a/crates/trie/sparse-parallel/Cargo.toml b/crates/trie/sparse-parallel/Cargo.toml index 21764ff429f..41f9ab9ab1f 100644 --- a/crates/trie/sparse-parallel/Cargo.toml +++ b/crates/trie/sparse-parallel/Cargo.toml @@ -25,18 +25,37 @@ alloy-rlp.workspace = true # misc smallvec.workspace = true +rayon = { workspace = true, optional = true } [dev-dependencies] # reth reth-primitives-traits.workspace = true +reth-provider = { workspace = true, features = ["test-utils"] } reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] } -reth-trie.workspace = true +reth-trie-db.workspace = true reth-trie-sparse = { workspace = true, features = ["test-utils"] } +reth-trie.workspace = true +# misc arbitrary.workspace = true assert_matches.workspace = true itertools.workspace = true +pretty_assertions.workspace = true proptest-arbitrary-interop.workspace = true proptest.workspace = true rand.workspace = true rand_08.workspace = true + +[features] +default = ["std"] +std = [ + "dep:rayon", + "alloy-primitives/std", + "alloy-rlp/std", + "alloy-trie/std", + "reth-execution-errors/std", + "reth-primitives-traits/std", + "reth-trie-common/std", + "reth-trie-sparse/std", + "tracing/std", +] diff --git a/crates/trie/sparse-parallel/src/lib.rs b/crates/trie/sparse-parallel/src/lib.rs index 6a8a7048930..c4b7b10ea51 100644 --- a/crates/trie/sparse-parallel/src/lib.rs +++ b/crates/trie/sparse-parallel/src/lib.rs @@ -2,5 +2,10 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] +extern crate alloc; + mod trie; pub use trie::*; + +mod lower; +use lower::*; diff --git a/crates/trie/sparse-parallel/src/lower.rs b/crates/trie/sparse-parallel/src/lower.rs new file mode 100644 index 00000000000..449c3a7b29b --- /dev/null +++ 
b/crates/trie/sparse-parallel/src/lower.rs @@ -0,0 +1,109 @@ +use crate::SparseSubtrie; +use reth_trie_common::Nibbles; + +/// Tracks the state of the lower subtries. +/// +/// When a [`crate::ParallelSparseTrie`] is initialized/cleared then its `LowerSparseSubtrie`s are +/// all blinded, meaning they have no nodes. A blinded `LowerSparseSubtrie` may hold onto a cleared +/// [`SparseSubtrie`] in order to reuse allocations. +#[derive(Clone, Debug, Eq, PartialEq)] +pub(crate) enum LowerSparseSubtrie { + Blind(Option<Box<SparseSubtrie>>), + Revealed(Box<SparseSubtrie>), +} + +impl Default for LowerSparseSubtrie { + /// Creates a new blinded subtrie with no allocated storage. + fn default() -> Self { + Self::Blind(None) + } +} + +impl LowerSparseSubtrie { + /// Returns a reference to the underlying [`SparseSubtrie`] if this subtrie is revealed. + /// + /// Returns `None` if the subtrie is blinded (has no nodes). + pub(crate) fn as_revealed_ref(&self) -> Option<&SparseSubtrie> { + match self { + Self::Blind(_) => None, + Self::Revealed(subtrie) => Some(subtrie.as_ref()), + } + } + + /// Returns a mutable reference to the underlying [`SparseSubtrie`] if this subtrie is revealed. + /// + /// Returns `None` if the subtrie is blinded (has no nodes). + pub(crate) fn as_revealed_mut(&mut self) -> Option<&mut SparseSubtrie> { + match self { + Self::Blind(_) => None, + Self::Revealed(subtrie) => Some(subtrie.as_mut()), + } + } + + /// Reveals the lower [`SparseSubtrie`], transitioning it from the `Blind` to the `Revealed` + /// variant, preserving allocations if possible. + /// + /// The given path is the path of a node which will be set into the [`SparseSubtrie`]'s `nodes` + /// map immediately upon being revealed. If the subtrie is blinded, or if its current root path + /// is longer than this one, then this one becomes the new root path of the subtrie. + pub(crate) fn reveal(&mut self, path: &Nibbles) { + match self { + Self::Blind(allocated) => { + debug_assert!(allocated.as_ref().is_none_or(|subtrie| subtrie.is_empty())); + *self = if let Some(mut subtrie) = allocated.take() { + subtrie.path = *path; + Self::Revealed(subtrie) + } else { + Self::Revealed(Box::new(SparseSubtrie::new(*path))) + } + } + Self::Revealed(subtrie) => { + if path.len() < subtrie.path.len() { + subtrie.path = *path; + } + } + }; + } + + /// Clears the subtrie and transitions it to the blinded state, preserving a cleared + /// [`SparseSubtrie`] if possible. + pub(crate) fn clear(&mut self) { + *self = match core::mem::take(self) { + Self::Blind(allocated) => { + debug_assert!(allocated.as_ref().is_none_or(|subtrie| subtrie.is_empty())); + Self::Blind(allocated) + } + Self::Revealed(mut subtrie) => { + subtrie.clear(); + Self::Blind(Some(subtrie)) + } + } + } + + /// Takes ownership of the underlying [`SparseSubtrie`] if revealed, putting this + /// `LowerSparseSubtrie` into the blinded state. + /// + /// Otherwise returns `None`. + pub(crate) fn take_revealed(&mut self) -> Option<Box<SparseSubtrie>> { + self.take_revealed_if(|_| true) + } + + /// Takes ownership of the underlying [`SparseSubtrie`] if revealed and the predicate returns + /// true. + /// + /// If the subtrie is revealed, and the predicate function returns `true` when called with it, + /// then this method will take ownership of the subtrie and transition this `LowerSparseSubtrie` + /// to the blinded state. Otherwise, returns `None`. + pub(crate) fn take_revealed_if<P>

(&mut self, predicate: P) -> Option> + where + P: FnOnce(&SparseSubtrie) -> bool, + { + match self { + Self::Revealed(subtrie) if predicate(subtrie) => { + let Self::Revealed(subtrie) = core::mem::take(self) else { unreachable!() }; + Some(subtrie) + } + Self::Revealed(_) | Self::Blind(_) => None, + } + } +} diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index b2d8d147f8c..ffc40ded86b 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -1,3 +1,5 @@ +use crate::LowerSparseSubtrie; +use alloc::borrow::Cow; use alloy_primitives::{ map::{Entry, HashMap}, B256, @@ -10,11 +12,15 @@ use reth_trie_common::{ BranchNodeRef, ExtensionNodeRef, LeafNodeRef, Nibbles, RlpNode, TrieNode, CHILD_INDEX_RANGE, }; use reth_trie_sparse::{ - blinded::{BlindedProvider, RevealedNode}, - RlpNodeStackItem, SparseNode, SparseNodeType, SparseTrieUpdates, TrieMasks, + provider::{RevealedNode, TrieNodeProvider}, + LeafLookup, LeafLookupError, RevealedSparseNode, RlpNodeStackItem, SparseNode, SparseNodeType, + SparseTrieInterface, SparseTrieUpdates, TrieMasks, }; use smallvec::SmallVec; -use std::sync::mpsc; +use std::{ + cmp::{Ord, Ordering, PartialOrd}, + sync::mpsc, +}; use tracing::{instrument, trace}; /// The maximum length of a path, in nibbles, which belongs to the upper subtrie of a @@ -36,394 +42,385 @@ pub struct ParallelSparseTrie { /// This contains the trie nodes for the upper part of the trie. upper_subtrie: Box, /// An array containing the subtries at the second level of the trie. - lower_subtries: [Option>; NUM_LOWER_SUBTRIES], + lower_subtries: [LowerSparseSubtrie; NUM_LOWER_SUBTRIES], /// Set of prefixes (key paths) that have been marked as updated. /// This is used to track which parts of the trie need to be recalculated. prefix_set: PrefixSetMut, /// Optional tracking of trie updates for later use. updates: Option, + /// When a bit is set, the corresponding child subtree is stored in the database. + branch_node_tree_masks: HashMap, + /// When a bit is set, the corresponding child is stored as a hash in the database. + branch_node_hash_masks: HashMap, + /// Reusable buffer pool used for collecting [`SparseTrieUpdatesAction`]s during hash + /// computations. + update_actions_buffers: Vec>, } impl Default for ParallelSparseTrie { fn default() -> Self { Self { - upper_subtrie: Box::default(), - lower_subtries: [const { None }; NUM_LOWER_SUBTRIES], + upper_subtrie: Box::new(SparseSubtrie { + nodes: HashMap::from_iter([(Nibbles::default(), SparseNode::Empty)]), + ..Default::default() + }), + lower_subtries: [const { LowerSparseSubtrie::Blind(None) }; NUM_LOWER_SUBTRIES], prefix_set: PrefixSetMut::default(), updates: None, + branch_node_tree_masks: HashMap::default(), + branch_node_hash_masks: HashMap::default(), + update_actions_buffers: Vec::default(), } } } -impl ParallelSparseTrie { - /// Returns a mutable reference to the lower `SparseSubtrie` for the given path, or None if the - /// path belongs to the upper trie. - /// - /// This method will create a new lower subtrie if one doesn't exist for the given path. 
- fn lower_subtrie_for_path(&mut self, path: &Nibbles) -> Option<&mut Box> { - match SparseSubtrieType::from_path(path) { - SparseSubtrieType::Upper => None, - SparseSubtrieType::Lower(idx) => { - if self.lower_subtries[idx].is_none() { - let upper_path = path.slice(..UPPER_TRIE_MAX_DEPTH); - self.lower_subtries[idx] = Some(Box::new(SparseSubtrie::new(upper_path))); - } +impl SparseTrieInterface for ParallelSparseTrie { + fn with_root( + mut self, + root: TrieNode, + masks: TrieMasks, + retain_updates: bool, + ) -> SparseTrieResult { + // A fresh/cleared `ParallelSparseTrie` has a `SparseNode::Empty` at its root in the upper + // subtrie. Delete that so we can reveal the new root node. + let path = Nibbles::default(); + let _removed_root = self.upper_subtrie.nodes.remove(&path).expect("root node should exist"); + debug_assert_eq!(_removed_root, SparseNode::Empty); - self.lower_subtries[idx].as_mut() - } - } + self = self.with_updates(retain_updates); + + self.reveal_upper_node(Nibbles::default(), &root, masks)?; + Ok(self) } - /// Returns a mutable reference to either the lower or upper `SparseSubtrie` for the given path, - /// depending on the path's length. - /// - /// This method will create a new lower subtrie if one doesn't exist for the given path. - fn subtrie_for_path(&mut self, path: &Nibbles) -> &mut Box { - match SparseSubtrieType::from_path(path) { - SparseSubtrieType::Upper => &mut self.upper_subtrie, - SparseSubtrieType::Lower(idx) => { - if self.lower_subtries[idx].is_none() { - let upper_path = path.slice(..UPPER_TRIE_MAX_DEPTH); - self.lower_subtries[idx] = Some(Box::new(SparseSubtrie::new(upper_path))); - } + fn with_updates(mut self, retain_updates: bool) -> Self { + self.updates = retain_updates.then(Default::default); + self + } + + fn reveal_nodes(&mut self, mut nodes: Vec) -> SparseTrieResult<()> { + if nodes.is_empty() { + return Ok(()) + } + + // Sort nodes first by their subtrie, and secondarily by their path. This allows for + // grouping nodes by their subtrie using `chunk_by`. + nodes.sort_unstable_by( + |RevealedSparseNode { path: path_a, .. }, RevealedSparseNode { path: path_b, .. }| { + let subtrie_type_a = SparseSubtrieType::from_path(path_a); + let subtrie_type_b = SparseSubtrieType::from_path(path_b); + subtrie_type_a.cmp(&subtrie_type_b).then(path_a.cmp(path_b)) + }, + ); - self.lower_subtries[idx].as_mut().unwrap() + // Update the top-level branch node masks. This is simple and can't be done in parallel. + for RevealedSparseNode { path, masks, .. } in &nodes { + if let Some(tree_mask) = masks.tree_mask { + self.branch_node_tree_masks.insert(*path, tree_mask); + } + if let Some(hash_mask) = masks.hash_mask { + self.branch_node_hash_masks.insert(*path, hash_mask); } } - } - /// Creates a new revealed sparse trie from the given root node. - /// - /// # Returns - /// - /// A [`ParallelSparseTrie`] if successful, or an error if revealing fails. - pub fn from_root( - root_node: TrieNode, - masks: TrieMasks, - retain_updates: bool, - ) -> SparseTrieResult { - let mut trie = Self::default().with_updates(retain_updates); - trie.reveal_node(Nibbles::default(), root_node, masks)?; - Ok(trie) - } + // Due to the sorting all upper subtrie nodes will be at the front of the slice. We split + // them off from the rest to be handled specially by + // `ParallelSparseTrie::reveal_upper_node`. 
+ let num_upper_nodes = nodes + .iter() + .position(|n| !SparseSubtrieType::path_len_is_upper(n.path.len())) + .unwrap_or(nodes.len()); - /// Reveals a trie node if it has not been revealed before. - /// - /// This internal function decodes a trie node and inserts it into the nodes map. - /// It handles different node types (leaf, extension, branch) by appropriately - /// adding them to the trie structure and recursively revealing their children. - /// - /// # Returns - /// - /// `Ok(())` if successful, or an error if node was not revealed. - pub fn reveal_node( - &mut self, - path: Nibbles, - node: TrieNode, - masks: TrieMasks, - ) -> SparseTrieResult<()> { - if let Some(subtrie) = self.lower_subtrie_for_path(&path) { - return subtrie.reveal_node(path, &node, masks); + let upper_nodes = &nodes[..num_upper_nodes]; + let lower_nodes = &nodes[num_upper_nodes..]; + + // Reserve the capacity of the upper subtrie's `nodes` HashMap before iterating, so we don't + // end up making many small capacity changes as we loop. + self.upper_subtrie.nodes.reserve(upper_nodes.len()); + for node in upper_nodes { + self.reveal_upper_node(node.path, &node.node, node.masks)?; } - // If there is no subtrie for the path it means the path is UPPER_TRIE_MAX_DEPTH or less - // nibbles, and so belongs to the upper trie. - self.upper_subtrie.reveal_node(path, &node, masks)?; + #[cfg(not(feature = "std"))] + // Reveal lower subtrie nodes serially if nostd + { + for node in lower_nodes { + if let Some(subtrie) = self.lower_subtrie_for_path_mut(&node.path) { + subtrie.reveal_node(node.path, &node.node, &node.masks)?; + } else { + panic!("upper subtrie node {node:?} found amongst lower nodes"); + } + } + Ok(()) + } - // The previous upper_trie.reveal_node call will not have revealed any child nodes via - // reveal_node_or_hash if the child node would be found on a lower subtrie. We handle that - // here by manually checking the specific cases where this could happen, and calling - // reveal_node_or_hash for each. - match node { - TrieNode::Branch(branch) => { - // If a branch is at the cutoff level of the trie then it will be in the upper trie, - // but all of its children will be in a lower trie. Check if a child node would be - // in the lower subtrie, and reveal accordingly. - if !SparseSubtrieType::path_len_is_upper(path.len() + 1) { - let mut stack_ptr = branch.as_ref().first_child_index(); - for idx in CHILD_INDEX_RANGE { - if branch.state_mask.is_bit_set(idx) { - let mut child_path = path; - child_path.push_unchecked(idx); - self.lower_subtrie_for_path(&child_path) - .expect("child_path must have a lower subtrie") - .reveal_node_or_hash(child_path, &branch.stack[stack_ptr])?; - stack_ptr += 1; + #[cfg(feature = "std")] + // Reveal lower subtrie nodes in parallel + { + use rayon::iter::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator}; + + // Group the nodes by lower subtrie. This must be collected into a Vec in order for + // rayon's `zip` to be happy. + let node_groups: Vec<_> = lower_nodes + .chunk_by(|node_a, node_b| { + SparseSubtrieType::from_path(&node_a.path) == + SparseSubtrieType::from_path(&node_b.path) + }) + .collect(); + + // Take the lower subtries in the same order that the nodes were grouped into, so that + // the two can be zipped together. This also must be collected into a Vec for rayon's + // `zip` to be happy. 
+ let lower_subtries: Vec<_> = node_groups + .iter() + .map(|nodes| { + // NOTE: chunk_by won't produce empty groups + let node = &nodes[0]; + let idx = + SparseSubtrieType::from_path(&node.path).lower_index().unwrap_or_else( + || panic!("upper subtrie node {node:?} found amongst lower nodes"), + ); + // due to the nodes being sorted secondarily on their path, and chunk_by keeping + // the first element of each group, the `path` here will necessarily be the + // shortest path being revealed for each subtrie. Therefore we can reveal the + // subtrie itself using this path and retain correct behavior. + self.lower_subtries[idx].reveal(&node.path); + (idx, self.lower_subtries[idx].take_revealed().expect("just revealed")) + }) + .collect(); + + let (tx, rx) = mpsc::channel(); + + // Zip the lower subtries and their corresponding node groups, and reveal lower subtrie + // nodes in parallel + lower_subtries + .into_par_iter() + .zip(node_groups.into_par_iter()) + .map(|((subtrie_idx, mut subtrie), nodes)| { + // reserve space in the HashMap ahead of time; doing it on a node-by-node basis + // can cause multiple re-allocations as the hashmap grows. + subtrie.nodes.reserve(nodes.len()); + + for node in nodes { + // Reveal each node in the subtrie, returning early on any errors + let res = subtrie.reveal_node(node.path, &node.node, node.masks); + if res.is_err() { + return (subtrie_idx, subtrie, res) } } + (subtrie_idx, subtrie, Ok(())) + }) + .for_each_init(|| tx.clone(), |tx, result| tx.send(result).unwrap()); + + drop(tx); + + // Take back all lower subtries which were sent to the rayon pool, collecting the last + // seen error in the process and returning that. If we don't fully drain the channel + // then we lose lower sparse tries, putting the whole ParallelSparseTrie in an + // inconsistent state. + let mut any_err = Ok(()); + for (subtrie_idx, subtrie, res) in rx { + self.lower_subtries[subtrie_idx] = LowerSparseSubtrie::Revealed(subtrie); + if res.is_err() { + any_err = res; } } - TrieNode::Extension(ext) => { - let mut child_path = path; - child_path.extend(&ext.key); - if let Some(subtrie) = self.lower_subtrie_for_path(&child_path) { - subtrie.reveal_node_or_hash(child_path, &ext.child)?; - } - } - TrieNode::EmptyRoot | TrieNode::Leaf(_) => (), - } - Ok(()) + any_err + } } - /// Updates or inserts a leaf node at the specified key path with the provided RLP-encoded - /// value. - /// - /// This method updates the internal prefix set and, if the leaf did not previously exist, - /// adjusts the trie structure by inserting new leaf nodes, splitting branch nodes, or - /// collapsing extension nodes as needed. - /// - /// # Returns - /// - /// Returns `Ok(())` if the update is successful. - /// - /// Note: If an update requires revealing a blinded node, an error is returned if the blinded - /// provider returns an error. 
- pub fn update_leaf( + fn update_leaf( &mut self, - key_path: Nibbles, + full_path: Nibbles, value: Vec, - masks: TrieMasks, - provider: impl BlindedProvider, + provider: P, ) -> SparseTrieResult<()> { - let _key_path = key_path; - let _value = value; - let _masks = masks; - let _provider = provider; - todo!() - } + self.prefix_set.insert(full_path); + let existing = self.upper_subtrie.inner.values.insert(full_path, value.clone()); + if existing.is_some() { + // upper trie structure unchanged, return immediately + return Ok(()) + } - /// Returns the next node in the traversal path from the given path towards the leaf for the - /// given full leaf path, or an error if any node along the traversal path is not revealed. - /// - /// - /// ## Panics - /// - /// If `from_path` is not a prefix of `leaf_full_path`. - fn find_next_to_leaf( - from_path: &Nibbles, - from_node: &SparseNode, - leaf_full_path: &Nibbles, - ) -> SparseTrieResult { - debug_assert!(leaf_full_path.len() >= from_path.len()); - debug_assert!(leaf_full_path.starts_with(from_path)); + let retain_updates = self.updates_enabled(); - match from_node { - SparseNode::Empty => Err(SparseTrieErrorKind::Blind.into()), - SparseNode::Hash(hash) => { - Err(SparseTrieErrorKind::BlindedNode { path: *from_path, hash: *hash }.into()) - } - SparseNode::Leaf { key, .. } => { - let mut found_full_path = *from_path; - found_full_path.extend(key); + // Start at the root, traversing until we find either the node to update or a subtrie to + // update. + // + // We first traverse the upper subtrie for two levels, and moving any created nodes to a + // lower subtrie if necessary. + // + // We use `next` to keep track of the next node that we need to traverse to, and + // `new_nodes` to keep track of any nodes that were created during the traversal. + let mut new_nodes = Vec::new(); + let mut next = Some(Nibbles::default()); - if &found_full_path == leaf_full_path { - return Ok(FindNextToLeafOutcome::Found) - } - Ok(FindNextToLeafOutcome::NotFound) - } - SparseNode::Extension { key, .. } => { - if leaf_full_path.len() == from_path.len() { - return Ok(FindNextToLeafOutcome::NotFound) + // Traverse the upper subtrie to find the node to update or the subtrie to update. + // + // We stop when the next node to traverse would be in a lower subtrie, or if there are no + // more nodes to traverse. + while let Some(current) = + next.filter(|next| SparseSubtrieType::path_len_is_upper(next.len())) + { + // Traverse the next node, keeping track of any changed nodes and the next step in the + // trie + match self.upper_subtrie.update_next_node(current, &full_path, retain_updates)? { + LeafUpdateStep::Continue { next_node } => { + next = Some(next_node); } + LeafUpdateStep::Complete { inserted_nodes, reveal_path } => { + new_nodes.extend(inserted_nodes); - let mut child_path = *from_path; - child_path.extend(key); + if let Some(reveal_path) = reveal_path { + let subtrie = self.subtrie_for_path_mut(&reveal_path); + if subtrie.nodes.get(&reveal_path).expect("node must exist").is_hash() { + if let Some(RevealedNode { node, tree_mask, hash_mask }) = + provider.trie_node(&reveal_path)? 
+ { + let decoded = TrieNode::decode(&mut &node[..])?; + trace!( + target: "trie::parallel_sparse", + ?reveal_path, + ?decoded, + ?tree_mask, + ?hash_mask, + "Revealing child", + ); + subtrie.reveal_node( + reveal_path, + &decoded, + TrieMasks { hash_mask, tree_mask }, + )?; + } else { + return Err(SparseTrieErrorKind::NodeNotFoundInProvider { + path: reveal_path, + } + .into()) + } + } + } + + next = None; + } + LeafUpdateStep::NodeNotFound => { + next = None; + } + } + } + + // Move nodes from the upper subtrie to lower subtries + for node_path in &new_nodes { + // Skip nodes that belong in the upper subtrie + if SparseSubtrieType::path_len_is_upper(node_path.len()) { + continue + } + + let node = + self.upper_subtrie.nodes.remove(node_path).expect("node belongs to upper subtrie"); + + // If it's a leaf node, extract its value before getting a mutable reference to the + // subtrie. We also add the leaf to the prefix set, so that whichever lower subtrie it + // belongs to will have its hash recalculated as part of `update_subtrie_hashes`. + let leaf_value = if let SparseNode::Leaf { key, .. } = &node { + let mut leaf_full_path = *node_path; + leaf_full_path.extend(key); + self.prefix_set.insert(leaf_full_path); + Some(( + leaf_full_path, + self.upper_subtrie + .inner + .values + .remove(&leaf_full_path) + .expect("leaf nodes have associated values entries"), + )) + } else { + None + }; + + // Get or create the subtrie with the exact node path (not truncated to 2 nibbles). + let subtrie = self.subtrie_for_path_mut(node_path); + + // Insert the leaf value if we have one + if let Some((leaf_full_path, value)) = leaf_value { + subtrie.inner.values.insert(leaf_full_path, value); + } + + // Insert the node into the lower subtrie + subtrie.nodes.insert(*node_path, node); + } + + // If we reached the max depth of the upper trie, we may have had more nodes to insert. + if let Some(next_path) = next.filter(|n| !SparseSubtrieType::path_len_is_upper(n.len())) { + // The value was inserted into the upper subtrie's `values` at the top of this method. + // At this point we know the value is not in the upper subtrie, and the call to + // `update_leaf` below will insert it into the lower subtrie. So remove it from the + // upper subtrie. + self.upper_subtrie.inner.values.remove(&full_path); + + // Use `subtrie_for_path_mut` to ensure the subtrie has the correct path. + // + // The next_path here represents where we need to continue traversal, which may + // be longer than 2 nibbles if we're following an extension node. + let subtrie = self.subtrie_for_path_mut(&next_path); + + // Create an empty root at the subtrie path if the subtrie is empty + if subtrie.nodes.is_empty() { + subtrie.nodes.insert(subtrie.path, SparseNode::Empty); + } + + // If we didn't update the target leaf, we need to call update_leaf on the subtrie + // to ensure that the leaf is updated correctly.
+ subtrie.update_leaf(full_path, value, provider, retain_updates)?; } + + Ok(()) } - /// Called when a child node has collapsed into its parent as part of `remove_leaf`. If the - /// new parent node is a leaf, then the previous child also was, and if the previous child was - /// on a lower subtrie while the parent is on an upper then the leaf value needs to be moved to - /// the upper. - fn move_value_on_leaf_removal( + fn remove_leaf( &mut self, - parent_path: &Nibbles, - new_parent_node: &SparseNode, - prev_child_path: &Nibbles, - ) { - // If the parent path isn't in the upper then it doesn't matter what the new node is, - // there's no situation where a leaf value needs to be moved. - if SparseSubtrieType::from_path(parent_path).lower_index().is_some() { - return; - } + full_path: &Nibbles, + provider: P, + ) -> SparseTrieResult<()> { + // When removing a leaf node it's possibly necessary to modify its parent node, and possibly + // the parent's parent node. It is not ever necessary to descend further than that; once an + // extension node is hit it must terminate in a branch or the root, which won't need further + // updates. So the situation with maximum updates is: + // + // - Leaf + // - Branch with 2 children, one being this leaf + // - Extension + // + // ...which will result in just a leaf or extension, depending on what the branch's other + // child is. + // + // Therefore, first traverse the trie in order to find the leaf node and at most its parent + // and grandparent. - if let SparseNode::Leaf { key, .. } = new_parent_node { - let Some(prev_child_subtrie) = self.lower_subtrie_for_path(prev_child_path) else { - return; - }; + let leaf_path; + let leaf_subtrie; - let mut leaf_full_path = *parent_path; - leaf_full_path.extend(key); + let mut branch_parent_path: Option = None; + let mut branch_parent_node: Option = None; - let val = prev_child_subtrie.inner.values.remove(&leaf_full_path).expect("ParallelSparseTrie is in an inconsistent state, expected value on subtrie which wasn't found"); - self.upper_subtrie.inner.values.insert(leaf_full_path, val); - } - } + let mut ext_grandparent_path: Option = None; + let mut ext_grandparent_node: Option = None; - /// Given the path to a parent branch node and a child node which is the sole remaining child on - /// that branch after removing a leaf, returns a node to replace the parent branch node and a - /// boolean indicating if the child should be deleted. - /// - /// ## Panics - /// - /// - If either parent or child node is not already revealed. - /// - If parent's path is not a prefix of the child's path. - fn branch_changes_on_leaf_removal( - parent_path: &Nibbles, - remaining_child_path: &Nibbles, - remaining_child_node: &SparseNode, - ) -> (SparseNode, bool) { - debug_assert!(remaining_child_path.len() > parent_path.len()); - debug_assert!(remaining_child_path.starts_with(parent_path)); + let mut curr_path = Nibbles::new(); // start traversal from root + let mut curr_subtrie = self.upper_subtrie.as_mut(); + let mut curr_subtrie_is_upper = true; - let remaining_child_nibble = remaining_child_path.get_unchecked(parent_path.len()); - - // If we swap the branch node out either an extension or leaf, depending on - // what its remaining child is. 
- match remaining_child_node { - SparseNode::Empty | SparseNode::Hash(_) => { - panic!("remaining child must have been revealed already") - } - // If the only child is a leaf node, we downgrade the branch node into a - // leaf node, prepending the nibble to the key, and delete the old - // child. - SparseNode::Leaf { key, .. } => { - let mut new_key = Nibbles::from_nibbles_unchecked([remaining_child_nibble]); - new_key.extend(key); - (SparseNode::new_leaf(new_key), true) - } - // If the only child node is an extension node, we downgrade the branch - // node into an even longer extension node, prepending the nibble to the - // key, and delete the old child. - SparseNode::Extension { key, .. } => { - let mut new_key = Nibbles::from_nibbles_unchecked([remaining_child_nibble]); - new_key.extend(key); - (SparseNode::new_ext(new_key), true) - } - // If the only child is a branch node, we downgrade the current branch - // node into a one-nibble extension node. - SparseNode::Branch { .. } => ( - SparseNode::new_ext(Nibbles::from_nibbles_unchecked([remaining_child_nibble])), - false, - ), - } - } - - /// Given the path to a parent extension and its key, and a child node (not necessarily on this - /// subtrie), returns an optional replacement parent node. If a replacement is returned then the - /// child node should be deleted. - /// - /// ## Panics - /// - /// - If either parent or child node is not already revealed. - /// - If parent's path is not a prefix of the child's path. - fn extension_changes_on_leaf_removal( - parent_path: &Nibbles, - parent_key: &Nibbles, - child_path: &Nibbles, - child: &SparseNode, - ) -> Option { - debug_assert!(child_path.len() > parent_path.len()); - debug_assert!(child_path.starts_with(parent_path)); - - // If the parent node is an extension node, we need to look at its child to see - // if we need to merge it. - match child { - SparseNode::Empty | SparseNode::Hash(_) => { - panic!("child must be revealed") - } - // For a leaf node, we collapse the extension node into a leaf node, - // extending the key. While it's impossible to encounter an extension node - // followed by a leaf node in a complete trie, it's possible here because we - // could have downgraded the extension node's child into a leaf node from a - // branch in a previous call to `branch_changes_on_leaf_removal`. - SparseNode::Leaf { key, .. } => { - let mut new_key = *parent_key; - new_key.extend(key); - Some(SparseNode::new_leaf(new_key)) - } - // Similar to the leaf node, for an extension node, we collapse them into one - // extension node, extending the key. - SparseNode::Extension { key, .. } => { - let mut new_key = *parent_key; - new_key.extend(key); - Some(SparseNode::new_ext(new_key)) - } - // For a branch node, we just leave the extension node as-is. - SparseNode::Branch { .. } => None, - } - } - - /// Removes a leaf node from the trie at the specified full path of a value (that is, the leaf's - /// path + its key). - /// - /// This function removes the leaf value from the internal values map and then traverses - /// the trie to remove or adjust intermediate nodes, merging or collapsing them as necessary. - /// - /// # Returns - /// - /// Returns `Ok(())` if the leaf is successfully removed or was not present in the trie, - /// otherwise returns an error if a blinded node prevents removal. 
- pub fn remove_leaf( - &mut self, - leaf_full_path: &Nibbles, - provider: impl BlindedProvider, - ) -> SparseTrieResult<()> { - // When removing a leaf node it's possibly necessary to modify its parent node, and possibly - // the parent's parent node. It is not ever necessary to descend further than that; once an - // extension node is hit it must terminate in a branch or the root, which won't need further - // updates. So the situation with maximum updates is: - // - // - Leaf - // - Branch with 2 children, one being this leaf - // - Extension - // - // ...which will result in just a leaf or extension, depending on what the branch's other - // child is. - // - // Therefore, first traverse the trie in order to find the leaf node and at most its parent - // and grandparent. - - let leaf_path; - let leaf_subtrie; - - let mut branch_parent_path: Option = None; - let mut branch_parent_node: Option = None; - - let mut ext_grandparent_path: Option = None; - let mut ext_grandparent_node: Option = None; - - let mut curr_path = Nibbles::new(); // start traversal from root - let mut curr_subtrie = self.upper_subtrie.as_mut(); - let mut curr_subtrie_is_upper = true; + // List of node paths which need to have their hashes reset + let mut paths_to_reset_hashes = Vec::new(); loop { let curr_node = curr_subtrie.nodes.get_mut(&curr_path).unwrap(); - match Self::find_next_to_leaf(&curr_path, curr_node, leaf_full_path)? { + match Self::find_next_to_leaf(&curr_path, curr_node, full_path) { FindNextToLeafOutcome::NotFound => return Ok(()), // leaf isn't in the trie + FindNextToLeafOutcome::BlindedNode(hash) => { + return Err(SparseTrieErrorKind::BlindedNode { path: curr_path, hash }.into()) + } FindNextToLeafOutcome::Found => { // this node is the target leaf leaf_path = curr_path; @@ -435,7 +432,10 @@ impl ParallelSparseTrie { // field unset, as it will no longer be valid once the leaf is removed. match curr_node { SparseNode::Branch { hash, .. } => { - *hash = None; + if hash.is_some() { + paths_to_reset_hashes + .push((SparseSubtrieType::from_path(&curr_path), curr_path)); + } // If there is already an extension leading into a branch, then that // extension is no longer relevant. @@ -450,7 +450,10 @@ impl ParallelSparseTrie { branch_parent_node = Some(curr_node.clone()); } SparseNode::Extension { hash, .. } => { - *hash = None; + if hash.is_some() { + paths_to_reset_hashes + .push((SparseSubtrieType::from_path(&curr_path), curr_path)); + } // We can assume a new branch node will be found after the extension, so // there's no need to modify branch_parent_path/node even if it's @@ -459,7 +462,9 @@ impl ParallelSparseTrie { ext_grandparent_node = Some(curr_node.clone()); } SparseNode::Empty | SparseNode::Hash(_) | SparseNode::Leaf { .. } => { - unreachable!("find_next_to_leaf errors on non-revealed node, and return Found or NotFound on Leaf") + unreachable!( + "find_next_to_leaf only continues to a branch or extension" + ) } } @@ -471,7 +476,9 @@ impl ParallelSparseTrie { if let SparseSubtrieType::Lower(idx) = SparseSubtrieType::from_path(&curr_path) { - curr_subtrie = self.lower_subtries[idx].as_mut().unwrap(); + curr_subtrie = self.lower_subtries[idx] + .as_revealed_mut() + .expect("lower subtrie is revealed"); curr_subtrie_is_upper = false; } } @@ -480,10 +487,30 @@ impl ParallelSparseTrie { } // We've traversed to the leaf and collected its ancestors as necessary. Remove the leaf - // from its SparseSubtrie. 
- self.prefix_set.insert(*leaf_full_path); - leaf_subtrie.inner.values.remove(leaf_full_path); - leaf_subtrie.nodes.remove(&leaf_path); + // from its SparseSubtrie and reset the hashes of the nodes along the path. + self.prefix_set.insert(*full_path); + leaf_subtrie.inner.values.remove(full_path); + for (subtrie_type, path) in paths_to_reset_hashes { + let node = match subtrie_type { + SparseSubtrieType::Upper => self.upper_subtrie.nodes.get_mut(&path), + SparseSubtrieType::Lower(idx) => self.lower_subtries[idx] + .as_revealed_mut() + .expect("lower subtrie is revealed") + .nodes + .get_mut(&path), + } + .expect("node exists"); + + match node { + SparseNode::Extension { hash, .. } | SparseNode::Branch { hash, .. } => { + *hash = None + } + SparseNode::Empty | SparseNode::Hash(_) | SparseNode::Leaf { .. } => { + unreachable!("only branch and extension node hashes can be reset") + } + } + } + self.remove_node(&leaf_path); // If the leaf was at the root replace its node with the empty value. We can stop execution // here, all remaining logic is related to the ancestors of the leaf. @@ -519,7 +546,7 @@ impl ParallelSparseTrie { "Branch node has only one child", ); - let remaining_child_subtrie = self.subtrie_for_path(&remaining_child_path); + let remaining_child_subtrie = self.subtrie_for_path_mut(&remaining_child_path); // If the remaining child node is not yet revealed then we have to reveal it here, // otherwise it's not possible to know how to collapse the branch. @@ -532,7 +559,7 @@ impl ParallelSparseTrie { "Retrieving remaining blinded branch child", ); if let Some(RevealedNode { node, tree_mask, hash_mask }) = - provider.blinded_node(&remaining_child_path)? + provider.trie_node(&remaining_child_path)? { let decoded = TrieNode::decode(&mut &node[..])?; trace!( @@ -566,12 +593,12 @@ impl ParallelSparseTrie { ); if remove_child { - remaining_child_subtrie.nodes.remove(&remaining_child_path); self.move_value_on_leaf_removal( branch_path, &new_branch_node, &remaining_child_path, ); + self.remove_node(&remaining_child_path); } if let Some(updates) = self.updates.as_mut() { @@ -586,7 +613,7 @@ impl ParallelSparseTrie { SparseNode::new_branch(state_mask) }; - let branch_subtrie = self.subtrie_for_path(branch_path); + let branch_subtrie = self.subtrie_for_path_mut(branch_path); branch_subtrie.nodes.insert(*branch_path, new_branch_node.clone()); branch_parent_node = Some(new_branch_node); }; @@ -597,7 +624,7 @@ impl ParallelSparseTrie { if let (Some(ext_path), Some(SparseNode::Extension { key: shortkey, .. })) = (ext_grandparent_path, &ext_grandparent_node) { - let ext_subtrie = self.subtrie_for_path(&ext_path); + let ext_subtrie = self.subtrie_for_path_mut(&ext_path); let branch_path = branch_parent_path.as_ref().unwrap(); if let Some(new_ext_node) = Self::extension_changes_on_leaf_removal( @@ -607,23 +634,30 @@ impl ParallelSparseTrie { branch_parent_node.as_ref().unwrap(), ) { ext_subtrie.nodes.insert(ext_path, new_ext_node.clone()); - self.subtrie_for_path(branch_path).nodes.remove(branch_path); self.move_value_on_leaf_removal(&ext_path, &new_ext_node, branch_path); + self.remove_node(branch_path); } } Ok(()) } - /// Recalculates and updates the RLP hashes of nodes up to level [`UPPER_TRIE_MAX_DEPTH`] of the - /// trie. - /// - /// The root node is considered to be at level 0. This method is useful for optimizing - /// hash recalculations after localized changes to the trie structure. 
- /// - /// This function first identifies all nodes that have changed (based on the prefix set) below - /// level [`UPPER_TRIE_MAX_DEPTH`] of the trie, then recalculates their RLP representation. - pub fn update_lower_subtrie_hashes(&mut self) { + fn root(&mut self) -> B256 { + trace!(target: "trie::parallel_sparse", "Calculating trie root hash"); + + // Update all lower subtrie hashes + self.update_subtrie_hashes(); + + // Update hashes for the upper subtrie using our specialized function + // that can access both upper and lower subtrie nodes + let mut prefix_set = core::mem::take(&mut self.prefix_set).freeze(); + let root_rlp = self.update_upper_subtrie_hashes(&mut prefix_set); + + // Return the root hash + root_rlp.as_hash().unwrap_or(EMPTY_ROOT_HASH) + } + + fn update_subtrie_hashes(&mut self) { trace!(target: "trie::parallel_sparse", "Updating subtrie hashes"); // Take changed subtries according to the prefix set @@ -633,2206 +667,5712 @@ impl ParallelSparseTrie { // Update the prefix set with the keys that didn't have matching subtries self.prefix_set = unchanged_prefix_set; - // Update subtrie hashes in parallel - // TODO: call `update_hashes` on each subtrie in parallel let (tx, rx) = mpsc::channel(); - for ChangedSubtrie { index, mut subtrie, mut prefix_set } in subtries { - subtrie.update_hashes(&mut prefix_set); - tx.send((index, subtrie)).unwrap(); - } - drop(tx); - // Return updated subtries back to the trie - for (index, subtrie) in rx { - self.lower_subtries[index] = Some(subtrie); + #[cfg(not(feature = "std"))] + // Update subtrie hashes serially if nostd + for ChangedSubtrie { index, mut subtrie, mut prefix_set, mut update_actions_buf } in + subtries + { + subtrie.update_hashes( + &mut prefix_set, + &mut update_actions_buf, + &self.branch_node_tree_masks, + &self.branch_node_hash_masks, + ); + tx.send((index, subtrie, update_actions_buf)).unwrap(); } - } - /// Updates hashes for the upper subtrie, using nodes from both upper and lower subtries. 
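// The parallel update above pairs rayon's fan-out with an mpsc channel for fan-in.
// Below is a minimal, self-contained sketch of the same pattern; `Subtrie` and
// `recompute_hash` are hypothetical stand-ins for exposition, not reth APIs.
use std::sync::mpsc;
use rayon::iter::{IntoParallelIterator, ParallelIterator};

struct Subtrie {
    index: usize,
    hash: u64,
}

fn recompute_hash(subtrie: &Subtrie) -> u64 {
    subtrie.index as u64 // placeholder for the real hash computation
}

fn update_all(subtries: Vec<Subtrie>) -> Vec<Subtrie> {
    let (tx, rx) = mpsc::channel();
    subtries
        .into_par_iter()
        .map(|mut subtrie| {
            // Each subtrie is independent, so hashing parallelizes cleanly.
            subtrie.hash = recompute_hash(&subtrie);
            subtrie
        })
        // Clone the sender once per rayon worker, then send each result back.
        .for_each_init(|| tx.clone(), |tx, subtrie| tx.send(subtrie).unwrap());
    drop(tx); // close the channel so the collection below terminates
    rx.into_iter().collect()
}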
- #[instrument(level = "trace", target = "engine::tree", skip_all, ret)] - fn update_upper_subtrie_hashes(&mut self, prefix_set: &mut PrefixSet) -> RlpNode { - trace!(target: "trie::parallel_sparse", "Updating upper subtrie hashes"); + #[cfg(feature = "std")] + // Update subtrie hashes in parallel + { + use rayon::iter::{IntoParallelIterator, ParallelIterator}; + let branch_node_tree_masks = &self.branch_node_tree_masks; + let branch_node_hash_masks = &self.branch_node_hash_masks; + subtries + .into_par_iter() + .map( + |ChangedSubtrie { + index, + mut subtrie, + mut prefix_set, + mut update_actions_buf, + }| { + subtrie.update_hashes( + &mut prefix_set, + &mut update_actions_buf, + branch_node_tree_masks, + branch_node_hash_masks, + ); + (index, subtrie, update_actions_buf) + }, + ) + .for_each_init(|| tx.clone(), |tx, result| tx.send(result).unwrap()); + } - debug_assert!(self.upper_subtrie.inner.buffers.path_stack.is_empty()); - self.upper_subtrie.inner.buffers.path_stack.push(RlpNodePathStackItem { - path: Nibbles::default(), // Start from root - is_in_prefix_set: None, - }); + drop(tx); - while let Some(stack_item) = self.upper_subtrie.inner.buffers.path_stack.pop() { - let path = stack_item.path; - let node = if path.len() < UPPER_TRIE_MAX_DEPTH { - self.upper_subtrie.nodes.get_mut(&path).expect("upper subtrie node must exist") - } else { - let index = path_subtrie_index_unchecked(&path); - let node = self.lower_subtries[index] - .as_mut() - .expect("lower subtrie must exist") - .nodes - .get_mut(&path) - .expect("lower subtrie node must exist"); - // Lower subtrie root node hashes must be computed before updating upper subtrie - // hashes - debug_assert!(node.hash().is_some()); - node - }; + // Return updated subtries back to the trie after executing any actions required on the + // top-level `SparseTrieUpdates`. + for (index, subtrie, update_actions_buf) in rx { + if let Some(mut update_actions_buf) = update_actions_buf { + self.apply_subtrie_update_actions( + #[allow(clippy::iter_with_drain)] + update_actions_buf.drain(..), + ); + self.update_actions_buffers.push(update_actions_buf); + } - // Calculate the RLP node for the current node using upper subtrie - self.upper_subtrie.inner.rlp_node(prefix_set, stack_item, node); + self.lower_subtries[index] = LowerSparseSubtrie::Revealed(subtrie); } - - debug_assert_eq!(self.upper_subtrie.inner.buffers.rlp_node_stack.len(), 1); - self.upper_subtrie.inner.buffers.rlp_node_stack.pop().unwrap().rlp_node } - /// Calculates and returns the root hash of the trie. - /// - /// Before computing the hash, this function processes any remaining (dirty) nodes by - /// updating their RLP encodings. The root hash is either: - /// 1. The cached hash (if no dirty nodes were found) - /// 2. 
The keccak256 hash of the root node's RLP representation - pub fn root(&mut self) -> B256 { - trace!(target: "trie::parallel_sparse", "Calculating trie root hash"); - - // Update all lower subtrie hashes - self.update_lower_subtrie_hashes(); + fn get_leaf_value(&self, full_path: &Nibbles) -> Option<&Vec<u8>> { + self.subtrie_for_path(full_path).and_then(|subtrie| subtrie.inner.values.get(full_path)) + } - // Update hashes for the upper subtrie using our specialized function - // that can access both upper and lower subtrie nodes - let mut prefix_set = core::mem::take(&mut self.prefix_set).freeze(); - let root_rlp = self.update_upper_subtrie_hashes(&mut prefix_set); + fn updates_ref(&self) -> Cow<'_, SparseTrieUpdates> { + self.updates.as_ref().map_or(Cow::Owned(SparseTrieUpdates::default()), Cow::Borrowed) + } - // Return the root hash - root_rlp.as_hash().unwrap_or(EMPTY_ROOT_HASH) + fn take_updates(&mut self) -> SparseTrieUpdates { + self.updates.take().unwrap_or_default() } - /// Configures the trie to retain information about updates. - /// - /// If `retain_updates` is true, the trie will record branch node updates and deletions. - /// This information can then be used to efficiently update an external database. - pub fn with_updates(mut self, retain_updates: bool) -> Self { - self.updates = retain_updates.then_some(SparseTrieUpdates::default()); - self + fn wipe(&mut self) { + self.upper_subtrie.wipe(); + self.lower_subtries = [const { LowerSparseSubtrie::Blind(None) }; NUM_LOWER_SUBTRIES]; + self.prefix_set = PrefixSetMut::all(); } - /// Consumes and returns the currently accumulated trie updates. - /// - /// This is useful when you want to apply the updates to an external database, - /// and then start tracking a new set of updates. - pub fn take_updates(&mut self) -> SparseTrieUpdates { - core::iter::once(&mut self.upper_subtrie) - .chain(self.lower_subtries.iter_mut().flatten()) - .fold(SparseTrieUpdates::default(), |mut acc, subtrie| { - acc.extend(subtrie.take_updates()); - acc - }) + fn clear(&mut self) { + self.upper_subtrie.clear(); + self.upper_subtrie.nodes.insert(Nibbles::default(), SparseNode::Empty); + for subtrie in &mut self.lower_subtries { + subtrie.clear(); + } + self.prefix_set.clear(); + self.updates = None; + // `update_actions_buffers` doesn't need to be cleared; we want to reuse the Vecs it has + // buffered, and all of those are already inherently cleared when they get used. } - /// Returns: - /// 1. List of lower [subtries](SparseSubtrie) that have changed according to the provided - /// [prefix set](PrefixSet). See documentation of [`ChangedSubtrie`] for more details. - /// 2. Prefix set of keys that do not belong to any lower subtrie. - /// - /// This method helps optimize hash recalculations by identifying which specific - /// lower subtries need to be updated. Each lower subtrie can then be updated in parallel. - /// - /// IMPORTANT: The method removes the subtries from `lower_subtries`, and the caller is - /// responsible for returning them back into the array. - fn take_changed_lower_subtries( - &mut self, - prefix_set: &mut PrefixSet, - ) -> (Vec<ChangedSubtrie>, PrefixSetMut) { - // Clone the prefix set to iterate over its keys. Cloning is cheap, it's just an Arc.
+ fn find_leaf( + &self, + full_path: &Nibbles, + expected_value: Option<&Vec<u8>>, + ) -> Result<LeafLookup, LeafLookupError> { + // Inclusion proof + // + // First, do a quick check if the value exists in either the upper or lower subtrie's values + // map. We assume that if there exists a leaf node, then its value will be in the `values` + // map. + if let Some(actual_value) = std::iter::once(self.upper_subtrie.as_ref()) + .chain(self.lower_subtrie_for_path(full_path)) + .filter_map(|subtrie| subtrie.inner.values.get(full_path)) + .next() + { + // We found the leaf, check if the value matches (if expected value was provided) + return expected_value + .is_none_or(|v| v == actual_value) + .then_some(LeafLookup::Exists) + .ok_or_else(|| LeafLookupError::ValueMismatch { + path: *full_path, + expected: expected_value.cloned(), + actual: actual_value.clone(), + }) + } - let prefix_set_clone = prefix_set.clone(); - let mut prefix_set_iter = prefix_set_clone.into_iter().copied().peekable(); - let mut changed_subtries = Vec::new(); - let mut unchanged_prefix_set = PrefixSetMut::default(); - for (index, subtrie) in self.lower_subtries.iter_mut().enumerate() { - if let Some(subtrie) = subtrie.take_if(|subtrie| prefix_set.contains(&subtrie.path)) { - let prefix_set = if prefix_set.all() { - unchanged_prefix_set = PrefixSetMut::all(); - PrefixSetMut::all() - } else { - // Take those keys from the original prefix set that start with the subtrie path - // - // Subtries are stored in the order of their paths, so we can use the same - // prefix set iterator. - let mut new_prefix_set = Vec::new(); - while let Some(key) = prefix_set_iter.peek() { - if key.starts_with(&subtrie.path) { - // If the key starts with the subtrie path, add it to the new prefix set - new_prefix_set.push(prefix_set_iter.next().unwrap()); - } else if new_prefix_set.is_empty() && key < &subtrie.path { - // If we didn't yet have any keys that belong to this subtrie, and the - // current key is still less than the subtrie path, add it to the - // unchanged prefix set - unchanged_prefix_set.insert(prefix_set_iter.next().unwrap()); - } else { - // If we're past the subtrie path, we're done with this subtrie. Do not - // advance the iterator, the next key will be processed either by the - // next subtrie or inserted into the unchanged prefix set. - break + // If the value does not exist in the `values` map, then this means that the leaf either: + // - Does not exist in the trie + // - Is missing from the witness + // We traverse the trie to find the location where this leaf would have been, showing + // that it is not in the trie. Or we find a blinded node, showing that the witness is + // not complete.
+ let mut curr_path = Nibbles::new(); // start traversal from root + let mut curr_subtrie = self.upper_subtrie.as_ref(); + let mut curr_subtrie_is_upper = true; + + loop { + let curr_node = curr_subtrie.nodes.get(&curr_path).unwrap(); + + match Self::find_next_to_leaf(&curr_path, curr_node, full_path) { + FindNextToLeafOutcome::NotFound => return Ok(LeafLookup::NonExistent), + FindNextToLeafOutcome::BlindedNode(hash) => { + // We hit a blinded node - cannot determine if leaf exists + return Err(LeafLookupError::BlindedNode { path: curr_path, hash }); + } + FindNextToLeafOutcome::Found => { + panic!("target leaf {full_path:?} found at path {curr_path:?}, even though value wasn't in values hashmap"); + } + FindNextToLeafOutcome::ContinueFrom(next_path) => { + curr_path = next_path; + // If we were previously looking at the upper trie, and the new path is in the + // lower trie, we need to pull out a ref to the lower trie. + if curr_subtrie_is_upper { + if let Some(lower_subtrie) = self.lower_subtrie_for_path(&curr_path) { + curr_subtrie = lower_subtrie; + curr_subtrie_is_upper = false; } } - PrefixSetMut::from(new_prefix_set) } - .freeze(); - - changed_subtries.push(ChangedSubtrie { index, subtrie, prefix_set }); } } - - // Extend the unchanged prefix set with the remaining keys that are not part of any subtries - unchanged_prefix_set.extend_keys(prefix_set_iter); - - (changed_subtries, unchanged_prefix_set) } } -/// This is a subtrie of the [`ParallelSparseTrie`] that contains a map from path to sparse trie -/// nodes. -#[derive(Clone, PartialEq, Eq, Debug, Default)] -pub struct SparseSubtrie { - /// The root path of this subtrie. +impl ParallelSparseTrie { + /// Returns true if retaining updates is enabled for the overall trie. + const fn updates_enabled(&self) -> bool { + self.updates.is_some() + } + + /// Creates a new revealed sparse trie from the given root node. /// - /// This is the _full_ path to this subtrie, meaning it includes the first - /// [`UPPER_TRIE_MAX_DEPTH`] nibbles that we also use for indexing subtries in the - /// [`ParallelSparseTrie`]. + /// This function initializes the internal structures and then reveals the root. + /// It is a convenient method to create a trie when you already have the root node available. /// - /// There should be a node for this path in `nodes` map. - path: Nibbles, - /// The map from paths to sparse trie nodes within this subtrie. - nodes: HashMap, - /// Subset of fields for mutable access while `nodes` field is also being mutably borrowed. - inner: SparseSubtrieInner, -} - -/// Returned by the `find_next_to_leaf` method to indicate either that the leaf has been found, -/// traversal should be continued from the given path, or the leaf is not in the trie. -enum FindNextToLeafOutcome { - /// `Found` indicates that the leaf was found at the given path. - Found, - /// `ContinueFrom` indicates that traversal should continue from the given path. - ContinueFrom(Nibbles), - /// `NotFound` indicates that there is no way to traverse to the leaf, as it is not in the - /// trie. - NotFound, -} + /// # Arguments + /// + /// * `root` - The root node of the trie + /// * `masks` - Trie masks for root branch node + /// * `retain_updates` - Whether to track updates + /// + /// # Returns + /// + /// Self if successful, or an error if revealing fails. 
+ pub fn from_root( + root: TrieNode, + masks: TrieMasks, + retain_updates: bool, + ) -> SparseTrieResult<Self> { + Self::default().with_root(root, masks, retain_updates) + } -impl SparseSubtrie { - fn new(path: Nibbles) -> Self { - Self { path, ..Default::default() } + /// Returns a reference to the lower `SparseSubtrie` for the given path, or None if the + /// path belongs to the upper trie, or if the lower subtrie for the path doesn't exist or is + /// blinded. + fn lower_subtrie_for_path(&self, path: &Nibbles) -> Option<&SparseSubtrie> { + match SparseSubtrieType::from_path(path) { + SparseSubtrieType::Upper => None, + SparseSubtrieType::Lower(idx) => self.lower_subtries[idx].as_revealed_ref(), + } } - /// Configures the subtrie to retain information about updates. + /// Returns a mutable reference to the lower `SparseSubtrie` for the given path, or None if the + /// path belongs to the upper trie. /// - /// If `retain_updates` is true, the trie will record branch node updates and deletions. - /// This information can then be used to efficiently update an external database. - pub fn with_updates(mut self, retain_updates: bool) -> Self { - self.inner.updates = retain_updates.then_some(SparseTrieUpdates::default()); - self + /// This method will create/reveal a new lower subtrie for the given path if one isn't already + /// revealed. If one does exist, but its path field is longer than the given path, then the + /// field will be set to the given path. + fn lower_subtrie_for_path_mut(&mut self, path: &Nibbles) -> Option<&mut SparseSubtrie> { + match SparseSubtrieType::from_path(path) { + SparseSubtrieType::Upper => None, + SparseSubtrieType::Lower(idx) => { + self.lower_subtries[idx].reveal(path); + Some(self.lower_subtries[idx].as_revealed_mut().expect("just revealed")) + } + } } - /// Returns true if the current path and its child are both found in the same level. - fn is_child_same_level(current_path: &Nibbles, child_path: &Nibbles) -> bool { - let current_level = core::mem::discriminant(&SparseSubtrieType::from_path(current_path)); - let child_level = core::mem::discriminant(&SparseSubtrieType::from_path(child_path)); - current_level == child_level + /// Returns a reference to either the lower or upper `SparseSubtrie` for the given path, + /// depending on the path's length. + /// + /// Returns `None` if a lower subtrie does not exist for the given path. + fn subtrie_for_path(&self, path: &Nibbles) -> Option<&SparseSubtrie> { + // We can't just call `lower_subtrie_for_path` and return `upper_subtrie` if it returns + // None, because Rust complains about double mutable borrowing `self`. + if SparseSubtrieType::path_len_is_upper(path.len()) { + Some(&self.upper_subtrie) + } else { + self.lower_subtrie_for_path(path) + } } - /// Internal implementation of the method of the same name on `ParallelSparseTrie`. - fn reveal_node( - &mut self, - path: Nibbles, - node: &TrieNode, - masks: TrieMasks, - ) -> SparseTrieResult<()> { - debug_assert!(path.starts_with(&self.path)); - - // If the node is already revealed and it's not a hash node, do nothing. - if self.nodes.get(&path).is_some_and(|node| !node.is_hash()) { - return Ok(()) + /// Returns a mutable reference to either the lower or upper `SparseSubtrie` for the given path, + /// depending on the path's length. + /// + /// This method will create/reveal a new lower subtrie for the given path if one isn't already + /// revealed. If one does exist, but its path field is longer than the given path, then the + /// field will be set to the given path.
+ fn subtrie_for_path_mut(&mut self, path: &Nibbles) -> &mut SparseSubtrie { + // We can't just call `lower_subtrie_for_path` and return `upper_subtrie` if it returns + // None, because Rust complains about double mutable borrowing `self`. + if SparseSubtrieType::path_len_is_upper(path.len()) { + &mut self.upper_subtrie + } else { + self.lower_subtrie_for_path_mut(path).unwrap() } + } - if let Some(tree_mask) = masks.tree_mask { - self.inner.branch_node_tree_masks.insert(path, tree_mask); - } - if let Some(hash_mask) = masks.hash_mask { - self.inner.branch_node_hash_masks.insert(path, hash_mask); - } + /// Returns the next node in the traversal path from the given path towards the leaf for the + /// given full leaf path, or an error if any node along the traversal path is not revealed. + /// + /// + /// ## Panics + /// + /// If `from_path` is not a prefix of `leaf_full_path`. + fn find_next_to_leaf( + from_path: &Nibbles, + from_node: &SparseNode, + leaf_full_path: &Nibbles, + ) -> FindNextToLeafOutcome { + debug_assert!(leaf_full_path.len() >= from_path.len()); + debug_assert!(leaf_full_path.starts_with(from_path)); - match node { - TrieNode::EmptyRoot => { - // For an empty root, ensure that we are at the root path, and at the upper subtrie. - debug_assert!(path.is_empty()); - debug_assert!(self.path.is_empty()); - self.nodes.insert(path, SparseNode::Empty); + match from_node { + // If empty node is found it means the subtrie doesn't have any nodes in it, let alone + // the target leaf. + SparseNode::Empty => FindNextToLeafOutcome::NotFound, + SparseNode::Hash(hash) => FindNextToLeafOutcome::BlindedNode(*hash), + SparseNode::Leaf { key, .. } => { + let mut found_full_path = *from_path; + found_full_path.extend(key); + + if &found_full_path == leaf_full_path { + return FindNextToLeafOutcome::Found + } + FindNextToLeafOutcome::NotFound } - TrieNode::Branch(branch) => { - // For a branch node, iterate over all potential children - let mut stack_ptr = branch.as_ref().first_child_index(); - for idx in CHILD_INDEX_RANGE { - if branch.state_mask.is_bit_set(idx) { - let mut child_path = path; - child_path.push_unchecked(idx); - if Self::is_child_same_level(&path, &child_path) { - // Reveal each child node or hash it has, but only if the child is on - // the same level as the parent. - self.reveal_node_or_hash(child_path, &branch.stack[stack_ptr])?; - } - stack_ptr += 1; - } + SparseNode::Extension { key, .. } => { + if leaf_full_path.len() == from_path.len() { + return FindNextToLeafOutcome::NotFound } - // Update the branch node entry in the nodes map, handling cases where a blinded - // node is now replaced with a revealed node. - match self.nodes.entry(path) { - Entry::Occupied(mut entry) => match entry.get() { - // Replace a hash node with a fully revealed branch node. - SparseNode::Hash(hash) => { - entry.insert(SparseNode::Branch { - state_mask: branch.state_mask, - // Memoize the hash of a previously blinded node in a new branch - // node. - hash: Some(*hash), - store_in_db_trie: Some( - masks.hash_mask.is_some_and(|mask| !mask.is_empty()) || - masks.tree_mask.is_some_and(|mask| !mask.is_empty()), - ), - }); - } - // Branch node already exists, or an extension node was placed where a - // branch node was before. - SparseNode::Branch { .. } | SparseNode::Extension { .. } => {} - // All other node types can't be handled. - node @ (SparseNode::Empty | SparseNode::Leaf { .. 
}) => { - return Err(SparseTrieErrorKind::Reveal { - path: *entry.key(), - node: Box::new(node.clone()), - } - .into()) - } - }, - Entry::Vacant(entry) => { - entry.insert(SparseNode::new_branch(branch.state_mask)); - } + + let mut child_path = *from_path; + child_path.extend(key); + + if !leaf_full_path.starts_with(&child_path) { + return FindNextToLeafOutcome::NotFound } + FindNextToLeafOutcome::ContinueFrom(child_path) } - TrieNode::Extension(ext) => match self.nodes.entry(path) { - Entry::Occupied(mut entry) => match entry.get() { - // Replace a hash node with a revealed extension node. - SparseNode::Hash(hash) => { - let mut child_path = *entry.key(); - child_path.extend(&ext.key); - entry.insert(SparseNode::Extension { - key: ext.key, - // Memoize the hash of a previously blinded node in a new extension - // node. - hash: Some(*hash), - store_in_db_trie: None, - }); - if Self::is_child_same_level(&path, &child_path) { - self.reveal_node_or_hash(child_path, &ext.child)?; - } - } - // Extension node already exists, or an extension node was placed where a branch - // node was before. - SparseNode::Extension { .. } | SparseNode::Branch { .. } => {} - // All other node types can't be handled. - node @ (SparseNode::Empty | SparseNode::Leaf { .. }) => { - return Err(SparseTrieErrorKind::Reveal { - path: *entry.key(), - node: Box::new(node.clone()), - } - .into()) - } - }, - Entry::Vacant(entry) => { - let mut child_path = *entry.key(); - child_path.extend(&ext.key); - entry.insert(SparseNode::new_ext(ext.key)); - if Self::is_child_same_level(&path, &child_path) { - self.reveal_node_or_hash(child_path, &ext.child)?; - } + SparseNode::Branch { state_mask, .. } => { + if leaf_full_path.len() == from_path.len() { + return FindNextToLeafOutcome::NotFound } - }, - TrieNode::Leaf(leaf) => match self.nodes.entry(path) { - Entry::Occupied(mut entry) => match entry.get() { - // Replace a hash node with a revealed leaf node and store leaf node value. - SparseNode::Hash(hash) => { - let mut full = *entry.key(); - full.extend(&leaf.key); - self.inner.values.insert(full, leaf.value.clone()); - entry.insert(SparseNode::Leaf { - key: leaf.key, - // Memoize the hash of a previously blinded node in a new leaf - // node. - hash: Some(*hash), - }); - } - // Leaf node already exists. - SparseNode::Leaf { .. } => {} - // All other node types can't be handled. - node @ (SparseNode::Empty | - SparseNode::Extension { .. } | - SparseNode::Branch { .. }) => { - return Err(SparseTrieErrorKind::Reveal { - path: *entry.key(), - node: Box::new(node.clone()), - } - .into()) - } - }, - Entry::Vacant(entry) => { - let mut full = *entry.key(); - full.extend(&leaf.key); - entry.insert(SparseNode::new_leaf(leaf.key)); - self.inner.values.insert(full, leaf.value.clone()); + + let nibble = leaf_full_path.get_unchecked(from_path.len()); + if !state_mask.is_bit_set(nibble) { + return FindNextToLeafOutcome::NotFound } - }, - } - Ok(()) + let mut child_path = *from_path; + child_path.push_unchecked(nibble); + + FindNextToLeafOutcome::ContinueFrom(child_path) + } + } } - /// Reveals either a node or its hash placeholder based on the provided child data. - /// - /// When traversing the trie, we often encounter references to child nodes that - /// are either directly embedded or represented by their hash. This method - /// handles both cases: - /// - /// 1. If the child data represents a hash (32+1=33 bytes), store it as a hash node - /// 2. 
Otherwise, decode the data as a [`TrieNode`] and recursively reveal it using - /// `reveal_node` - /// - /// # Returns - /// - /// Returns `Ok(())` if successful, or an error if the node cannot be revealed. - /// - /// # Error Handling - /// - /// Will error if there's a conflict between a new hash node and an existing one - /// at the same path - fn reveal_node_or_hash(&mut self, path: Nibbles, child: &[u8]) -> SparseTrieResult<()> { - if child.len() == B256::len_bytes() + 1 { - let hash = B256::from_slice(&child[1..]); - match self.nodes.entry(path) { - Entry::Occupied(entry) => match entry.get() { - // Hash node with a different hash can't be handled. - SparseNode::Hash(previous_hash) if previous_hash != &hash => { - return Err(SparseTrieErrorKind::Reveal { - path: *entry.key(), - node: Box::new(SparseNode::Hash(hash)), - } - .into()) - } - _ => {} - }, - Entry::Vacant(entry) => { - entry.insert(SparseNode::Hash(hash)); - } - } - return Ok(()) + /// Called when a child node has collapsed into its parent as part of `remove_leaf`. If the + /// new parent node is a leaf, then the previous child also was, and if the previous child was + /// on a lower subtrie while the parent is on an upper then the leaf value needs to be moved to + /// the upper. + fn move_value_on_leaf_removal( + &mut self, + parent_path: &Nibbles, + new_parent_node: &SparseNode, + prev_child_path: &Nibbles, + ) { + // If the parent path isn't in the upper then it doesn't matter what the new node is, + // there's no situation where a leaf value needs to be moved. + if SparseSubtrieType::from_path(parent_path).lower_index().is_some() { + return; } - self.reveal_node(path, &TrieNode::decode(&mut &child[..])?, TrieMasks::none()) + if let SparseNode::Leaf { key, .. } = new_parent_node { + let Some(prev_child_subtrie) = self.lower_subtrie_for_path_mut(prev_child_path) else { + return; + }; + + let mut leaf_full_path = *parent_path; + leaf_full_path.extend(key); + + let val = prev_child_subtrie.inner.values.remove(&leaf_full_path).expect("ParallelSparseTrie is in an inconsistent state, expected value on subtrie which wasn't found"); + self.upper_subtrie.inner.values.insert(leaf_full_path, val); + } } - /// Recalculates and updates the RLP hashes for the changed nodes in this subtrie. + /// Used by `remove_leaf` to ensure that any externalities are handled when a node is removed + /// from a lower subtrie. These can include: + /// - Removing the lower subtrie completely, if it is now empty. + /// - Updating the `path` field of the lower subtrie to indicate that its root node has changed. /// - /// The function starts from the subtrie root, traverses down to leaves, and then calculates - /// the hashes from leaves back up to the root. It uses a stack from [`SparseSubtrieBuffers`] to - /// track the traversal and accumulate RLP encodings. + /// This method assumes that the caller will deal with putting all other nodes in the trie into + /// a consistent state after the removal of this one. /// - /// # Parameters + /// ## Panics /// - /// - `prefix_set`: The set of trie paths whose nodes have changed. + /// - If the removed node was not a leaf or extension. + fn remove_node(&mut self, path: &Nibbles) { + let subtrie = self.subtrie_for_path_mut(path); + let node = subtrie.nodes.remove(path); + + let Some(idx) = SparseSubtrieType::from_path(path).lower_index() else { + // When removing a node from the upper trie there's nothing special we need to do to fix + // its path field; the upper trie's path is always empty.
+ return; + }; + + match node { + Some(SparseNode::Leaf { .. }) => { + // If the leaf was the final node in its lower subtrie then we can blind the + // subtrie, effectively marking it as empty. + if subtrie.nodes.is_empty() { + self.lower_subtries[idx].clear(); + } + } + Some(SparseNode::Extension { key, .. }) => { + // If the removed extension was the root node of a lower subtrie then the lower + // subtrie's `path` needs to be updated to be whatever node the extension used to + // point to. + if &subtrie.path == path { + subtrie.path.extend(&key); + } + } + _ => panic!("Expected to remove a leaf or extension, but removed {node:?}"), + } + } + + /// Given the path to a parent branch node and a child node which is the sole remaining child on + /// that branch after removing a leaf, returns a node to replace the parent branch node and a + /// boolean indicating if the child should be deleted. /// + /// ## Panics /// - /// - If either parent or child node is not already revealed. + /// - If parent's path is not a prefix of the child's path. + fn branch_changes_on_leaf_removal( + parent_path: &Nibbles, + remaining_child_path: &Nibbles, + remaining_child_node: &SparseNode, + ) -> (SparseNode, bool) { + debug_assert!(remaining_child_path.len() > parent_path.len()); + debug_assert!(remaining_child_path.starts_with(parent_path)); + + let remaining_child_nibble = remaining_child_path.get_unchecked(parent_path.len()); + + // If we swap the branch node out for either an extension or leaf, depending on + // what its remaining child is. + match remaining_child_node { + SparseNode::Empty | SparseNode::Hash(_) => { + panic!("remaining child must have been revealed already") + } + // If the only child is a leaf node, we downgrade the branch node into a + // leaf node, prepending the nibble to the key, and delete the old + // child. + SparseNode::Leaf { key, .. } => { + let mut new_key = Nibbles::from_nibbles_unchecked([remaining_child_nibble]); + new_key.extend(key); + (SparseNode::new_leaf(new_key), true) + } + // If the only child node is an extension node, we downgrade the branch + // node into an even longer extension node, prepending the nibble to the + // key, and delete the old child. + SparseNode::Extension { key, .. } => { + let mut new_key = Nibbles::from_nibbles_unchecked([remaining_child_nibble]); + new_key.extend(key); + (SparseNode::new_ext(new_key), true) + } + // If the only child is a branch node, we downgrade the current branch + // node into a one-nibble extension node. + SparseNode::Branch { .. } => ( + SparseNode::new_ext(Nibbles::from_nibbles_unchecked([remaining_child_nibble])), + false, + ), + } + } + + /// Given the path to a parent extension and its key, and a child node (not necessarily on this + /// subtrie), returns an optional replacement parent node. If a replacement is returned then the + /// child node should be deleted. /// + /// ## Panics /// - /// - If either parent or child node is not already revealed. + /// - If parent's path is not a prefix of the child's path.
+ fn extension_changes_on_leaf_removal( + parent_path: &Nibbles, + parent_key: &Nibbles, + child_path: &Nibbles, + child: &SparseNode, + ) -> Option<SparseNode> { + debug_assert!(child_path.len() > parent_path.len()); + debug_assert!(child_path.starts_with(parent_path)); - debug_assert!(prefix_set.iter().all(|path| path.starts_with(&self.path))); + // If the parent node is an extension node, we need to look at its child to see + // if we need to merge it. + match child { + SparseNode::Empty | SparseNode::Hash(_) => { + panic!("child must be revealed") + } + // For a leaf node, we collapse the extension node into a leaf node, + // extending the key. While it's impossible to encounter an extension node + // followed by a leaf node in a complete trie, it's possible here because we + // could have downgraded the extension node's child into a leaf node from a + // branch in a previous call to `branch_changes_on_leaf_removal`. + SparseNode::Leaf { key, .. } => { + let mut new_key = *parent_key; + new_key.extend(key); + Some(SparseNode::new_leaf(new_key)) + } + // Similar to the leaf node, for an extension node, we collapse them into one + // extension node, extending the key. + SparseNode::Extension { key, .. } => { + let mut new_key = *parent_key; + new_key.extend(key); + Some(SparseNode::new_ext(new_key)) + } + // For a branch node, we just leave the extension node as-is. + SparseNode::Branch { .. } => None, + } + } - debug_assert!(self.inner.buffers.path_stack.is_empty()); - self.inner - .buffers - .path_stack - .push(RlpNodePathStackItem { path: self.path, is_in_prefix_set: None }); + /// Drains any [`SparseTrieUpdatesAction`]s from the given subtrie, and applies each action to + /// the given `updates` set. If the given set is None then this is a no-op. + fn apply_subtrie_update_actions( + &mut self, + update_actions: impl Iterator<Item = SparseTrieUpdatesAction>, + ) { + if let Some(updates) = self.updates.as_mut() { + for action in update_actions { + match action { + SparseTrieUpdatesAction::InsertRemoved(path) => { + updates.updated_nodes.remove(&path); + updates.removed_nodes.insert(path); + } + SparseTrieUpdatesAction::RemoveUpdated(path) => { + updates.updated_nodes.remove(&path); + } + SparseTrieUpdatesAction::InsertUpdated(path, branch_node) => { + updates.updated_nodes.insert(path, branch_node); + } + } + } + }; + } - while let Some(stack_item) = self.inner.buffers.path_stack.pop() { + /// Updates hashes for the upper subtrie, using nodes from both upper and lower subtries.
+ #[instrument(level = "trace", target = "trie::parallel_sparse", skip_all, ret)] + fn update_upper_subtrie_hashes(&mut self, prefix_set: &mut PrefixSet) -> RlpNode { + trace!(target: "trie::parallel_sparse", "Updating upper subtrie hashes"); + + debug_assert!(self.upper_subtrie.inner.buffers.path_stack.is_empty()); + self.upper_subtrie.inner.buffers.path_stack.push(RlpNodePathStackItem { + path: Nibbles::default(), // Start from root + is_in_prefix_set: None, + }); + + let mut update_actions_buf = + self.updates_enabled().then(|| self.update_actions_buffers.pop().unwrap_or_default()); + + while let Some(stack_item) = self.upper_subtrie.inner.buffers.path_stack.pop() { let path = stack_item.path; - let node = self - .nodes - .get_mut(&path) - .unwrap_or_else(|| panic!("node at path {path:?} does not exist")); + let node = if path.len() < UPPER_TRIE_MAX_DEPTH { + self.upper_subtrie.nodes.get_mut(&path).expect("upper subtrie node must exist") + } else { + let index = path_subtrie_index_unchecked(&path); + let node = self.lower_subtries[index] + .as_revealed_mut() + .expect("lower subtrie must exist") + .nodes + .get_mut(&path) + .expect("lower subtrie node must exist"); + // Lower subtrie root node hashes must be computed before updating upper subtrie + // hashes + debug_assert!( + node.hash().is_some(), + "Lower subtrie root node at path {path:?} has no hash" + ); + node + }; - self.inner.rlp_node(prefix_set, stack_item, node); + // Calculate the RLP node for the current node using upper subtrie + self.upper_subtrie.inner.rlp_node( + prefix_set, + &mut update_actions_buf, + stack_item, + node, + &self.branch_node_tree_masks, + &self.branch_node_hash_masks, + ); } - debug_assert_eq!(self.inner.buffers.rlp_node_stack.len(), 1); - self.inner.buffers.rlp_node_stack.pop().unwrap().rlp_node - } + // If there were any branch node updates as a result of calculating the RLP node for the + // upper trie then apply them to the top-level set. + if let Some(mut update_actions_buf) = update_actions_buf { + self.apply_subtrie_update_actions( + #[allow(clippy::iter_with_drain)] + update_actions_buf.drain(..), + ); + self.update_actions_buffers.push(update_actions_buf); + } - /// Consumes and returns the currently accumulated trie updates. - /// - /// This is useful when you want to apply the updates to an external database, - /// and then start tracking a new set of updates. - fn take_updates(&mut self) -> SparseTrieUpdates { - self.inner.updates.take().unwrap_or_default() + debug_assert_eq!(self.upper_subtrie.inner.buffers.rlp_node_stack.len(), 1); + self.upper_subtrie.inner.buffers.rlp_node_stack.pop().unwrap().rlp_node } -} - -/// Helper type for [`SparseSubtrie`] to mutably access only a subset of fields from the original -/// struct. -#[derive(Clone, PartialEq, Eq, Debug, Default)] -struct SparseSubtrieInner { - /// When a branch is set, the corresponding child subtree is stored in the database. - branch_node_tree_masks: HashMap, - /// When a bit is set, the corresponding child is stored as a hash in the database. - branch_node_hash_masks: HashMap, - /// Map from leaf key paths to their values. - /// All values are stored here instead of directly in leaf nodes. - values: HashMap>, - /// Optional tracking of trie updates for later use. - updates: Option, - /// Reusable buffers for [`SparseSubtrie::update_hashes`]. - buffers: SparseSubtrieBuffers, -} -impl SparseSubtrieInner { - /// Computes the RLP encoding and its hash for a single (trie node)[`SparseNode`]. 
- /// - /// # Deferred Processing - /// - /// When an extension or a branch node depends on child nodes that haven't been computed yet, - /// the function pushes the current node back onto the path stack along with its children, - /// then returns early. This allows the iterative algorithm to process children first before - /// retrying the parent. - /// - /// # Parameters - /// - /// - `prefix_set`: Set of prefixes (key paths) that have been marked as updated - /// - `stack_item`: The stack item to process - /// - `node`: The sparse node to process (will be mutated to update hash) + /// Returns: + /// 1. List of lower [subtries](SparseSubtrie) that have changed according to the provided + /// [prefix set](PrefixSet). See documentation of [`ChangedSubtrie`] for more details. + /// 2. Prefix set of keys that do not belong to any lower subtrie. /// - /// # Side Effects + /// This method helps optimize hash recalculations by identifying which specific + /// lower subtries need to be updated. Each lower subtrie can then be updated in parallel. /// - /// - Updates the node's hash field after computing RLP - /// - Pushes nodes to [`SparseSubtrieBuffers::path_stack`] to manage traversal - /// - Updates the (trie updates)[`SparseTrieUpdates`] accumulator when tracking changes, if - /// [`Some`] - /// - May push items onto the path stack for deferred processing - /// - /// # Exit condition - /// - /// Once all nodes have been processed and all RLPs and hashes calculated, pushes the root node - /// onto the [`SparseSubtrieBuffers::rlp_node_stack`] and exits. - fn rlp_node( + /// IMPORTANT: The method removes the subtries from `lower_subtries`, and the caller is + /// responsible for returning them back into the array. + fn take_changed_lower_subtries( &mut self, prefix_set: &mut PrefixSet, - mut stack_item: RlpNodePathStackItem, - node: &mut SparseNode, - ) { - let path = stack_item.path; - trace!( - target: "trie::parallel_sparse", - ?path, - ?node, - "Calculating node RLP" - ); - - // Check if the path is in the prefix set. - // First, check the cached value. If it's `None`, then check the prefix set, and update - // the cached value. - let mut prefix_set_contains = |path: &Nibbles| { - *stack_item.is_in_prefix_set.get_or_insert_with(|| prefix_set.contains(path)) - }; + ) -> (Vec<ChangedSubtrie>, PrefixSetMut) { + // Clone the prefix set to iterate over its keys. Cloning is cheap, it's just an Arc. + let prefix_set_clone = prefix_set.clone(); + let mut prefix_set_iter = prefix_set_clone.into_iter().copied().peekable(); + let mut changed_subtries = Vec::new(); + let mut unchanged_prefix_set = PrefixSetMut::default(); + let updates_enabled = self.updates_enabled(); + for (index, subtrie) in self.lower_subtries.iter_mut().enumerate() { + if let Some(subtrie) = + subtrie.take_revealed_if(|subtrie| prefix_set.contains(&subtrie.path)) + { + let prefix_set = if prefix_set.all() { + unchanged_prefix_set = PrefixSetMut::all(); + PrefixSetMut::all() + } else { + // Take those keys from the original prefix set that start with the subtrie path + // + // Subtries are stored in the order of their paths, so we can use the same + // prefix set iterator. + let mut new_prefix_set = Vec::new(); + while let Some(key) = prefix_set_iter.peek() { + if key.starts_with(&subtrie.path) { + // If the key starts with the subtrie path, add it to the new prefix set + new_prefix_set.push(prefix_set_iter.next().unwrap()); + } else if new_prefix_set.is_empty() && key < &subtrie.path { + // If we didn't yet have any keys that belong to this subtrie, and the + // current key is still less than the subtrie path, add it to the + // unchanged prefix set + unchanged_prefix_set.insert(prefix_set_iter.next().unwrap()); + } else { + // If we're past the subtrie path, we're done with this subtrie. Do not + // advance the iterator, the next key will be processed either by the + // next subtrie or inserted into the unchanged prefix set. + break + } + } + PrefixSetMut::from(new_prefix_set) + } + .freeze(); + + // We need to add the full path of the root node of the lower subtrie to the + // unchanged prefix set, so that we don't skip it when calculating hashes for the + // upper subtrie. + match subtrie.nodes.get(&subtrie.path) { + Some(SparseNode::Extension { key, .. } | SparseNode::Leaf { key, .. }) => { + unchanged_prefix_set.insert(subtrie.path.join(key)); + } + Some(SparseNode::Branch { ..
}) => { + unchanged_prefix_set.insert(subtrie.path); + } + _ => {} + } + + let update_actions_buf = + updates_enabled.then(|| self.update_actions_buffers.pop().unwrap_or_default()); + + changed_subtries.push(ChangedSubtrie { + index, + subtrie, + prefix_set, + update_actions_buf, + }); } } + + // Extend the unchanged prefix set with the remaining keys that are not part of any subtries + unchanged_prefix_set.extend_keys(prefix_set_iter); + + (changed_subtries, unchanged_prefix_set) + } + + /// Returns an iterator over all nodes in the trie in no particular order. + #[cfg(test)] + fn all_nodes(&self) -> impl IntoIterator<Item = (&Nibbles, &SparseNode)> { + let mut nodes = vec![]; + for subtrie in self.lower_subtries.iter().filter_map(LowerSparseSubtrie::as_revealed_ref) { + nodes.extend(subtrie.nodes.iter()) + } + nodes.extend(self.upper_subtrie.nodes.iter()); + nodes + } + + /// Reveals a trie node in the upper trie if it has not been revealed before. When revealing + /// branch/extension nodes this may recurse into a lower trie to reveal a child. + /// + /// This function decodes a trie node and inserts it into the trie structure. It handles + /// different node types (leaf, extension, branch) by appropriately adding them to the trie and + /// recursively revealing their children. + /// + /// # Arguments + /// + /// * `path` - The path where the node should be revealed + /// * `node` - The trie node to reveal + /// * `masks` - Trie masks for branch nodes + /// + /// # Returns + /// + /// `Ok(())` if successful, or an error if the node was not revealed. + fn reveal_upper_node( + &mut self, + path: Nibbles, + node: &TrieNode, + masks: TrieMasks, + ) -> SparseTrieResult<()> { + // If there is no subtrie for the path it means the path is UPPER_TRIE_MAX_DEPTH or less + // nibbles, and so belongs to the upper trie.
+ self.upper_subtrie.reveal_node(path, node, masks)?; + + // The previous upper_trie.reveal_node call will not have revealed any child nodes via + // reveal_node_or_hash if the child node would be found on a lower subtrie. We handle that + // here by manually checking the specific cases where this could happen, and calling + // reveal_node_or_hash for each. + match node { + TrieNode::Branch(branch) => { + // If a branch is at the cutoff level of the trie then it will be in the upper trie, + // but all of its children will be in a lower trie. Check if a child node would be + // in the lower subtrie, and reveal accordingly. + if !SparseSubtrieType::path_len_is_upper(path.len() + 1) { + let mut stack_ptr = branch.as_ref().first_child_index(); + for idx in CHILD_INDEX_RANGE { + if branch.state_mask.is_bit_set(idx) { + let mut child_path = path; + child_path.push_unchecked(idx); + self.lower_subtrie_for_path_mut(&child_path) + .expect("child_path must have a lower subtrie") + .reveal_node_or_hash(child_path, &branch.stack[stack_ptr])?; + stack_ptr += 1; + } + } + } + } + TrieNode::Extension(ext) => { + let mut child_path = path; + child_path.extend(&ext.key); + if let Some(subtrie) = self.lower_subtrie_for_path_mut(&child_path) { + subtrie.reveal_node_or_hash(child_path, &ext.child)?; + } + } + TrieNode::EmptyRoot | TrieNode::Leaf(_) => (), + } + + Ok(()) + } +} +/// This is a subtrie of the [`ParallelSparseTrie`] that contains a map from path to sparse trie +/// nodes. +#[derive(Clone, PartialEq, Eq, Debug, Default)] +pub struct SparseSubtrie { + /// The root path of this subtrie. + /// + /// This is the _full_ path to this subtrie, meaning it includes the first + /// [`UPPER_TRIE_MAX_DEPTH`] nibbles that we also use for indexing subtries in the + /// [`ParallelSparseTrie`]. + /// + /// There should be a node for this path in `nodes` map. + pub(crate) path: Nibbles, + /// The map from paths to sparse trie nodes within this subtrie. + nodes: HashMap<Nibbles, SparseNode>, + /// Subset of fields for mutable access while `nodes` field is also being mutably borrowed. + inner: SparseSubtrieInner, +} +/// Returned by the `find_next_to_leaf` method to indicate either that the leaf has been found, +/// traversal should be continued from the given path, or the leaf is not in the trie. +enum FindNextToLeafOutcome { + /// `Found` indicates that the leaf was found at the given path. + Found, + /// `ContinueFrom` indicates that traversal should continue from the given path.
+ ContinueFrom(Nibbles), + /// `NotFound` indicates that there is no way to traverse to the leaf, as it is not in the + /// trie. + NotFound, + /// `BlindedNode` indicates that the node is blinded with the contained hash and cannot be + /// traversed. + BlindedNode(B256), +} - let mut tree_mask = TrieMask::default(); - let mut hash_mask = TrieMask::default(); - let mut hashes = Vec::new(); - for (i, child_path) in self.buffers.branch_child_buf.iter().enumerate() { - if self.buffers.rlp_node_stack.last().is_some_and(|e| &e.path == child_path) { - let RlpNodeStackItem { - path: _, - rlp_node: child, - node_type: child_node_type, - } = self.buffers.rlp_node_stack.pop().unwrap(); +impl SparseSubtrie { + /// Creates a new empty subtrie with the specified root path. + pub(crate) fn new(path: Nibbles) -> Self { + Self { path, ..Default::default() } + } - // Update the masks only if we need to retain trie updates - if retain_updates { - // SAFETY: it's a child, so it's never empty - let last_child_nibble = child_path.last().unwrap(); + /// Returns true if this subtrie has any nodes, false otherwise. + pub(crate) fn is_empty(&self) -> bool { + self.nodes.is_empty() + } - // Determine whether we need to set trie mask bit. - let should_set_tree_mask_bit = if let Some(store_in_db_trie) = - child_node_type.store_in_db_trie() + /// Returns true if the current path and its child are both found in the same level. + fn is_child_same_level(current_path: &Nibbles, child_path: &Nibbles) -> bool { + let current_level = core::mem::discriminant(&SparseSubtrieType::from_path(current_path)); + let child_level = core::mem::discriminant(&SparseSubtrieType::from_path(child_path)); + current_level == child_level + } + + /// Updates or inserts a leaf node at the specified key path with the provided RLP-encoded + /// value. + /// + /// If the leaf did not previously exist, this method adjusts the trie structure by inserting + /// new leaf nodes, splitting branch nodes, or collapsing extension nodes as needed. + /// + /// # Returns + /// + /// Returns the `Ok` if the update is successful. + /// + /// Note: If an update requires revealing a blinded node, an error is returned if the blinded + /// provider returns an error. + pub fn update_leaf( + &mut self, + full_path: Nibbles, + value: Vec, + provider: impl TrieNodeProvider, + retain_updates: bool, + ) -> SparseTrieResult<()> { + debug_assert!(full_path.starts_with(&self.path)); + let existing = self.inner.values.insert(full_path, value); + if existing.is_some() { + // trie structure unchanged, return immediately + return Ok(()) + } + + // Here we are starting at the root of the subtrie, and traversing from there. + let mut current = Some(self.path); + while let Some(current_path) = current { + match self.update_next_node(current_path, &full_path, retain_updates)? { + LeafUpdateStep::Continue { next_node } => { + current = Some(next_node); + } + LeafUpdateStep::Complete { reveal_path, .. } => { + if let Some(reveal_path) = reveal_path { + if self.nodes.get(&reveal_path).expect("node must exist").is_hash() { + if let Some(RevealedNode { node, tree_mask, hash_mask }) = + provider.trie_node(&reveal_path)? 
{
-                            // A branch or an extension node explicitly set the
-                            // `store_in_db_trie` flag
-                            store_in_db_trie
+                            let decoded = TrieNode::decode(&mut &node[..])?;
+                            trace!(
+                                target: "trie::parallel_sparse",
+                                ?reveal_path,
+                                ?decoded,
+                                ?tree_mask,
+                                ?hash_mask,
+                                "Revealing child",
+                            );
+                            self.reveal_node(
+                                reveal_path,
+                                &decoded,
+                                TrieMasks { hash_mask, tree_mask },
+                            )?;
                         } else {
-                            // A blinded node has the tree mask bit set
-                            child_node_type.is_hash() &&
-                                self.branch_node_tree_masks
-                                    .get(&path)
-                                    .is_some_and(|mask| mask.is_bit_set(last_child_nibble))
-                        };
-                        if should_set_tree_mask_bit {
-                            tree_mask.set_bit(last_child_nibble);
-                        }
-
-                        // Set the hash mask. If a child node is a revealed branch node OR
-                        // is a blinded node that has its hash mask bit set according to the
-                        // database, set the hash mask bit and save the hash.
-                        let hash = child.as_hash().filter(|_| {
-                            child_node_type.is_branch() ||
-                                (child_node_type.is_hash() &&
-                                    self.branch_node_hash_masks.get(&path).is_some_and(
-                                        |mask| mask.is_bit_set(last_child_nibble),
-                                    ))
-                        });
-                        if let Some(hash) = hash {
-                            hash_mask.set_bit(last_child_nibble);
-                            hashes.push(hash);
+                            return Err(SparseTrieErrorKind::NodeNotFoundInProvider {
+                                path: reveal_path,
+                            }
+                            .into())
                         }
                     }
-
-                        // Insert children in the resulting buffer in a normal order,
-                        // because initially we iterated in reverse.
-                        // SAFETY: i < len and len is never 0
-                        let original_idx = self.buffers.branch_child_buf.len() - i - 1;
-                        self.buffers.branch_value_stack_buf[original_idx] = child;
-                        added_children = true;
-                    } else {
-                        // Need to defer processing until children are computed, on the next
-                        // invocation update the node's hash.
-                        debug_assert!(!added_children);
-                        self.buffers.path_stack.push(RlpNodePathStackItem {
-                            path,
-                            is_in_prefix_set: Some(prefix_set_contains(&path)),
-                        });
-                        self.buffers.path_stack.extend(
-                            self.buffers
-                                .branch_child_buf
-                                .drain(..)
-                                .map(|path| RlpNodePathStackItem { path, is_in_prefix_set: None }),
-                        );
-                        return
                     }
+
+                    current = None;
+                }
+                LeafUpdateStep::NodeNotFound => {
+                    current = None;
+                }
+            }
+        }
-                trace!(
-                    target: "trie::parallel_sparse",
-                    ?path,
-                    ?tree_mask,
-                    ?hash_mask,
-                    "Branch node masks"
+        Ok(())
+    }
+
+    /// Processes the current node, returning what to do next in the leaf update process.
+    ///
+    /// This will add or update any nodes in the trie as necessary.
+    ///
+    /// Returns a `LeafUpdateStep` containing the next node to process (if any) and
+    /// the paths of nodes that were inserted during this step.
+    fn update_next_node(
+        &mut self,
+        mut current: Nibbles,
+        path: &Nibbles,
+        retain_updates: bool,
+    ) -> SparseTrieResult<LeafUpdateStep> {
+        debug_assert!(path.starts_with(&self.path));
+        debug_assert!(current.starts_with(&self.path));
+        debug_assert!(path.starts_with(&current));
+        let Some(node) = self.nodes.get_mut(&current) else {
+            return Ok(LeafUpdateStep::NodeNotFound);
+        };
+        match node {
+            SparseNode::Empty => {
+                // We need to insert the node with a different path and key depending on the path of
+                // the subtrie.
+                let path = path.slice(self.path.len()..);
+                *node = SparseNode::new_leaf(path);
+                Ok(LeafUpdateStep::complete_with_insertions(vec![current], None))
+            }
+            SparseNode::Hash(hash) => {
+                Err(SparseTrieErrorKind::BlindedNode { path: current, hash: *hash }.into())
+            }
+            SparseNode::Leaf { key: current_key, ..
} => {
+                current.extend(current_key);
+
+                // this leaf is being updated
+                debug_assert!(
+                    &current != path,
+                    "we already checked leaf presence in the beginning"
                 );
-                // Top of the stack has all children node, we can encode the branch node and
-                // update its hash
-                self.buffers.rlp_buf.clear();
-                let branch_node_ref =
-                    BranchNodeRef::new(&self.buffers.branch_value_stack_buf, *state_mask);
-                let rlp_node = branch_node_ref.rlp(&mut self.buffers.rlp_buf);
-                *hash = rlp_node.as_hash();
+                // find the common prefix
+                let common = current.common_prefix_length(path);
-                // Save a branch node update only if it's not a root node, and we need to
-                // persist updates.
-                let store_in_db_trie_value = if let Some(updates) =
-                    self.updates.as_mut().filter(|_| retain_updates && !path.is_empty())
-                {
-                    let store_in_db_trie = !tree_mask.is_empty() || !hash_mask.is_empty();
-                    if store_in_db_trie {
-                        // Store in DB trie if there are either any children that are stored in
-                        // the DB trie, or any children represent hashed values
-                        hashes.reverse();
-                        let branch_node = BranchNodeCompact::new(
-                            *state_mask,
-                            tree_mask,
-                            hash_mask,
-                            hashes,
-                            hash.filter(|_| path.is_empty()),
-                        );
-                        updates.updated_nodes.insert(path, branch_node);
-                    } else if self
-                        .branch_node_tree_masks
-                        .get(&path)
-                        .is_some_and(|mask| !mask.is_empty()) ||
-                        self.branch_node_hash_masks
-                            .get(&path)
-                            .is_some_and(|mask| !mask.is_empty())
-                    {
-                        // If new tree and hash masks are empty, but previously they weren't, we
-                        // need to remove the node update and add the node itself to the list of
-                        // removed nodes.
-                        updates.updated_nodes.remove(&path);
-                        updates.removed_nodes.insert(path);
-                    } else if self
-                        .branch_node_hash_masks
-                        .get(&path)
-                        .is_none_or(|mask| mask.is_empty()) &&
-                        self.branch_node_hash_masks.get(&path).is_none_or(|mask| mask.is_empty())
-                    {
-                        // If new tree and hash masks are empty, and they were previously empty
-                        // as well, we need to remove the node update.
-                        updates.updated_nodes.remove(&path);
-                    }
+                // update existing node
+                let new_ext_key = current.slice(current.len() - current_key.len()..common);
+                *node = SparseNode::new_ext(new_ext_key);
-                    store_in_db_trie
-                } else {
-                    false
-                };
-                *store_in_db_trie = Some(store_in_db_trie_value);
+                // create a branch node and corresponding leaves
+                self.nodes.reserve(3);
+                let branch_path = current.slice(..common);
+                let new_leaf_path = path.slice(..=common);
+                let existing_leaf_path = current.slice(..=common);
-                (
-                    rlp_node,
-                    SparseNodeType::Branch { store_in_db_trie: Some(store_in_db_trie_value) },
-                )
+                self.nodes.insert(
+                    branch_path,
+                    SparseNode::new_split_branch(
+                        current.get_unchecked(common),
+                        path.get_unchecked(common),
+                    ),
+                );
+                self.nodes.insert(new_leaf_path, SparseNode::new_leaf(path.slice(common + 1..)));
+                self.nodes
+                    .insert(existing_leaf_path, SparseNode::new_leaf(current.slice(common + 1..)));
+
+                Ok(LeafUpdateStep::complete_with_insertions(
+                    vec![branch_path, new_leaf_path, existing_leaf_path],
+                    None,
+                ))
             }
-        };
+            SparseNode::Extension { key, .. } => {
+                current.extend(key);
+
+                if !path.starts_with(&current) {
+                    // find the common prefix
+                    let common = current.common_prefix_length(path);
+                    *key = current.slice(current.len() - key.len()..common);
+
+                    // If branch node updates retention is enabled, we need to query the
+                    // extension node child to later set the hash mask for a parent branch node
+                    // correctly.
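Both the leaf and extension arms above split at `common_prefix_length`: the first nibble where the stored path and the new key diverge becomes a branch, and each side keeps only its suffix. A minimal, self-contained sketch of that arithmetic, with made-up paths and assuming the `nybbles` crate that backs `Nibbles` here:

```rust
use nybbles::Nibbles;

fn main() {
    // An existing leaf whose full path is [0x1, 0x2, 0x3].
    let existing = Nibbles::from_nibbles([0x1, 0x2, 0x3]);
    // A new leaf being inserted at [0x1, 0x2, 0x7].
    let new = Nibbles::from_nibbles([0x1, 0x2, 0x7]);

    // The split point is the length of the shared prefix: here 2 ([0x1, 0x2]).
    let common = existing.common_prefix_length(&new);
    assert_eq!(common, 2);

    // A branch goes at the shared prefix, with one child per diverging nibble.
    assert_eq!(existing.slice(..common), Nibbles::from_nibbles([0x1, 0x2]));
    assert_eq!((existing.get_unchecked(common), new.get_unchecked(common)), (0x3, 0x7));

    // Each leaf keeps only the suffix after the branch nibble as its key;
    // here both suffixes are empty because the paths end at the branch.
    assert_eq!(existing.slice(common + 1..), Nibbles::default());
    assert_eq!(new.slice(common + 1..), Nibbles::default());
}
```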
+ let reveal_path = retain_updates.then_some(current); + + // create state mask for new branch node + // NOTE: this might overwrite the current extension node + self.nodes.reserve(3); + let branch_path = current.slice(..common); + let new_leaf_path = path.slice(..=common); + let branch = SparseNode::new_split_branch( + current.get_unchecked(common), + path.get_unchecked(common), + ); - self.buffers.rlp_node_stack.push(RlpNodeStackItem { path, rlp_node, node_type }); - trace!( - target: "trie::parallel_sparse", - ?path, - ?node_type, - "Added node to RLP node stack" - ); - } -} + self.nodes.insert(branch_path, branch); -/// Sparse Subtrie Type. -/// -/// Used to determine the type of subtrie a certain path belongs to: -/// - Paths in the range `0x..=0xf` belong to the upper subtrie. -/// - Paths in the range `0x00..` belong to one of the lower subtries. The index of the lower -/// subtrie is determined by the first [`UPPER_TRIE_MAX_DEPTH`] nibbles of the path. -/// -/// There can be at most [`NUM_LOWER_SUBTRIES`] lower subtries. -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub enum SparseSubtrieType { - /// Upper subtrie with paths in the range `0x..=0xf` - Upper, - /// Lower subtrie with paths in the range `0x00..`. Includes the index of the subtrie, - /// according to the path prefix. - Lower(usize), -} + // create new leaf + let new_leaf = SparseNode::new_leaf(path.slice(common + 1..)); + self.nodes.insert(new_leaf_path, new_leaf); -impl SparseSubtrieType { - /// Returns true if a node at a path of the given length would be placed in the upper subtrie. - /// - /// Nodes with paths shorter than [`UPPER_TRIE_MAX_DEPTH`] nibbles belong to the upper subtrie, - /// while longer paths belong to the lower subtries. - pub const fn path_len_is_upper(len: usize) -> bool { - len < UPPER_TRIE_MAX_DEPTH - } + let mut inserted_nodes = vec![branch_path, new_leaf_path]; - /// Returns the type of subtrie based on the given path. - pub fn from_path(path: &Nibbles) -> Self { - if Self::path_len_is_upper(path.len()) { - Self::Upper - } else { - Self::Lower(path_subtrie_index_unchecked(path)) - } - } + // recreate extension to previous child if needed + let key = current.slice(common + 1..); + if !key.is_empty() { + let ext_path = current.slice(..=common); + self.nodes.insert(ext_path, SparseNode::new_ext(key)); + inserted_nodes.push(ext_path); + } - /// Returns the index of the lower subtrie, if it exists. - pub const fn lower_index(&self) -> Option { - match self { - Self::Upper => None, - Self::Lower(index) => Some(*index), + return Ok(LeafUpdateStep::complete_with_insertions(inserted_nodes, reveal_path)) + } + + Ok(LeafUpdateStep::continue_with(current)) + } + SparseNode::Branch { state_mask, .. } => { + let nibble = path.get_unchecked(current.len()); + current.push_unchecked(nibble); + if !state_mask.is_bit_set(nibble) { + state_mask.set_bit(nibble); + let new_leaf = SparseNode::new_leaf(path.slice(current.len()..)); + self.nodes.insert(current, new_leaf); + return Ok(LeafUpdateStep::complete_with_insertions(vec![current], None)) + } + + // If the nibble is set, we can continue traversing the branch. + Ok(LeafUpdateStep::continue_with(current)) + } } } -} -/// Collection of reusable buffers for calculating subtrie hashes. -/// -/// These buffers reduce allocations when computing RLP representations during trie updates. 
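In the branch arm of `update_next_node` above, inserting under an unset nibble is pure mask bookkeeping plus one new leaf entry in the node map. The mask half in isolation, a sketch assuming the `alloy_trie::TrieMask` API used throughout this file:

```rust
use alloy_trie::TrieMask;

fn main() {
    // A branch that currently has children at nibbles 0x0 and 0x5
    // (bits 0 and 5 of the 16-bit state mask are set).
    let mut state_mask = TrieMask::new(0b0000_0000_0010_0001);
    assert!(state_mask.is_bit_set(0x0) && state_mask.is_bit_set(0x5));

    // Adding a leaf under nibble 0x7 only requires setting that bit; the
    // leaf node itself is then stored under the branch path extended by 0x7.
    assert!(!state_mask.is_bit_set(0x7));
    state_mask.set_bit(0x7);
    assert!(state_mask.is_bit_set(0x7));
}
```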
-#[derive(Clone, PartialEq, Eq, Debug, Default)] -pub struct SparseSubtrieBuffers { - /// Stack of RLP node paths - path_stack: Vec, - /// Stack of RLP nodes - rlp_node_stack: Vec, - /// Reusable branch child path - branch_child_buf: SmallVec<[Nibbles; 16]>, - /// Reusable branch value stack - branch_value_stack_buf: SmallVec<[RlpNode; 16]>, - /// Reusable RLP buffer - rlp_buf: Vec, -} + /// Internal implementation of the method of the same name on `ParallelSparseTrie`. + fn reveal_node( + &mut self, + path: Nibbles, + node: &TrieNode, + masks: TrieMasks, + ) -> SparseTrieResult<()> { + debug_assert!(path.starts_with(&self.path)); -/// RLP node path stack item. -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct RlpNodePathStackItem { - /// Path to the node. - pub path: Nibbles, - /// Whether the path is in the prefix set. If [`None`], then unknown yet. - pub is_in_prefix_set: Option, -} + // If the node is already revealed and it's not a hash node, do nothing. + if self.nodes.get(&path).is_some_and(|node| !node.is_hash()) { + return Ok(()) + } -/// Changed subtrie. -#[derive(Debug)] -struct ChangedSubtrie { - /// Lower subtrie index in the range [0, [`NUM_LOWER_SUBTRIES`]). - index: usize, - /// Changed subtrie - subtrie: Box, - /// Prefix set of keys that belong to the subtrie. - #[allow(unused)] - prefix_set: PrefixSet, + match node { + TrieNode::EmptyRoot => { + // For an empty root, ensure that we are at the root path, and at the upper subtrie. + debug_assert!(path.is_empty()); + debug_assert!(self.path.is_empty()); + self.nodes.insert(path, SparseNode::Empty); + } + TrieNode::Branch(branch) => { + // For a branch node, iterate over all potential children + let mut stack_ptr = branch.as_ref().first_child_index(); + for idx in CHILD_INDEX_RANGE { + if branch.state_mask.is_bit_set(idx) { + let mut child_path = path; + child_path.push_unchecked(idx); + if Self::is_child_same_level(&path, &child_path) { + // Reveal each child node or hash it has, but only if the child is on + // the same level as the parent. + self.reveal_node_or_hash(child_path, &branch.stack[stack_ptr])?; + } + stack_ptr += 1; + } + } + // Update the branch node entry in the nodes map, handling cases where a blinded + // node is now replaced with a revealed node. + match self.nodes.entry(path) { + Entry::Occupied(mut entry) => match entry.get() { + // Replace a hash node with a fully revealed branch node. + SparseNode::Hash(hash) => { + entry.insert(SparseNode::Branch { + state_mask: branch.state_mask, + // Memoize the hash of a previously blinded node in a new branch + // node. + hash: Some(*hash), + store_in_db_trie: Some( + masks.hash_mask.is_some_and(|mask| !mask.is_empty()) || + masks.tree_mask.is_some_and(|mask| !mask.is_empty()), + ), + }); + } + // Branch node already exists, or an extension node was placed where a + // branch node was before. + SparseNode::Branch { .. } | SparseNode::Extension { .. } => {} + // All other node types can't be handled. + node @ (SparseNode::Empty | SparseNode::Leaf { .. }) => { + return Err(SparseTrieErrorKind::Reveal { + path: *entry.key(), + node: Box::new(node.clone()), + } + .into()) + } + }, + Entry::Vacant(entry) => { + entry.insert(SparseNode::new_branch(branch.state_mask)); + } + } + } + TrieNode::Extension(ext) => match self.nodes.entry(path) { + Entry::Occupied(mut entry) => match entry.get() { + // Replace a hash node with a revealed extension node. 
+ SparseNode::Hash(hash) => { + let mut child_path = *entry.key(); + child_path.extend(&ext.key); + entry.insert(SparseNode::Extension { + key: ext.key, + // Memoize the hash of a previously blinded node in a new extension + // node. + hash: Some(*hash), + store_in_db_trie: None, + }); + if Self::is_child_same_level(&path, &child_path) { + self.reveal_node_or_hash(child_path, &ext.child)?; + } + } + // Extension node already exists, or an extension node was placed where a branch + // node was before. + SparseNode::Extension { .. } | SparseNode::Branch { .. } => {} + // All other node types can't be handled. + node @ (SparseNode::Empty | SparseNode::Leaf { .. }) => { + return Err(SparseTrieErrorKind::Reveal { + path: *entry.key(), + node: Box::new(node.clone()), + } + .into()) + } + }, + Entry::Vacant(entry) => { + let mut child_path = *entry.key(); + child_path.extend(&ext.key); + entry.insert(SparseNode::new_ext(ext.key)); + if Self::is_child_same_level(&path, &child_path) { + self.reveal_node_or_hash(child_path, &ext.child)?; + } + } + }, + TrieNode::Leaf(leaf) => match self.nodes.entry(path) { + Entry::Occupied(mut entry) => match entry.get() { + // Replace a hash node with a revealed leaf node and store leaf node value. + SparseNode::Hash(hash) => { + let mut full = *entry.key(); + full.extend(&leaf.key); + self.inner.values.insert(full, leaf.value.clone()); + entry.insert(SparseNode::Leaf { + key: leaf.key, + // Memoize the hash of a previously blinded node in a new leaf + // node. + hash: Some(*hash), + }); + } + // Leaf node already exists. + SparseNode::Leaf { .. } => {} + // All other node types can't be handled. + node @ (SparseNode::Empty | + SparseNode::Extension { .. } | + SparseNode::Branch { .. }) => { + return Err(SparseTrieErrorKind::Reveal { + path: *entry.key(), + node: Box::new(node.clone()), + } + .into()) + } + }, + Entry::Vacant(entry) => { + let mut full = *entry.key(); + full.extend(&leaf.key); + entry.insert(SparseNode::new_leaf(leaf.key)); + self.inner.values.insert(full, leaf.value.clone()); + } + }, + } + + Ok(()) + } + + /// Reveals either a node or its hash placeholder based on the provided child data. + /// + /// When traversing the trie, we often encounter references to child nodes that + /// are either directly embedded or represented by their hash. This method + /// handles both cases: + /// + /// 1. If the child data represents a hash (32+1=33 bytes), store it as a hash node + /// 2. Otherwise, decode the data as a [`TrieNode`] and recursively reveal it using + /// `reveal_node` + /// + /// # Returns + /// + /// Returns `Ok(())` if successful, or an error if the node cannot be revealed. + /// + /// # Error Handling + /// + /// Will error if there's a conflict between a new hash node and an existing one + /// at the same path + fn reveal_node_or_hash(&mut self, path: Nibbles, child: &[u8]) -> SparseTrieResult<()> { + if child.len() == B256::len_bytes() + 1 { + let hash = B256::from_slice(&child[1..]); + match self.nodes.entry(path) { + Entry::Occupied(entry) => match entry.get() { + // Hash node with a different hash can't be handled. 
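The 33-byte test in `reveal_node_or_hash` above comes straight from RLP: a 32-byte hash is encoded as the length prefix byte `0xa0` followed by the payload, which is exactly what `RlpNode::word_rlp` produces. A quick sketch of that invariant, assuming the `alloy` types this file already uses:

```rust
use alloy_primitives::B256;
use alloy_trie::nodes::RlpNode;

fn main() {
    // RLP encodes a 32-byte string as 0xa0 plus the 32 bytes: 33 bytes total.
    let hash = B256::repeat_byte(0xab);
    let rlp = RlpNode::word_rlp(&hash);
    assert_eq!(rlp.len(), B256::len_bytes() + 1);
    assert_eq!(rlp[0], 0xa0);
    assert_eq!(B256::from_slice(&rlp[1..]), hash);
    // Anything shorter must be a node embedded inline (its own RLP is under
    // 32 bytes), so the code decodes it as a `TrieNode` instead.
}
```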
+                    SparseNode::Hash(previous_hash) if previous_hash != &hash => {
+                        return Err(SparseTrieErrorKind::Reveal {
+                            path: *entry.key(),
+                            node: Box::new(SparseNode::Hash(hash)),
+                        }
+                        .into())
+                    }
+                    _ => {}
+                },
+                Entry::Vacant(entry) => {
+                    entry.insert(SparseNode::Hash(hash));
+                }
+            }
+            return Ok(())
+        }
+
+        self.reveal_node(path, &TrieNode::decode(&mut &child[..])?, TrieMasks::none())
+    }
+
+    /// Recalculates and updates the RLP hashes for the changed nodes in this subtrie.
+    ///
+    /// The function starts from the subtrie root, traverses down to leaves, and then calculates
+    /// the hashes from leaves back up to the root. It uses a stack from [`SparseSubtrieBuffers`]
+    /// to track the traversal and accumulate RLP encodings.
+    ///
+    /// # Parameters
+    ///
+    /// - `prefix_set`: The set of trie paths whose nodes have changed.
+    /// - `update_actions`: A buffer to which `SparseTrieUpdatesAction`s will be written in the
+    ///   event that any changes to the top-level updates are required. If `None`, update
+    ///   retention is disabled.
+    /// - `branch_node_tree_masks`: The tree masks for branch nodes
+    /// - `branch_node_hash_masks`: The hash masks for branch nodes
+    ///
+    /// # Returns
+    ///
+    /// The RLP node of the updated subtrie's root.
+    ///
+    /// # Panics
+    ///
+    /// If the node at the root path does not exist.
+    #[instrument(level = "trace", target = "trie::parallel_sparse", skip_all, fields(root = ?self.path), ret)]
+    fn update_hashes(
+        &mut self,
+        prefix_set: &mut PrefixSet,
+        update_actions: &mut Option<Vec<SparseTrieUpdatesAction>>,
+        branch_node_tree_masks: &HashMap<Nibbles, TrieMask>,
+        branch_node_hash_masks: &HashMap<Nibbles, TrieMask>,
+    ) -> RlpNode {
+        trace!(target: "trie::parallel_sparse", "Updating subtrie hashes");
+
+        debug_assert!(prefix_set.iter().all(|path| path.starts_with(&self.path)));
+
+        debug_assert!(self.inner.buffers.path_stack.is_empty());
+        self.inner
+            .buffers
+            .path_stack
+            .push(RlpNodePathStackItem { path: self.path, is_in_prefix_set: None });
+
+        while let Some(stack_item) = self.inner.buffers.path_stack.pop() {
+            let path = stack_item.path;
+            let node = self
+                .nodes
+                .get_mut(&path)
+                .unwrap_or_else(|| panic!("node at path {path:?} does not exist"));
+
+            self.inner.rlp_node(
+                prefix_set,
+                update_actions,
+                stack_item,
+                node,
+                branch_node_tree_masks,
+                branch_node_hash_masks,
+            );
+        }
+
+        debug_assert_eq!(self.inner.buffers.rlp_node_stack.len(), 1);
+        self.inner.buffers.rlp_node_stack.pop().unwrap().rlp_node
+    }
+
+    /// Removes all nodes and values from the subtrie, resetting it to a blank state
+    /// with only an empty root node. This is used when a storage root is deleted.
+    fn wipe(&mut self) {
+        self.nodes = HashMap::from_iter([(Nibbles::default(), SparseNode::Empty)]);
+        self.inner.clear();
+    }
+
+    /// Clears the subtrie, keeping the data structures allocated.
+    pub(crate) fn clear(&mut self) {
+        self.nodes.clear();
+        self.inner.clear();
+    }
 }
 
-/// Convert first [`UPPER_TRIE_MAX_DEPTH`] nibbles of the path into a lower subtrie index in the
-/// range [0, [`NUM_LOWER_SUBTRIES`]).
-///
-/// # Panics
-///
-/// If the path is shorter than [`UPPER_TRIE_MAX_DEPTH`] nibbles.
-fn path_subtrie_index_unchecked(path: &Nibbles) -> usize {
-    debug_assert_eq!(UPPER_TRIE_MAX_DEPTH, 2);
-    path.get_byte_unchecked(0) as usize
+/// Helper type for [`SparseSubtrie`] to mutably access only a subset of fields from the original
+/// struct.
+#[derive(Clone, PartialEq, Eq, Debug, Default)]
+struct SparseSubtrieInner {
+    /// Map from leaf key paths to their values.
+    /// All values are stored here instead of directly in leaf nodes.
+    values: HashMap<Nibbles, Vec<u8>>,
+    /// Reusable buffers for [`SparseSubtrie::update_hashes`].
+    buffers: SparseSubtrieBuffers,
 }
 
-#[cfg(test)]
-mod tests {
-    use super::{
-        path_subtrie_index_unchecked, ParallelSparseTrie, SparseSubtrie, SparseSubtrieType,
-    };
-    use crate::trie::ChangedSubtrie;
-    use alloy_primitives::{
-        map::{foldhash::fast::RandomState, B256Set, DefaultHashBuilder, HashMap},
-        B256,
-    };
+impl SparseSubtrieInner {
+    /// Computes the RLP encoding and its hash for a single trie node ([`SparseNode`]).
+    ///
+    /// # Deferred Processing
+    ///
+    /// When an extension or a branch node depends on child nodes that haven't been computed yet,
+    /// the function pushes the current node back onto the path stack along with its children,
+    /// then returns early. This allows the iterative algorithm to process children first before
+    /// retrying the parent.
+    ///
+    /// # Parameters
+    ///
+    /// - `prefix_set`: Set of prefixes (key paths) that have been marked as updated
+    /// - `update_actions`: A buffer to which `SparseTrieUpdatesAction`s will be written in the
+    ///   event that any changes to the top-level updates are required. If `None`, update
+    ///   retention is disabled.
+    /// - `stack_item`: The stack item to process
+    /// - `node`: The sparse node to process (will be mutated to update hash)
+    /// - `branch_node_tree_masks`: The tree masks for branch nodes
+    /// - `branch_node_hash_masks`: The hash masks for branch nodes
+    ///
+    /// # Side Effects
+    ///
+    /// - Updates the node's hash field after computing RLP
+    /// - Pushes nodes to [`SparseSubtrieBuffers::path_stack`] to manage traversal
+    /// - May push items onto the path stack for deferred processing
+    ///
+    /// # Exit condition
+    ///
+    /// Once all nodes have been processed and all RLPs and hashes calculated, pushes the root
+    /// node onto the [`SparseSubtrieBuffers::rlp_node_stack`] and exits.
+    fn rlp_node(
+        &mut self,
+        prefix_set: &mut PrefixSet,
+        update_actions: &mut Option<Vec<SparseTrieUpdatesAction>>,
+        mut stack_item: RlpNodePathStackItem,
+        node: &mut SparseNode,
+        branch_node_tree_masks: &HashMap<Nibbles, TrieMask>,
+        branch_node_hash_masks: &HashMap<Nibbles, TrieMask>,
+    ) {
+        let path = stack_item.path;
+        trace!(
+            target: "trie::parallel_sparse",
+            ?path,
+            ?node,
+            "Calculating node RLP"
+        );
+
+        // Check if the path is in the prefix set.
+        // First, check the cached value. If it's `None`, then check the prefix set, and update
+        // the cached value.
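The cached check described in the comment above is a small memoization: each stack item carries an `Option<bool>`, so a node that is deferred and revisited never queries the prefix set twice. The same pattern in plain Rust, with a counter standing in for the real lookup:

```rust
fn main() {
    let mut lookups = 0u32;
    // Stand-in for `prefix_set.contains(path)`, which we want to run at most once.
    let mut expensive_contains = |_path: &[u8]| {
        lookups += 1;
        true
    };

    // `is_in_prefix_set: Option<bool>` on the stack item, in miniature.
    let mut cached: Option<bool> = None;
    let path = [0x1u8, 0x2];

    for _ in 0..3 {
        // `get_or_insert_with` only runs the real lookup on the first call.
        let contains = *cached.get_or_insert_with(|| expensive_contains(&path));
        assert!(contains);
    }
    assert_eq!(lookups, 1);
}
```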
+ let mut prefix_set_contains = |path: &Nibbles| { + *stack_item.is_in_prefix_set.get_or_insert_with(|| prefix_set.contains(path)) + }; + + let (rlp_node, node_type) = match node { + SparseNode::Empty => (RlpNode::word_rlp(&EMPTY_ROOT_HASH), SparseNodeType::Empty), + SparseNode::Hash(hash) => { + // Return pre-computed hash of a blinded node immediately + (RlpNode::word_rlp(hash), SparseNodeType::Hash) + } + SparseNode::Leaf { key, hash } => { + let mut path = path; + path.extend(key); + let value = self.values.get(&path); + if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path) || value.is_none()) + { + // If the node hash is already computed, and either the node path is not in + // the prefix set or the leaf doesn't belong to the current trie (its value is + // absent), return the pre-computed hash + (RlpNode::word_rlp(&hash), SparseNodeType::Leaf) + } else { + // Encode the leaf node and update its hash + let value = self.values.get(&path).unwrap(); + self.buffers.rlp_buf.clear(); + let rlp_node = LeafNodeRef { key, value }.rlp(&mut self.buffers.rlp_buf); + *hash = rlp_node.as_hash(); + (rlp_node, SparseNodeType::Leaf) + } + } + SparseNode::Extension { key, hash, store_in_db_trie } => { + let mut child_path = path; + child_path.extend(key); + if let Some((hash, store_in_db_trie)) = + hash.zip(*store_in_db_trie).filter(|_| !prefix_set_contains(&path)) + { + // If the node hash is already computed, and the node path is not in + // the prefix set, return the pre-computed hash + ( + RlpNode::word_rlp(&hash), + SparseNodeType::Extension { store_in_db_trie: Some(store_in_db_trie) }, + ) + } else if self.buffers.rlp_node_stack.last().is_some_and(|e| e.path == child_path) { + // Top of the stack has the child node, we can encode the extension node and + // update its hash + let RlpNodeStackItem { path: _, rlp_node: child, node_type: child_node_type } = + self.buffers.rlp_node_stack.pop().unwrap(); + self.buffers.rlp_buf.clear(); + let rlp_node = + ExtensionNodeRef::new(key, &child).rlp(&mut self.buffers.rlp_buf); + *hash = rlp_node.as_hash(); + + let store_in_db_trie_value = child_node_type.store_in_db_trie(); + + trace!( + target: "trie::parallel_sparse", + ?path, + ?child_path, + ?child_node_type, + "Extension node" + ); + + *store_in_db_trie = store_in_db_trie_value; + + ( + rlp_node, + SparseNodeType::Extension { + // Inherit the `store_in_db_trie` flag from the child node, which is + // always the branch node + store_in_db_trie: store_in_db_trie_value, + }, + ) + } else { + // Need to defer processing until child is computed, on the next + // invocation update the node's hash. 
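This defer-and-retry step is the heart of the iterative hash pass: a parent whose children aren't encoded yet pushes itself back onto the path stack beneath its children, so when it pops again their results are waiting on the RLP stack. A toy version of the control flow (plain Rust, no reth types; the real code pushes children in reverse so pops come back in ascending order):

```rust
fn main() {
    // (name, child ids); id 0 is the root.
    let tree: Vec<(&str, Vec<usize>)> =
        vec![("root", vec![1, 2]), ("left", vec![]), ("right", vec![])];

    let mut path_stack = vec![0usize];
    let mut hashed: Vec<&str> = Vec::new();

    while let Some(id) = path_stack.pop() {
        let (name, children) = &tree[id];
        if children.iter().all(|c| hashed.contains(&tree[*c].0)) {
            // All children done: this node can be "hashed" now.
            hashed.push(*name);
        } else {
            // Defer: revisit this node after its children are processed.
            path_stack.push(id);
            path_stack.extend(children.iter().copied());
        }
    }

    // Children complete before their parent, root last.
    assert_eq!(hashed, ["right", "left", "root"]);
}
```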
+ self.buffers.path_stack.extend([ + RlpNodePathStackItem { + path, + is_in_prefix_set: Some(prefix_set_contains(&path)), + }, + RlpNodePathStackItem { path: child_path, is_in_prefix_set: None }, + ]); + return + } + } + SparseNode::Branch { state_mask, hash, store_in_db_trie } => { + if let Some((hash, store_in_db_trie)) = + hash.zip(*store_in_db_trie).filter(|_| !prefix_set_contains(&path)) + { + // If the node hash is already computed, and the node path is not in + // the prefix set, return the pre-computed hash + self.buffers.rlp_node_stack.push(RlpNodeStackItem { + path, + rlp_node: RlpNode::word_rlp(&hash), + node_type: SparseNodeType::Branch { + store_in_db_trie: Some(store_in_db_trie), + }, + }); + return + } + + let retain_updates = update_actions.is_some() && prefix_set_contains(&path); + + self.buffers.branch_child_buf.clear(); + // Walk children in a reverse order from `f` to `0`, so we pop the `0` first + // from the stack and keep walking in the sorted order. + for bit in CHILD_INDEX_RANGE.rev() { + if state_mask.is_bit_set(bit) { + let mut child = path; + child.push_unchecked(bit); + self.buffers.branch_child_buf.push(child); + } + } + + self.buffers + .branch_value_stack_buf + .resize(self.buffers.branch_child_buf.len(), Default::default()); + let mut added_children = false; + + let mut tree_mask = TrieMask::default(); + let mut hash_mask = TrieMask::default(); + let mut hashes = Vec::new(); + for (i, child_path) in self.buffers.branch_child_buf.iter().enumerate() { + if self.buffers.rlp_node_stack.last().is_some_and(|e| &e.path == child_path) { + let RlpNodeStackItem { + path: _, + rlp_node: child, + node_type: child_node_type, + } = self.buffers.rlp_node_stack.pop().unwrap(); + + // Update the masks only if we need to retain trie updates + if retain_updates { + // SAFETY: it's a child, so it's never empty + let last_child_nibble = child_path.last().unwrap(); + + // Determine whether we need to set trie mask bit. + let should_set_tree_mask_bit = if let Some(store_in_db_trie) = + child_node_type.store_in_db_trie() + { + // A branch or an extension node explicitly set the + // `store_in_db_trie` flag + store_in_db_trie + } else { + // A blinded node has the tree mask bit set + child_node_type.is_hash() && + branch_node_tree_masks + .get(&path) + .is_some_and(|mask| mask.is_bit_set(last_child_nibble)) + }; + if should_set_tree_mask_bit { + tree_mask.set_bit(last_child_nibble); + } + + // Set the hash mask. If a child node is a revealed branch node OR + // is a blinded node that has its hash mask bit set according to the + // database, set the hash mask bit and save the hash. + let hash = child.as_hash().filter(|_| { + child_node_type.is_branch() || + (child_node_type.is_hash() && + branch_node_hash_masks.get(&path).is_some_and( + |mask| mask.is_bit_set(last_child_nibble), + )) + }); + if let Some(hash) = hash { + hash_mask.set_bit(last_child_nibble); + hashes.push(hash); + } + } + + // Insert children in the resulting buffer in a normal order, + // because initially we iterated in reverse. + // SAFETY: i < len and len is never 0 + let original_idx = self.buffers.branch_child_buf.len() - i - 1; + self.buffers.branch_value_stack_buf[original_idx] = child; + added_children = true; + } else { + // Need to defer processing until children are computed, on the next + // invocation update the node's hash. 
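A few lines up, `original_idx = len - i - 1` undoes the earlier reversal: children were pushed from `0xf` down to `0x0`, so they pop in ascending order, and the subtraction writes each popped child back into the branch's natural slot. The index arithmetic on its own, with made-up child nibbles:

```rust
fn main() {
    // Child paths were pushed reversed, so the buffer reads 7, 5, 0.
    let branch_child_buf = vec![0x7u8, 0x5, 0x0];
    let mut value_buf = vec![0u8; branch_child_buf.len()];

    for (i, child) in branch_child_buf.iter().enumerate() {
        // The i-th entry belongs at position (len - i - 1) in natural order.
        let original_idx = branch_child_buf.len() - i - 1;
        value_buf[original_idx] = *child;
    }

    // The value buffer ends up in the branch's natural child order: 0, 5, 7.
    assert_eq!(value_buf, [0x0, 0x5, 0x7]);
}
```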
+ debug_assert!(!added_children); + self.buffers.path_stack.push(RlpNodePathStackItem { + path, + is_in_prefix_set: Some(prefix_set_contains(&path)), + }); + self.buffers.path_stack.extend( + self.buffers + .branch_child_buf + .drain(..) + .map(|path| RlpNodePathStackItem { path, is_in_prefix_set: None }), + ); + return + } + } + + trace!( + target: "trie::parallel_sparse", + ?path, + ?tree_mask, + ?hash_mask, + "Branch node masks" + ); + + // Top of the stack has all children node, we can encode the branch node and + // update its hash + self.buffers.rlp_buf.clear(); + let branch_node_ref = + BranchNodeRef::new(&self.buffers.branch_value_stack_buf, *state_mask); + let rlp_node = branch_node_ref.rlp(&mut self.buffers.rlp_buf); + *hash = rlp_node.as_hash(); + + // Save a branch node update only if it's not a root node, and we need to + // persist updates. + let store_in_db_trie_value = if let Some(update_actions) = + update_actions.as_mut().filter(|_| retain_updates && !path.is_empty()) + { + let store_in_db_trie = !tree_mask.is_empty() || !hash_mask.is_empty(); + if store_in_db_trie { + // Store in DB trie if there are either any children that are stored in + // the DB trie, or any children represent hashed values + hashes.reverse(); + let branch_node = BranchNodeCompact::new( + *state_mask, + tree_mask, + hash_mask, + hashes, + hash.filter(|_| path.is_empty()), + ); + update_actions + .push(SparseTrieUpdatesAction::InsertUpdated(path, branch_node)); + } else if branch_node_tree_masks.get(&path).is_some_and(|mask| !mask.is_empty()) || + branch_node_hash_masks.get(&path).is_some_and(|mask| !mask.is_empty()) + { + // If new tree and hash masks are empty, but previously they weren't, we + // need to remove the node update and add the node itself to the list of + // removed nodes. + update_actions.push(SparseTrieUpdatesAction::InsertRemoved(path)); + } else if branch_node_tree_masks.get(&path).is_none_or(|mask| mask.is_empty()) && + branch_node_hash_masks.get(&path).is_none_or(|mask| mask.is_empty()) + { + // If new tree and hash masks are empty, and they were previously empty + // as well, we need to remove the node update. + update_actions.push(SparseTrieUpdatesAction::RemoveUpdated(path)); + } + + store_in_db_trie + } else { + false + }; + *store_in_db_trie = Some(store_in_db_trie_value); + + ( + rlp_node, + SparseNodeType::Branch { store_in_db_trie: Some(store_in_db_trie_value) }, + ) + } + }; + + self.buffers.rlp_node_stack.push(RlpNodeStackItem { path, rlp_node, node_type }); + trace!( + target: "trie::parallel_sparse", + ?path, + ?node_type, + "Added node to RLP node stack" + ); + } + + /// Clears the subtrie, keeping the data structures allocated. 
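The three `SparseTrieUpdatesAction` pushes above reduce to one question: is this branch stored in the DB trie now, was it stored before, or neither. A condensed sketch of that decision under stated assumptions (the names and the `had_masks_before` flag are mine, standing in for the tree/hash mask lookups):

```rust
/// Hypothetical mirror of the three `SparseTrieUpdatesAction` outcomes.
enum Action {
    InsertUpdated, // branch must be stored in the DB trie now
    InsertRemoved, // it was stored before and must be deleted
    RemoveUpdated, // it was never stored; drop any pending update
}

fn classify(tree_mask_empty: bool, hash_mask_empty: bool, had_masks_before: bool) -> Action {
    if !tree_mask_empty || !hash_mask_empty {
        Action::InsertUpdated
    } else if had_masks_before {
        Action::InsertRemoved
    } else {
        Action::RemoveUpdated
    }
}

fn main() {
    assert!(matches!(classify(false, true, false), Action::InsertUpdated));
    assert!(matches!(classify(true, true, true), Action::InsertRemoved));
    assert!(matches!(classify(true, true, false), Action::RemoveUpdated));
}
```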
+ fn clear(&mut self) { + self.values.clear(); + self.buffers.clear(); + } +} + +/// Represents the outcome of processing a node during leaf insertion +#[derive(Clone, Debug, PartialEq, Eq, Default)] +pub enum LeafUpdateStep { + /// Continue traversing to the next node + Continue { + /// The next node path to process + next_node: Nibbles, + }, + /// Update is complete with nodes inserted + Complete { + /// The node paths that were inserted during this step + inserted_nodes: Vec, + /// Path to a node which may need to be revealed + reveal_path: Option, + }, + /// The node was not found + #[default] + NodeNotFound, +} + +impl LeafUpdateStep { + /// Creates a step to continue with the next node + pub const fn continue_with(next_node: Nibbles) -> Self { + Self::Continue { next_node } + } + + /// Creates a step indicating completion with inserted nodes + pub const fn complete_with_insertions( + inserted_nodes: Vec, + reveal_path: Option, + ) -> Self { + Self::Complete { inserted_nodes, reveal_path } + } +} + +/// Sparse Subtrie Type. +/// +/// Used to determine the type of subtrie a certain path belongs to: +/// - Paths in the range `0x..=0xf` belong to the upper subtrie. +/// - Paths in the range `0x00..` belong to one of the lower subtries. The index of the lower +/// subtrie is determined by the first [`UPPER_TRIE_MAX_DEPTH`] nibbles of the path. +/// +/// There can be at most [`NUM_LOWER_SUBTRIES`] lower subtries. +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum SparseSubtrieType { + /// Upper subtrie with paths in the range `0x..=0xf` + Upper, + /// Lower subtrie with paths in the range `0x00..`. Includes the index of the subtrie, + /// according to the path prefix. + Lower(usize), +} + +impl SparseSubtrieType { + /// Returns true if a node at a path of the given length would be placed in the upper subtrie. + /// + /// Nodes with paths shorter than [`UPPER_TRIE_MAX_DEPTH`] nibbles belong to the upper subtrie, + /// while longer paths belong to the lower subtries. + pub const fn path_len_is_upper(len: usize) -> bool { + len < UPPER_TRIE_MAX_DEPTH + } + + /// Returns the type of subtrie based on the given path. + pub fn from_path(path: &Nibbles) -> Self { + if Self::path_len_is_upper(path.len()) { + Self::Upper + } else { + Self::Lower(path_subtrie_index_unchecked(path)) + } + } + + /// Returns the index of the lower subtrie, if it exists. + pub const fn lower_index(&self) -> Option { + match self { + Self::Upper => None, + Self::Lower(index) => Some(*index), + } + } +} + +impl Ord for SparseSubtrieType { + /// Orders two [`SparseSubtrieType`]s such that `Upper` is less than `Lower(_)`, and `Lower`s + /// are ordered by their index. + fn cmp(&self, other: &Self) -> Ordering { + match (self, other) { + (Self::Upper, Self::Upper) => Ordering::Equal, + (Self::Upper, Self::Lower(_)) => Ordering::Less, + (Self::Lower(_), Self::Upper) => Ordering::Greater, + (Self::Lower(idx_a), Self::Lower(idx_b)) if idx_a == idx_b => Ordering::Equal, + (Self::Lower(idx_a), Self::Lower(idx_b)) => idx_a.cmp(idx_b), + } + } +} + +impl PartialOrd for SparseSubtrieType { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +/// Collection of reusable buffers for calculating subtrie hashes. +/// +/// These buffers reduce allocations when computing RLP representations during trie updates. 
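`SparseSubtrieType::from_path` above routes any path of two or more nibbles to one of the 256 lower subtries; the index is simply the first two nibbles packed into a byte, as `path_subtrie_index_unchecked` further down does. A sketch of the indexing (a hypothetical helper mirroring the real one, using the `nybbles` crate):

```rust
use nybbles::Nibbles;

/// Hypothetical mirror of `path_subtrie_index_unchecked`.
fn subtrie_index(path: &Nibbles) -> usize {
    debug_assert!(path.len() >= 2, "upper-trie paths have no lower subtrie");
    // The first two nibbles packed into one byte: 256 possible indices.
    path.get_byte_unchecked(0) as usize
}

fn main() {
    assert_eq!(subtrie_index(&Nibbles::from_nibbles([0x0, 0x0])), 0x00);
    assert_eq!(subtrie_index(&Nibbles::from_nibbles([0x1, 0x2, 0x3])), 0x12);
    assert_eq!(subtrie_index(&Nibbles::from_nibbles([0xf, 0xf])), 0xff);
}
```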
+#[derive(Clone, PartialEq, Eq, Debug, Default)]
+pub struct SparseSubtrieBuffers {
+    /// Stack of RLP node paths
+    path_stack: Vec<RlpNodePathStackItem>,
+    /// Stack of RLP nodes
+    rlp_node_stack: Vec<RlpNodeStackItem>,
+    /// Reusable branch child path
+    branch_child_buf: SmallVec<[Nibbles; 16]>,
+    /// Reusable branch value stack
+    branch_value_stack_buf: SmallVec<[RlpNode; 16]>,
+    /// Reusable RLP buffer
+    rlp_buf: Vec<u8>,
+}
+
+impl SparseSubtrieBuffers {
+    /// Clears all buffers.
+    fn clear(&mut self) {
+        self.path_stack.clear();
+        self.rlp_node_stack.clear();
+        self.branch_child_buf.clear();
+        self.branch_value_stack_buf.clear();
+        self.rlp_buf.clear();
+    }
+}
+
+/// RLP node path stack item.
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct RlpNodePathStackItem {
+    /// Path to the node.
+    pub path: Nibbles,
+    /// Whether the path is in the prefix set. If [`None`], then unknown yet.
+    pub is_in_prefix_set: Option<bool>,
+}
+
+/// Changed subtrie.
+#[derive(Debug)]
+struct ChangedSubtrie {
+    /// Lower subtrie index in the range [0, [`NUM_LOWER_SUBTRIES`]).
+    index: usize,
+    /// Changed subtrie
+    subtrie: Box<SparseSubtrie>,
+    /// Prefix set of keys that belong to the subtrie.
+    prefix_set: PrefixSet,
+    /// Reusable buffer for collecting [`SparseTrieUpdatesAction`]s during computations. Will be
+    /// `None` if update retention is disabled.
+    update_actions_buf: Option<Vec<SparseTrieUpdatesAction>>,
+}
+
+/// Convert the first [`UPPER_TRIE_MAX_DEPTH`] nibbles of the path into a lower subtrie index in
+/// the range [0, [`NUM_LOWER_SUBTRIES`]).
+///
+/// # Panics
+///
+/// If the path is shorter than [`UPPER_TRIE_MAX_DEPTH`] nibbles.
+fn path_subtrie_index_unchecked(path: &Nibbles) -> usize {
+    debug_assert_eq!(UPPER_TRIE_MAX_DEPTH, 2);
+    path.get_byte_unchecked(0) as usize
+}
+
+/// Used by lower subtries to communicate updates to the top-level [`SparseTrieUpdates`] set.
+#[derive(Clone, Debug, Eq, PartialEq)]
+enum SparseTrieUpdatesAction {
+    /// Remove the path from `updated_nodes`, if it was present, and add it to `removed_nodes`.
+    InsertRemoved(Nibbles),
+    /// Remove the path from `updated_nodes`, if it was present, leaving `removed_nodes`
+    /// unaffected.
+    RemoveUpdated(Nibbles),
+    /// Insert the branch node into `updated_nodes`.
+ InsertUpdated(Nibbles, BranchNodeCompact), +} + +#[cfg(test)] +mod tests { + use super::{ + path_subtrie_index_unchecked, LowerSparseSubtrie, ParallelSparseTrie, SparseSubtrie, + SparseSubtrieType, + }; + use crate::trie::ChangedSubtrie; + use alloy_primitives::{ + b256, hex, + map::{foldhash::fast::RandomState, B256Set, DefaultHashBuilder, HashMap}, + B256, U256, + }; use alloy_rlp::{Decodable, Encodable}; use alloy_trie::{BranchNodeCompact, Nibbles}; use assert_matches::assert_matches; use itertools::Itertools; - use reth_execution_errors::SparseTrieError; + use proptest::{prelude::*, sample::SizeRange}; + use proptest_arbitrary_interop::arb; + use reth_execution_errors::{SparseTrieError, SparseTrieErrorKind}; use reth_primitives_traits::Account; + use reth_provider::{test_utils::create_test_provider_factory, TrieWriter}; use reth_trie::{ hashed_cursor::{noop::NoopHashedAccountCursor, HashedPostStateAccountCursor}, node_iter::{TrieElement, TrieNodeIter}, - trie_cursor::{noop::NoopAccountTrieCursor, TrieCursor}, + trie_cursor::{noop::NoopAccountTrieCursor, TrieCursor, TrieCursorFactory}, walker::TrieWalker, + HashedPostState, }; use reth_trie_common::{ prefix_set::PrefixSetMut, proof::{ProofNodes, ProofRetainer}, updates::TrieUpdates, - BranchNode, ExtensionNode, HashBuilder, HashedPostState, LeafNode, RlpNode, TrieMask, - TrieNode, EMPTY_ROOT_HASH, + BranchNode, ExtensionNode, HashBuilder, LeafNode, RlpNode, TrieMask, TrieNode, + EMPTY_ROOT_HASH, }; + use reth_trie_db::DatabaseTrieCursorFactory; use reth_trie_sparse::{ - blinded::{BlindedProvider, RevealedNode}, - SparseNode, TrieMasks, + provider::{DefaultTrieNodeProvider, RevealedNode, TrieNodeProvider}, + LeafLookup, LeafLookupError, RevealedSparseNode, SerialSparseTrie, SparseNode, + SparseTrieInterface, SparseTrieUpdates, TrieMasks, }; + use std::collections::{BTreeMap, BTreeSet}; + + /// Pad nibbles to the length of a B256 hash with zeros on the right. + fn pad_nibbles_right(mut nibbles: Nibbles) -> Nibbles { + nibbles.extend(&Nibbles::from_nibbles_unchecked(vec![ + 0; + B256::len_bytes() * 2 - nibbles.len() + ])); + nibbles + } + + /// Mock trie node provider for testing that allows pre-setting nodes at specific paths. + /// + /// This provider can be used in tests to simulate trie nodes that need to be revealed + /// during trie operations, particularly when collapsing branch nodes during leaf removal. 
+ #[derive(Debug, Clone)] + struct MockTrieNodeProvider { + /// Mapping from path to revealed node data + nodes: HashMap, + } + + impl MockTrieNodeProvider { + /// Creates a new empty mock provider + fn new() -> Self { + Self { nodes: HashMap::with_hasher(RandomState::default()) } + } + + /// Adds a revealed node at the specified path + fn add_revealed_node(&mut self, path: Nibbles, node: RevealedNode) { + self.nodes.insert(path, node); + } + } + + impl TrieNodeProvider for MockTrieNodeProvider { + fn trie_node(&self, path: &Nibbles) -> Result, SparseTrieError> { + Ok(self.nodes.get(path).cloned()) + } + } + + fn create_account(nonce: u64) -> Account { + Account { nonce, ..Default::default() } + } + + fn encode_account_value(nonce: u64) -> Vec { + let account = Account { nonce, ..Default::default() }; + let trie_account = account.into_trie_account(EMPTY_ROOT_HASH); + let mut buf = Vec::new(); + trie_account.encode(&mut buf); + buf + } + + /// Test context that provides helper methods for trie testing + #[derive(Default)] + struct ParallelSparseTrieTestContext; + + impl ParallelSparseTrieTestContext { + /// Assert that a lower subtrie exists at the given path + fn assert_subtrie_exists(&self, trie: &ParallelSparseTrie, path: &Nibbles) { + let idx = path_subtrie_index_unchecked(path); + assert!( + trie.lower_subtries[idx].as_revealed_ref().is_some(), + "Expected lower subtrie at path {path:?} to exist", + ); + } + + /// Get a lower subtrie, panicking if it doesn't exist + fn get_subtrie<'a>( + &self, + trie: &'a ParallelSparseTrie, + path: &Nibbles, + ) -> &'a SparseSubtrie { + let idx = path_subtrie_index_unchecked(path); + trie.lower_subtries[idx] + .as_revealed_ref() + .unwrap_or_else(|| panic!("Lower subtrie at path {path:?} should exist")) + } + + /// Assert that a lower subtrie has a specific path field value + fn assert_subtrie_path( + &self, + trie: &ParallelSparseTrie, + subtrie_prefix: impl AsRef<[u8]>, + expected_path: impl AsRef<[u8]>, + ) { + let subtrie_prefix = Nibbles::from_nibbles(subtrie_prefix); + let expected_path = Nibbles::from_nibbles(expected_path); + let idx = path_subtrie_index_unchecked(&subtrie_prefix); + + let subtrie = trie.lower_subtries[idx].as_revealed_ref().unwrap_or_else(|| { + panic!("Lower subtrie at prefix {subtrie_prefix:?} should exist") + }); + + assert_eq!( + subtrie.path, expected_path, + "Subtrie at prefix {subtrie_prefix:?} should have path {expected_path:?}, but has {:?}", + subtrie.path + ); + } + + /// Create test leaves with consecutive account values + fn create_test_leaves(&self, paths: &[&[u8]]) -> Vec<(Nibbles, Vec)> { + paths + .iter() + .enumerate() + .map(|(i, path)| (Nibbles::from_nibbles(path), encode_account_value(i as u64 + 1))) + .collect() + } + + /// Create a single test leaf with the given path and value nonce + fn create_test_leaf(&self, path: impl AsRef<[u8]>, value_nonce: u64) -> (Nibbles, Vec) { + (Nibbles::from_nibbles(path), encode_account_value(value_nonce)) + } + + /// Update multiple leaves in the trie + fn update_leaves( + &self, + trie: &mut ParallelSparseTrie, + leaves: impl IntoIterator)>, + ) { + for (path, value) in leaves { + trie.update_leaf(path, value, DefaultTrieNodeProvider).unwrap(); + } + } + + /// Create an assertion builder for a subtrie + fn assert_subtrie<'a>( + &self, + trie: &'a ParallelSparseTrie, + path: Nibbles, + ) -> SubtrieAssertion<'a> { + self.assert_subtrie_exists(trie, &path); + let subtrie = self.get_subtrie(trie, &path); + SubtrieAssertion::new(subtrie) + } + + /// Create an assertion 
builder for the upper subtrie + fn assert_upper_subtrie<'a>(&self, trie: &'a ParallelSparseTrie) -> SubtrieAssertion<'a> { + SubtrieAssertion::new(&trie.upper_subtrie) + } + + /// Assert the root, trie updates, and nodes against the hash builder output. + fn assert_with_hash_builder( + &self, + trie: &mut ParallelSparseTrie, + hash_builder_root: B256, + hash_builder_updates: TrieUpdates, + hash_builder_proof_nodes: ProofNodes, + ) { + assert_eq!(trie.root(), hash_builder_root); + pretty_assertions::assert_eq!( + BTreeMap::from_iter(trie.updates_ref().updated_nodes.clone()), + BTreeMap::from_iter(hash_builder_updates.account_nodes) + ); + assert_eq_parallel_sparse_trie_proof_nodes(trie, hash_builder_proof_nodes); + } + } + + /// Assertion builder for subtrie structure + struct SubtrieAssertion<'a> { + subtrie: &'a SparseSubtrie, + } + + impl<'a> SubtrieAssertion<'a> { + fn new(subtrie: &'a SparseSubtrie) -> Self { + Self { subtrie } + } + + fn has_branch(self, path: &Nibbles, expected_mask_bits: &[u8]) -> Self { + match self.subtrie.nodes.get(path) { + Some(SparseNode::Branch { state_mask, .. }) => { + for bit in expected_mask_bits { + assert!( + state_mask.is_bit_set(*bit), + "Expected branch at {path:?} to have bit {bit} set, instead mask is: {state_mask:?}", + ); + } + } + node => panic!("Expected branch node at {path:?}, found {node:?}"), + } + self + } + + fn has_leaf(self, path: &Nibbles, expected_key: &Nibbles) -> Self { + match self.subtrie.nodes.get(path) { + Some(SparseNode::Leaf { key, .. }) => { + assert_eq!( + *key, *expected_key, + "Expected leaf at {path:?} to have key {expected_key:?}, found {key:?}", + ); + } + node => panic!("Expected leaf node at {path:?}, found {node:?}"), + } + self + } + + fn has_extension(self, path: &Nibbles, expected_key: &Nibbles) -> Self { + match self.subtrie.nodes.get(path) { + Some(SparseNode::Extension { key, .. 
}) => { + assert_eq!( + *key, *expected_key, + "Expected extension at {path:?} to have key {expected_key:?}, found {key:?}", + ); + } + node => panic!("Expected extension node at {path:?}, found {node:?}"), + } + self + } + + fn has_hash(self, path: &Nibbles, expected_hash: &B256) -> Self { + match self.subtrie.nodes.get(path) { + Some(SparseNode::Hash(hash)) => { + assert_eq!( + *hash, *expected_hash, + "Expected hash at {path:?} to be {expected_hash:?}, found {hash:?}", + ); + } + node => panic!("Expected hash node at {path:?}, found {node:?}"), + } + self + } + + fn has_value(self, path: &Nibbles, expected_value: &[u8]) -> Self { + let actual = self.subtrie.inner.values.get(path); + assert_eq!( + actual.map(|v| v.as_slice()), + Some(expected_value), + "Expected value at {path:?} to be {expected_value:?}, found {actual:?}", + ); + self + } + + fn has_no_value(self, path: &Nibbles) -> Self { + let actual = self.subtrie.inner.values.get(path); + assert!(actual.is_none(), "Expected no value at {path:?}, but found {actual:?}"); + self + } + } + + fn create_leaf_node(key: impl AsRef<[u8]>, value_nonce: u64) -> TrieNode { + TrieNode::Leaf(LeafNode::new(Nibbles::from_nibbles(key), encode_account_value(value_nonce))) + } + + fn create_extension_node(key: impl AsRef<[u8]>, child_hash: B256) -> TrieNode { + TrieNode::Extension(ExtensionNode::new( + Nibbles::from_nibbles(key), + RlpNode::word_rlp(&child_hash), + )) + } + + fn create_branch_node_with_children( + children_indices: &[u8], + child_hashes: impl IntoIterator, + ) -> TrieNode { + let mut stack = Vec::new(); + let mut state_mask = TrieMask::default(); + + for (&idx, hash) in children_indices.iter().zip(child_hashes.into_iter()) { + state_mask.set_bit(idx); + stack.push(hash); + } + + TrieNode::Branch(BranchNode::new(stack, state_mask)) + } + + /// Calculate the state root by feeding the provided state to the hash builder and retaining the + /// proofs for the provided targets. + /// + /// Returns the state root and the retained proof nodes. 
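`run_hash_builder` below feeds the state through alloy's `HashBuilder`, which serves as the reference implementation that the sparse trie's root and updates are checked against. Its basic shape, as a sketch with made-up leaves (keys must be added in ascending nibble order; in the real tests the values are RLP-encoded trie accounts):

```rust
use alloy_trie::{HashBuilder, Nibbles};

fn main() {
    let mut hash_builder = HashBuilder::default();

    // Two made-up leaves keyed by unpacked 32-byte hashes, added in order.
    hash_builder.add_leaf(Nibbles::unpack([0x00u8; 32]), b"value-a");
    hash_builder.add_leaf(Nibbles::unpack([0xffu8; 32]), b"value-b");

    // Consumes the accumulated leaves and returns the state root.
    let root = hash_builder.root();
    println!("root: {root}");
}
```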
+ fn run_hash_builder( + state: impl IntoIterator + Clone, + trie_cursor: impl TrieCursor, + destroyed_accounts: B256Set, + proof_targets: impl IntoIterator, + ) -> (B256, TrieUpdates, ProofNodes, HashMap, HashMap) + { + let mut account_rlp = Vec::new(); + + let mut hash_builder = HashBuilder::default() + .with_updates(true) + .with_proof_retainer(ProofRetainer::from_iter(proof_targets)); + + let mut prefix_set = PrefixSetMut::default(); + prefix_set.extend_keys(state.clone().into_iter().map(|(nibbles, _)| nibbles)); + prefix_set.extend_keys(destroyed_accounts.iter().map(Nibbles::unpack)); + let walker = + TrieWalker::state_trie(trie_cursor, prefix_set.freeze()).with_deletions_retained(true); + let hashed_post_state = HashedPostState::default() + .with_accounts(state.into_iter().map(|(nibbles, account)| { + (nibbles.pack().into_inner().unwrap().into(), Some(account)) + })) + .into_sorted(); + let mut node_iter = TrieNodeIter::state_trie( + walker, + HashedPostStateAccountCursor::new( + NoopHashedAccountCursor::default(), + hashed_post_state.accounts(), + ), + ); + + while let Some(node) = node_iter.try_next().unwrap() { + match node { + TrieElement::Branch(branch) => { + hash_builder.add_branch(branch.key, branch.value, branch.children_are_in_trie); + } + TrieElement::Leaf(key, account) => { + let account = account.into_trie_account(EMPTY_ROOT_HASH); + account.encode(&mut account_rlp); + + hash_builder.add_leaf(Nibbles::unpack(key), &account_rlp); + account_rlp.clear(); + } + } + } + let root = hash_builder.root(); + let proof_nodes = hash_builder.take_proof_nodes(); + let branch_node_hash_masks = hash_builder + .updated_branch_nodes + .clone() + .unwrap_or_default() + .iter() + .map(|(path, node)| (*path, node.hash_mask)) + .collect(); + let branch_node_tree_masks = hash_builder + .updated_branch_nodes + .clone() + .unwrap_or_default() + .iter() + .map(|(path, node)| (*path, node.tree_mask)) + .collect(); + + let mut trie_updates = TrieUpdates::default(); + let removed_keys = node_iter.walker.take_removed_keys(); + trie_updates.finalize(hash_builder, removed_keys, destroyed_accounts); + + (root, trie_updates, proof_nodes, branch_node_hash_masks, branch_node_tree_masks) + } + + /// Returns a `ParallelSparseTrie` pre-loaded with the given nodes, as well as leaf values + /// inferred from any provided leaf nodes. + fn new_test_trie(nodes: Nodes) -> ParallelSparseTrie + where + Nodes: Iterator, + { + let mut trie = ParallelSparseTrie::default().with_updates(true); + + for (path, node) in nodes { + let subtrie = trie.subtrie_for_path_mut(&path); + if let SparseNode::Leaf { key, .. } = &node { + let mut full_key = path; + full_key.extend(key); + subtrie.inner.values.insert(full_key, "LEAF VALUE".into()); + } + subtrie.nodes.insert(path, node); + } + trie + } + + fn parallel_sparse_trie_nodes( + sparse_trie: &ParallelSparseTrie, + ) -> impl IntoIterator { + let lower_sparse_nodes = sparse_trie + .lower_subtries + .iter() + .filter_map(|subtrie| subtrie.as_revealed_ref()) + .flat_map(|subtrie| subtrie.nodes.iter()); + + let upper_sparse_nodes = sparse_trie.upper_subtrie.nodes.iter(); + + lower_sparse_nodes.chain(upper_sparse_nodes).sorted_by_key(|(path, _)| *path) + } + + /// Assert that the parallel sparse trie nodes and the proof nodes from the hash builder are + /// equal. 
+ fn assert_eq_parallel_sparse_trie_proof_nodes( + sparse_trie: &ParallelSparseTrie, + proof_nodes: ProofNodes, + ) { + let proof_nodes = proof_nodes + .into_nodes_sorted() + .into_iter() + .map(|(path, node)| (path, TrieNode::decode(&mut node.as_ref()).unwrap())); + + let all_sparse_nodes = parallel_sparse_trie_nodes(sparse_trie); + + for ((proof_node_path, proof_node), (sparse_node_path, sparse_node)) in + proof_nodes.zip(all_sparse_nodes) + { + assert_eq!(&proof_node_path, sparse_node_path); + + let equals = match (&proof_node, &sparse_node) { + // Both nodes are empty + (TrieNode::EmptyRoot, SparseNode::Empty) => true, + // Both nodes are branches and have the same state mask + ( + TrieNode::Branch(BranchNode { state_mask: proof_state_mask, .. }), + SparseNode::Branch { state_mask: sparse_state_mask, .. }, + ) => proof_state_mask == sparse_state_mask, + // Both nodes are extensions and have the same key + ( + TrieNode::Extension(ExtensionNode { key: proof_key, .. }), + SparseNode::Extension { key: sparse_key, .. }, + ) | + // Both nodes are leaves and have the same key + ( + TrieNode::Leaf(LeafNode { key: proof_key, .. }), + SparseNode::Leaf { key: sparse_key, .. }, + ) => proof_key == sparse_key, + // Empty and hash nodes are specific to the sparse trie, skip them + (_, SparseNode::Empty | SparseNode::Hash(_)) => continue, + _ => false, + }; + assert!( + equals, + "path: {proof_node_path:?}\nproof node: {proof_node:?}\nsparse node: {sparse_node:?}" + ); + } + } + + #[test] + fn test_get_changed_subtries_empty() { + let mut trie = ParallelSparseTrie::default(); + let mut prefix_set = PrefixSetMut::from([Nibbles::default()]).freeze(); + + let (subtries, unchanged_prefix_set) = trie.take_changed_lower_subtries(&mut prefix_set); + assert!(subtries.is_empty()); + assert_eq!(unchanged_prefix_set, PrefixSetMut::from(prefix_set.iter().copied())); + } + + #[test] + fn test_get_changed_subtries() { + // Create a trie with three subtries + let mut trie = ParallelSparseTrie::default(); + let subtrie_1 = Box::new(SparseSubtrie::new(Nibbles::from_nibbles([0x0, 0x0]))); + let subtrie_1_index = path_subtrie_index_unchecked(&subtrie_1.path); + let subtrie_2 = Box::new(SparseSubtrie::new(Nibbles::from_nibbles([0x1, 0x0]))); + let subtrie_2_index = path_subtrie_index_unchecked(&subtrie_2.path); + let subtrie_3 = Box::new(SparseSubtrie::new(Nibbles::from_nibbles([0x3, 0x0]))); + let subtrie_3_index = path_subtrie_index_unchecked(&subtrie_3.path); + + // Add subtries at specific positions + trie.lower_subtries[subtrie_1_index] = LowerSparseSubtrie::Revealed(subtrie_1.clone()); + trie.lower_subtries[subtrie_2_index] = LowerSparseSubtrie::Revealed(subtrie_2.clone()); + trie.lower_subtries[subtrie_3_index] = LowerSparseSubtrie::Revealed(subtrie_3); + + let unchanged_prefix_set = PrefixSetMut::from([ + Nibbles::from_nibbles([0x0]), + Nibbles::from_nibbles([0x2, 0x0, 0x0]), + ]); + // Create a prefix set with the keys that match only the second subtrie + let mut prefix_set = PrefixSetMut::from([ + // Match second subtrie + Nibbles::from_nibbles([0x1, 0x0, 0x0]), + Nibbles::from_nibbles([0x1, 0x0, 0x1, 0x0]), + ]); + prefix_set.extend(unchanged_prefix_set); + let mut prefix_set = prefix_set.freeze(); + + // Second subtrie should be removed and returned + let (subtries, unchanged_prefix_set) = trie.take_changed_lower_subtries(&mut prefix_set); + assert_eq!( + subtries + .into_iter() + .map(|ChangedSubtrie { index, subtrie, prefix_set, .. 
}| { + (index, subtrie, prefix_set.iter().copied().collect::>()) + }) + .collect::>(), + vec![( + subtrie_2_index, + subtrie_2, + vec![ + Nibbles::from_nibbles([0x1, 0x0, 0x0]), + Nibbles::from_nibbles([0x1, 0x0, 0x1, 0x0]) + ] + )] + ); + assert_eq!(unchanged_prefix_set, unchanged_prefix_set); + assert!(trie.lower_subtries[subtrie_2_index].as_revealed_ref().is_none()); + + // First subtrie should remain unchanged + assert_eq!(trie.lower_subtries[subtrie_1_index], LowerSparseSubtrie::Revealed(subtrie_1)); + } + + #[test] + fn test_get_changed_subtries_all() { + // Create a trie with three subtries + let mut trie = ParallelSparseTrie::default(); + let subtrie_1 = Box::new(SparseSubtrie::new(Nibbles::from_nibbles([0x0, 0x0]))); + let subtrie_1_index = path_subtrie_index_unchecked(&subtrie_1.path); + let subtrie_2 = Box::new(SparseSubtrie::new(Nibbles::from_nibbles([0x1, 0x0]))); + let subtrie_2_index = path_subtrie_index_unchecked(&subtrie_2.path); + let subtrie_3 = Box::new(SparseSubtrie::new(Nibbles::from_nibbles([0x3, 0x0]))); + let subtrie_3_index = path_subtrie_index_unchecked(&subtrie_3.path); + + // Add subtries at specific positions + trie.lower_subtries[subtrie_1_index] = LowerSparseSubtrie::Revealed(subtrie_1.clone()); + trie.lower_subtries[subtrie_2_index] = LowerSparseSubtrie::Revealed(subtrie_2.clone()); + trie.lower_subtries[subtrie_3_index] = LowerSparseSubtrie::Revealed(subtrie_3.clone()); + + // Create a prefix set that matches any key + let mut prefix_set = PrefixSetMut::all().freeze(); + + // All subtries should be removed and returned + let (subtries, unchanged_prefix_set) = trie.take_changed_lower_subtries(&mut prefix_set); + assert_eq!( + subtries + .into_iter() + .map(|ChangedSubtrie { index, subtrie, prefix_set, .. }| { + (index, subtrie, prefix_set.all()) + }) + .collect::>(), + vec![ + (subtrie_1_index, subtrie_1, true), + (subtrie_2_index, subtrie_2, true), + (subtrie_3_index, subtrie_3, true) + ] + ); + assert_eq!(unchanged_prefix_set, PrefixSetMut::all()); + + assert!(trie.lower_subtries.iter().all(|subtrie| subtrie.as_revealed_ref().is_none())); + } + + #[test] + fn test_sparse_subtrie_type() { + assert_eq!(SparseSubtrieType::from_path(&Nibbles::new()), SparseSubtrieType::Upper); + assert_eq!( + SparseSubtrieType::from_path(&Nibbles::from_nibbles([0])), + SparseSubtrieType::Upper + ); + assert_eq!( + SparseSubtrieType::from_path(&Nibbles::from_nibbles([15])), + SparseSubtrieType::Upper + ); + assert_eq!( + SparseSubtrieType::from_path(&Nibbles::from_nibbles([0, 0])), + SparseSubtrieType::Lower(0) + ); + assert_eq!( + SparseSubtrieType::from_path(&Nibbles::from_nibbles([0, 0, 0])), + SparseSubtrieType::Lower(0) + ); + assert_eq!( + SparseSubtrieType::from_path(&Nibbles::from_nibbles([0, 1])), + SparseSubtrieType::Lower(1) + ); + assert_eq!( + SparseSubtrieType::from_path(&Nibbles::from_nibbles([0, 1, 0])), + SparseSubtrieType::Lower(1) + ); + assert_eq!( + SparseSubtrieType::from_path(&Nibbles::from_nibbles([0, 15])), + SparseSubtrieType::Lower(15) + ); + assert_eq!( + SparseSubtrieType::from_path(&Nibbles::from_nibbles([15, 0])), + SparseSubtrieType::Lower(240) + ); + assert_eq!( + SparseSubtrieType::from_path(&Nibbles::from_nibbles([15, 1])), + SparseSubtrieType::Lower(241) + ); + assert_eq!( + SparseSubtrieType::from_path(&Nibbles::from_nibbles([15, 15])), + SparseSubtrieType::Lower(255) + ); + assert_eq!( + SparseSubtrieType::from_path(&Nibbles::from_nibbles([15, 15, 15])), + SparseSubtrieType::Lower(255) + ); + } + + #[test] + fn test_reveal_node_leaves() { 
+ let mut trie = ParallelSparseTrie::default(); + + // Reveal leaf in the upper trie + { + let path = Nibbles::from_nibbles([0x1]); + let node = create_leaf_node([0x2, 0x3], 42); + let masks = TrieMasks::none(); + + trie.reveal_nodes(vec![RevealedSparseNode { path, node, masks }]).unwrap(); + + assert_matches!( + trie.upper_subtrie.nodes.get(&path), + Some(SparseNode::Leaf { key, hash: None }) + if key == &Nibbles::from_nibbles([0x2, 0x3]) + ); + + let full_path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); + assert_eq!( + trie.upper_subtrie.inner.values.get(&full_path), + Some(&encode_account_value(42)) + ); + } + + // Reveal leaf in a lower trie + { + let path = Nibbles::from_nibbles([0x1, 0x2]); + let node = create_leaf_node([0x3, 0x4], 42); + let masks = TrieMasks::none(); + + trie.reveal_nodes(vec![RevealedSparseNode { path, node, masks }]).unwrap(); + + // Check that the lower subtrie was created + let idx = path_subtrie_index_unchecked(&path); + assert!(trie.lower_subtries[idx].as_revealed_ref().is_some()); + + // Check that the lower subtrie's path was correctly set + let lower_subtrie = trie.lower_subtries[idx].as_revealed_ref().unwrap(); + assert_eq!(lower_subtrie.path, path); + + assert_matches!( + lower_subtrie.nodes.get(&path), + Some(SparseNode::Leaf { key, hash: None }) + if key == &Nibbles::from_nibbles([0x3, 0x4]) + ); + } + + // Revealing a leaf in a lower trie with a longer path shouldn't result in the subtrie's + // root path changing. + { + let path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); + let node = create_leaf_node([0x4, 0x5], 42); + let masks = TrieMasks::none(); + + trie.reveal_nodes(vec![RevealedSparseNode { path, node, masks }]).unwrap(); + + // Check that the lower subtrie's path hasn't changed + let idx = path_subtrie_index_unchecked(&path); + let lower_subtrie = trie.lower_subtries[idx].as_revealed_ref().unwrap(); + assert_eq!(lower_subtrie.path, Nibbles::from_nibbles([0x1, 0x2])); + } + } + + #[test] + fn test_reveal_node_extension_all_upper() { + let path = Nibbles::new(); + let child_hash = B256::repeat_byte(0xab); + let node = create_extension_node([0x1], child_hash); + let masks = TrieMasks::none(); + let trie = ParallelSparseTrie::from_root(node, masks, true).unwrap(); + + assert_matches!( + trie.upper_subtrie.nodes.get(&path), + Some(SparseNode::Extension { key, hash: None, .. }) + if key == &Nibbles::from_nibbles([0x1]) + ); + + // Child path should be in upper trie + let child_path = Nibbles::from_nibbles([0x1]); + assert_eq!(trie.upper_subtrie.nodes.get(&child_path), Some(&SparseNode::Hash(child_hash))); + } + + #[test] + fn test_reveal_node_extension_cross_level() { + let path = Nibbles::new(); + let child_hash = B256::repeat_byte(0xcd); + let node = create_extension_node([0x1, 0x2, 0x3], child_hash); + let masks = TrieMasks::none(); + let trie = ParallelSparseTrie::from_root(node, masks, true).unwrap(); + + // Extension node should be in upper trie + assert_matches!( + trie.upper_subtrie.nodes.get(&path), + Some(SparseNode::Extension { key, hash: None, .. }) + if key == &Nibbles::from_nibbles([0x1, 0x2, 0x3]) + ); + + // Child path (0x1, 0x2, 0x3) should be in lower trie + let child_path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); + let idx = path_subtrie_index_unchecked(&child_path); + assert!(trie.lower_subtries[idx].as_revealed_ref().is_some()); + + let lower_subtrie = trie.lower_subtries[idx].as_revealed_ref().unwrap(); + assert_eq!(lower_subtrie.path, child_path); + assert_eq!(lower_subtrie.nodes.get(&child_path), Some(&SparseNode::Hash(child_hash))); + } + + #[test] + fn test_reveal_node_extension_cross_level_boundary() { + let mut trie = ParallelSparseTrie::default(); + let path = Nibbles::from_nibbles([0x1]); + let child_hash = B256::repeat_byte(0xcd); + let node = create_extension_node([0x2], child_hash); + let masks = TrieMasks::none(); + + trie.reveal_nodes(vec![RevealedSparseNode { path, node, masks }]).unwrap(); + + // Extension node should be in upper trie + assert_matches!( + trie.upper_subtrie.nodes.get(&path), + Some(SparseNode::Extension { key, hash: None, .. }) + if key == &Nibbles::from_nibbles([0x2]) + ); + + // Child path (0x1, 0x2) should be in lower trie + let child_path = Nibbles::from_nibbles([0x1, 0x2]); + let idx = path_subtrie_index_unchecked(&child_path); + assert!(trie.lower_subtries[idx].as_revealed_ref().is_some()); + + let lower_subtrie = trie.lower_subtries[idx].as_revealed_ref().unwrap(); + assert_eq!(lower_subtrie.path, child_path); + assert_eq!(lower_subtrie.nodes.get(&child_path), Some(&SparseNode::Hash(child_hash))); + } + + #[test] + fn test_reveal_node_branch_all_upper() { + let path = Nibbles::new(); + let child_hashes = [ + RlpNode::word_rlp(&B256::repeat_byte(0x11)), + RlpNode::word_rlp(&B256::repeat_byte(0x22)), + ]; + let node = create_branch_node_with_children(&[0x0, 0x5], child_hashes.clone()); + let masks = TrieMasks::none(); + let trie = ParallelSparseTrie::from_root(node, masks, true).unwrap(); + + // Branch node should be in upper trie + assert_matches!( + trie.upper_subtrie.nodes.get(&path), + Some(SparseNode::Branch { state_mask, hash: None, .. }) + if *state_mask == 0b0000000000100001.into() + ); + + // Children should be in upper trie (paths of length 1) + let child_path_0 = Nibbles::from_nibbles([0x0]); + let child_path_5 = Nibbles::from_nibbles([0x5]); + assert_eq!( + trie.upper_subtrie.nodes.get(&child_path_0), + Some(&SparseNode::Hash(child_hashes[0].as_hash().unwrap())) + ); + assert_eq!( + trie.upper_subtrie.nodes.get(&child_path_5), + Some(&SparseNode::Hash(child_hashes[1].as_hash().unwrap())) + ); + } + + #[test] + fn test_reveal_node_branch_cross_level() { + let mut trie = ParallelSparseTrie::default(); + let path = Nibbles::from_nibbles([0x1]); // Exactly 1 nibble - boundary case + let child_hashes = [ + RlpNode::word_rlp(&B256::repeat_byte(0x33)), + RlpNode::word_rlp(&B256::repeat_byte(0x44)), + RlpNode::word_rlp(&B256::repeat_byte(0x55)), + ]; + let node = create_branch_node_with_children(&[0x0, 0x7, 0xf], child_hashes.clone()); + let masks = TrieMasks::none(); + + trie.reveal_nodes(vec![RevealedSparseNode { path, node, masks }]).unwrap(); + + // Branch node should be in upper trie + assert_matches!( + trie.upper_subtrie.nodes.get(&path), + Some(SparseNode::Branch { state_mask, hash: None, .. }) + if *state_mask == 0b1000000010000001.into() + ); + + // All children should be in lower tries since they have paths of length 2 + let child_paths = [ + Nibbles::from_nibbles([0x1, 0x0]), + Nibbles::from_nibbles([0x1, 0x7]), + Nibbles::from_nibbles([0x1, 0xf]), + ]; + + for (i, child_path) in child_paths.iter().enumerate() { + let idx = path_subtrie_index_unchecked(child_path); + let lower_subtrie = trie.lower_subtries[idx].as_revealed_ref().unwrap(); + assert_eq!(&lower_subtrie.path, child_path); + assert_eq!( + lower_subtrie.nodes.get(child_path), + Some(&SparseNode::Hash(child_hashes[i].as_hash().unwrap())), + ); + } + } + + #[test] + fn test_update_subtrie_hashes() { + // Create a trie and reveal leaf nodes using reveal_nodes + let mut trie = ParallelSparseTrie::default(); + + // Create dummy leaf nodes that don't form a correct trie structure, but are enough to + // test the method + let leaf_1_full_path = Nibbles::from_nibbles([0; 64]); + let leaf_1_path = leaf_1_full_path.slice(..2); + let leaf_1_key = leaf_1_full_path.slice(2..); + let leaf_2_full_path = Nibbles::from_nibbles([vec![1, 0], vec![0; 62]].concat()); + let leaf_2_path = leaf_2_full_path.slice(..2); + let leaf_2_key = leaf_2_full_path.slice(2..); + let leaf_3_full_path = Nibbles::from_nibbles([vec![3, 0], vec![0; 62]].concat()); + let leaf_3_path = leaf_3_full_path.slice(..2); + let leaf_3_key = leaf_3_full_path.slice(2..); + let leaf_1 = create_leaf_node(leaf_1_key.to_vec(), 1); + let leaf_2 = create_leaf_node(leaf_2_key.to_vec(), 2); + let leaf_3 = create_leaf_node(leaf_3_key.to_vec(), 3); + + // Reveal nodes using reveal_nodes + trie.reveal_nodes(vec![ + RevealedSparseNode { path: leaf_1_path, node: leaf_1, masks: TrieMasks::none() }, + RevealedSparseNode { path: leaf_2_path, node: leaf_2, masks: TrieMasks::none() }, + RevealedSparseNode { path: leaf_3_path, node: leaf_3, masks: TrieMasks::none() }, + ]) + .unwrap(); + + // Calculate subtrie indexes + let subtrie_1_index = SparseSubtrieType::from_path(&leaf_1_path).lower_index().unwrap(); + let subtrie_2_index = SparseSubtrieType::from_path(&leaf_2_path).lower_index().unwrap(); + let subtrie_3_index = SparseSubtrieType::from_path(&leaf_3_path).lower_index().unwrap(); + + let unchanged_prefix_set = PrefixSetMut::from([ + Nibbles::from_nibbles([0x0]), + leaf_2_full_path, + Nibbles::from_nibbles([0x2, 0x0, 0x0]), + ]); + // Create a prefix set with the keys that match only the second subtrie + let mut prefix_set = PrefixSetMut::from([ + // Match second subtrie + Nibbles::from_nibbles([0x1, 0x0, 0x0]), + Nibbles::from_nibbles([0x1, 0x0, 0x1, 0x0]), + ]); + prefix_set.extend(unchanged_prefix_set.clone()); + trie.prefix_set = prefix_set; + + // Update subtrie hashes + trie.update_subtrie_hashes(); + + // Check that the prefix set was updated + assert_eq!(trie.prefix_set, unchanged_prefix_set); + // Check that subtries were returned back to the array + assert!(trie.lower_subtries[subtrie_1_index].as_revealed_ref().is_some()); + assert!(trie.lower_subtries[subtrie_2_index].as_revealed_ref().is_some()); + assert!(trie.lower_subtries[subtrie_3_index].as_revealed_ref().is_some()); + } + + #[test] + fn test_subtrie_update_hashes() { + let mut subtrie = Box::new(SparseSubtrie::new(Nibbles::from_nibbles([0x0, 0x0]))); + + // Create leaf nodes with paths 0x0...0, 0x00001...0, 0x0010...0 + let leaf_1_full_path = Nibbles::from_nibbles([0; 64]); + let leaf_1_path = leaf_1_full_path.slice(..5); + let leaf_1_key = leaf_1_full_path.slice(5..); + let leaf_2_full_path =
Nibbles::from_nibbles([vec![0, 0, 0, 0, 1], vec![0; 59]].concat()); + let leaf_2_path = leaf_2_full_path.slice(..5); + let leaf_2_key = leaf_2_full_path.slice(5..); + let leaf_3_full_path = Nibbles::from_nibbles([vec![0, 0, 1], vec![0; 61]].concat()); + let leaf_3_path = leaf_3_full_path.slice(..3); + let leaf_3_key = leaf_3_full_path.slice(3..); + + let account_1 = create_account(1); + let account_2 = create_account(2); + let account_3 = create_account(3); + let leaf_1 = create_leaf_node(leaf_1_key.to_vec(), account_1.nonce); + let leaf_2 = create_leaf_node(leaf_2_key.to_vec(), account_2.nonce); + let leaf_3 = create_leaf_node(leaf_3_key.to_vec(), account_3.nonce); + + // Create bottom branch node + let branch_1_path = Nibbles::from_nibbles([0, 0, 0, 0]); + let branch_1 = create_branch_node_with_children( + &[0, 1], + vec![ + RlpNode::from_rlp(&alloy_rlp::encode(&leaf_1)), + RlpNode::from_rlp(&alloy_rlp::encode(&leaf_2)), + ], + ); + + // Create an extension node + let extension_path = Nibbles::from_nibbles([0, 0, 0]); + let extension_key = Nibbles::from_nibbles([0]); + let extension = create_extension_node( + extension_key.to_vec(), + RlpNode::from_rlp(&alloy_rlp::encode(&branch_1)).as_hash().unwrap(), + ); + + // Create top branch node + let branch_2_path = Nibbles::from_nibbles([0, 0]); + let branch_2 = create_branch_node_with_children( + &[0, 1], + vec![ + RlpNode::from_rlp(&alloy_rlp::encode(&extension)), + RlpNode::from_rlp(&alloy_rlp::encode(&leaf_3)), + ], + ); + + // Reveal nodes + subtrie.reveal_node(branch_2_path, &branch_2, TrieMasks::none()).unwrap(); + subtrie.reveal_node(leaf_1_path, &leaf_1, TrieMasks::none()).unwrap(); + subtrie.reveal_node(extension_path, &extension, TrieMasks::none()).unwrap(); + subtrie.reveal_node(branch_1_path, &branch_1, TrieMasks::none()).unwrap(); + subtrie.reveal_node(leaf_2_path, &leaf_2, TrieMasks::none()).unwrap(); + subtrie.reveal_node(leaf_3_path, &leaf_3, TrieMasks::none()).unwrap(); + + // Run hash builder for the three leaf nodes + let (_, _, proof_nodes, _, _) = run_hash_builder( + [ + (leaf_1_full_path, account_1), + (leaf_2_full_path, account_2), + (leaf_3_full_path, account_3), + ], + NoopAccountTrieCursor::default(), + Default::default(), + [ + branch_1_path, + extension_path, + branch_2_path, + leaf_1_full_path, + leaf_2_full_path, + leaf_3_full_path, + ], + ); + + // Update hashes for the subtrie + subtrie.update_hashes( + &mut PrefixSetMut::from([leaf_1_full_path, leaf_2_full_path, leaf_3_full_path]) + .freeze(), + &mut None, + &HashMap::default(), + &HashMap::default(), + ); + + // Compare hashes between hash builder and subtrie + let hash_builder_branch_1_hash = + RlpNode::from_rlp(proof_nodes.get(&branch_1_path).unwrap().as_ref()).as_hash().unwrap(); + let subtrie_branch_1_hash = subtrie.nodes.get(&branch_1_path).unwrap().hash().unwrap(); + assert_eq!(hash_builder_branch_1_hash, subtrie_branch_1_hash); + + let hash_builder_extension_hash = + RlpNode::from_rlp(proof_nodes.get(&extension_path).unwrap().as_ref()) + .as_hash() + .unwrap(); + let subtrie_extension_hash = subtrie.nodes.get(&extension_path).unwrap().hash().unwrap(); + assert_eq!(hash_builder_extension_hash, subtrie_extension_hash); + + let hash_builder_branch_2_hash = + RlpNode::from_rlp(proof_nodes.get(&branch_2_path).unwrap().as_ref()).as_hash().unwrap(); + let subtrie_branch_2_hash = subtrie.nodes.get(&branch_2_path).unwrap().hash().unwrap(); + assert_eq!(hash_builder_branch_2_hash, subtrie_branch_2_hash); + + let subtrie_leaf_1_hash =
subtrie.nodes.get(&leaf_1_path).unwrap().hash().unwrap(); + let hash_builder_leaf_1_hash = + RlpNode::from_rlp(proof_nodes.get(&leaf_1_path).unwrap().as_ref()).as_hash().unwrap(); + assert_eq!(hash_builder_leaf_1_hash, subtrie_leaf_1_hash); + + let hash_builder_leaf_2_hash = + RlpNode::from_rlp(proof_nodes.get(&leaf_2_path).unwrap().as_ref()).as_hash().unwrap(); + let subtrie_leaf_2_hash = subtrie.nodes.get(&leaf_2_path).unwrap().hash().unwrap(); + assert_eq!(hash_builder_leaf_2_hash, subtrie_leaf_2_hash); + + let hash_builder_leaf_3_hash = + RlpNode::from_rlp(proof_nodes.get(&leaf_3_path).unwrap().as_ref()).as_hash().unwrap(); + let subtrie_leaf_3_hash = subtrie.nodes.get(&leaf_3_path).unwrap().hash().unwrap(); + assert_eq!(hash_builder_leaf_3_hash, subtrie_leaf_3_hash); + } + + #[test] + fn test_remove_leaf_branch_becomes_extension() { + // + // 0x: Extension (Key = 5) + // 0x5: └── Branch (Mask = 1001) + // 0x50: ├── 0 -> Extension (Key = 23) + // 0x5023: │ └── Branch (Mask = 0101) + // 0x50231: │ ├── 1 -> Leaf + // 0x50233: │ └── 3 -> Leaf + // 0x53: └── 3 -> Leaf (Key = 7) + // + // After removing 0x53, extension+branch+extension become a single extension + // + let mut trie = new_test_trie( + [ + (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), + (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(TrieMask::new(0b1001))), + ( + Nibbles::from_nibbles([0x5, 0x0]), + SparseNode::new_ext(Nibbles::from_nibbles([0x2, 0x3])), + ), + ( + Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3]), + SparseNode::new_branch(TrieMask::new(0b0101)), + ), + ( + Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), + SparseNode::new_leaf(Nibbles::new()), + ), + ( + Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), + SparseNode::new_leaf(Nibbles::new()), + ), + ( + Nibbles::from_nibbles([0x5, 0x3]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x7])), + ), + ] + .into_iter(), + ); + + let provider = MockTrieNodeProvider::new(); + + // Remove the leaf with a full path of 0x537 + let leaf_full_path = Nibbles::from_nibbles([0x5, 0x3, 0x7]); + trie.remove_leaf(&leaf_full_path, provider).unwrap(); + + let upper_subtrie = &trie.upper_subtrie; + let lower_subtrie_50 = trie.lower_subtries[0x50].as_revealed_ref().unwrap(); + + // Check that the `SparseSubtrie` the leaf was removed from was itself removed, as it is now + // empty. + assert_matches!(trie.lower_subtries[0x53].as_revealed_ref(), None); + + // Check that the leaf node was removed, and that its parent/grandparent were modified + // appropriately. + assert_matches!( + upper_subtrie.nodes.get(&Nibbles::from_nibbles([])), + Some(SparseNode::Extension{ key, ..}) + if key == &Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3]) + ); + assert_matches!(upper_subtrie.nodes.get(&Nibbles::from_nibbles([0x5])), None); + assert_matches!(lower_subtrie_50.nodes.get(&Nibbles::from_nibbles([0x5, 0x0])), None); + assert_matches!( + lower_subtrie_50.nodes.get(&Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3])), + Some(SparseNode::Branch{ state_mask, .. 
}) + if *state_mask == 0b0101.into() + ); + } + + #[test] + fn test_remove_leaf_branch_becomes_leaf() { + // + // 0x: Branch (Mask = 0011) + // 0x0: ├── 0 -> Leaf (Key = 12) + // 0x1: └── 1 -> Leaf (Key = 34) + // + // After removing 0x012, branch becomes a leaf + // + let mut trie = new_test_trie( + [ + (Nibbles::default(), SparseNode::new_branch(TrieMask::new(0b0011))), + ( + Nibbles::from_nibbles([0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x1, 0x2])), + ), + ( + Nibbles::from_nibbles([0x1]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x3, 0x4])), + ), + ] + .into_iter(), + ); + + // Add the branch node to updated_nodes to simulate it being modified earlier + if let Some(updates) = trie.updates.as_mut() { + updates + .updated_nodes + .insert(Nibbles::default(), BranchNodeCompact::new(0b11, 0, 0, vec![], None)); + } + + let provider = MockTrieNodeProvider::new(); + + // Remove the leaf with a full path of 0x012 + let leaf_full_path = Nibbles::from_nibbles([0x0, 0x1, 0x2]); + trie.remove_leaf(&leaf_full_path, provider).unwrap(); + + let upper_subtrie = &trie.upper_subtrie; + + // Check that the leaf's value was removed + assert_matches!(upper_subtrie.inner.values.get(&leaf_full_path), None); + + // Check that the branch node collapsed into a leaf node with the remaining child's key + assert_matches!( + upper_subtrie.nodes.get(&Nibbles::default()), + Some(SparseNode::Leaf{ key, ..}) + if key == &Nibbles::from_nibbles([0x1, 0x3, 0x4]) + ); + + // Check that the remaining child node was removed + assert_matches!(upper_subtrie.nodes.get(&Nibbles::from_nibbles([0x1])), None); + // Check that the removed child node was also removed + assert_matches!(upper_subtrie.nodes.get(&Nibbles::from_nibbles([0x0])), None); + + // Check that updates were tracked correctly when branch collapsed + let updates = trie.updates.as_ref().unwrap(); + + // The branch at root should be marked as removed since it collapsed + assert!(updates.removed_nodes.contains(&Nibbles::default())); + + // The branch should no longer be in updated_nodes + assert!(!updates.updated_nodes.contains_key(&Nibbles::default())); + } + + #[test] + fn test_remove_leaf_extension_becomes_leaf() { + // + // 0x: Extension (Key = 5) + // 0x5: └── Branch (Mask = 0011) + // 0x50: ├── 0 -> Leaf (Key = 12) + // 0x51: └── 1 -> Leaf (Key = 34) + // + // After removing 0x5012, extension+branch becomes a leaf + // + let mut trie = new_test_trie( + [ + (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), + (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(TrieMask::new(0b0011))), + ( + Nibbles::from_nibbles([0x5, 0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x1, 0x2])), + ), + ( + Nibbles::from_nibbles([0x5, 0x1]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x3, 0x4])), + ), + ] + .into_iter(), + ); + + let provider = MockTrieNodeProvider::new(); + + // Remove the leaf with a full path of 0x5012 + let leaf_full_path = Nibbles::from_nibbles([0x5, 0x0, 0x1, 0x2]); + trie.remove_leaf(&leaf_full_path, provider).unwrap(); + + let upper_subtrie = &trie.upper_subtrie; + + // Check that both lower subtries were removed. 0x50 should have been removed because + // removing its leaf made it empty. 0x51 should have been removed after its own leaf was + // collapsed into the upper trie, leaving it also empty. 
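+ // (The asserts below rely on as_revealed_ref() returning None to signal that a lower + // subtrie slot no longer holds a revealed subtrie.)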
+ assert_matches!(trie.lower_subtries[0x50].as_revealed_ref(), None); + assert_matches!(trie.lower_subtries[0x51].as_revealed_ref(), None); + + // Check that the other leaf's value was moved to the upper trie + let other_leaf_full_value = Nibbles::from_nibbles([0x5, 0x1, 0x3, 0x4]); + assert_matches!(upper_subtrie.inner.values.get(&other_leaf_full_value), Some(_)); + + // Check that the extension node collapsed into a leaf node + assert_matches!( + upper_subtrie.nodes.get(&Nibbles::default()), + Some(SparseNode::Leaf{ key, ..}) + if key == &Nibbles::from_nibbles([0x5, 0x1, 0x3, 0x4]) + ); + + // Check that intermediate nodes were removed + assert_matches!(upper_subtrie.nodes.get(&Nibbles::from_nibbles([0x5])), None); + } + + #[test] + fn test_remove_leaf_branch_on_branch() { + // + // 0x: Branch (Mask = 0101) + // 0x0: ├── 0 -> Leaf (Key = 12) + // 0x2: └── 2 -> Branch (Mask = 0011) + // 0x20: ├── 0 -> Leaf (Key = 34) + // 0x21: └── 1 -> Leaf (Key = 56) + // + // After removing 0x2034, the inner branch becomes a leaf + // + let mut trie = new_test_trie( + [ + (Nibbles::default(), SparseNode::new_branch(TrieMask::new(0b0101))), + ( + Nibbles::from_nibbles([0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x1, 0x2])), + ), + (Nibbles::from_nibbles([0x2]), SparseNode::new_branch(TrieMask::new(0b0011))), + ( + Nibbles::from_nibbles([0x2, 0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x3, 0x4])), + ), + ( + Nibbles::from_nibbles([0x2, 0x1]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x5, 0x6])), + ), + ] + .into_iter(), + ); + + let provider = MockTrieNodeProvider::new(); + + // Remove the leaf with a full path of 0x2034 + let leaf_full_path = Nibbles::from_nibbles([0x2, 0x0, 0x3, 0x4]); + trie.remove_leaf(&leaf_full_path, provider).unwrap(); + + let upper_subtrie = &trie.upper_subtrie; + + // Check that both lower subtries were removed. 0x20 should have been removed because + // removing its leaf made it empty. 0x21 should have been removed after its own leaf was + // collapsed into the upper trie, leaving it also empty. + assert_matches!(trie.lower_subtries[0x20].as_revealed_ref(), None); + assert_matches!(trie.lower_subtries[0x21].as_revealed_ref(), None); + + // Check that the other leaf's value was moved to the upper trie + let other_leaf_full_value = Nibbles::from_nibbles([0x2, 0x1, 0x5, 0x6]); + assert_matches!(upper_subtrie.inner.values.get(&other_leaf_full_value), Some(_)); + + // Check that the root branch still exists unchanged + assert_matches!( + upper_subtrie.nodes.get(&Nibbles::default()), + Some(SparseNode::Branch{ state_mask, .. }) + if *state_mask == 0b0101.into() + ); + + // Check that the inner branch collapsed into a leaf + assert_matches!( + upper_subtrie.nodes.get(&Nibbles::from_nibbles([0x2])), + Some(SparseNode::Leaf{ key, ..}) + if key == &Nibbles::from_nibbles([0x1, 0x5, 0x6]) + ); + } + + #[test] + fn test_remove_leaf_lower_subtrie_root_path_update() { + // + // 0x: Extension (Key = 123, root of lower subtrie) + // 0x123: └── Branch (Mask = 0011000) + // 0x1233: ├── 3 -> Leaf (Key = []) + // 0x1234: └── 4 -> Extension (Key = 5) + // 0x12345: └── Branch (Mask = 0011) + // 0x123450: ├── 0 -> Leaf (Key = []) + // 0x123451: └── 1 -> Leaf (Key = []) + // + // After removing leaf at 0x1233, the branch at 0x123 becomes an extension to 0x12345, which + // then gets merged with the root extension at 0x. The lower subtrie's `path` field should + // be updated from 0x123 to 0x12345.
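+ // Note that the subtrie remains stored at index 0x12, which is derived from the first two + // nibbles of its path and never changes; only the recorded root path moves deeper, as the + // asserts below check via trie.lower_subtries[0x12].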
+ // + let mut trie = new_test_trie( + [ + (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x1, 0x2, 0x3]))), + ( + Nibbles::from_nibbles([0x1, 0x2, 0x3]), + SparseNode::new_branch(TrieMask::new(0b0011000)), + ), + ( + Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x3]), + SparseNode::new_leaf(Nibbles::default()), + ), + ( + Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4]), + SparseNode::new_ext(Nibbles::from_nibbles([0x5])), + ), + ( + Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4, 0x5]), + SparseNode::new_branch(TrieMask::new(0b0011)), + ), + ( + Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4, 0x5, 0x0]), + SparseNode::new_leaf(Nibbles::default()), + ), + ( + Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4, 0x5, 0x1]), + SparseNode::new_leaf(Nibbles::default()), + ), + ] + .into_iter(), + ); + + let provider = MockTrieNodeProvider::new(); + + // Verify initial state - the lower subtrie's path should be 0x123 + let lower_subtrie_root_path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); + assert_matches!( + trie.lower_subtrie_for_path_mut(&lower_subtrie_root_path), + Some(subtrie) + if subtrie.path == lower_subtrie_root_path + ); + + // Remove the leaf at 0x1233 + let leaf_full_path = Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x3]); + trie.remove_leaf(&leaf_full_path, provider).unwrap(); + + // After removal: + // 1. The branch at 0x123 should become an extension to 0x12345 + // 2. That extension should merge with the root extension at 0x + // 3. The lower subtrie's path should be updated to 0x12345 + let lower_subtrie = trie.lower_subtries[0x12].as_revealed_ref().unwrap(); + assert_eq!(lower_subtrie.path, Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4, 0x5])); + + // Verify the root extension now points all the way to 0x12345 + assert_matches!( + trie.upper_subtrie.nodes.get(&Nibbles::default()), + Some(SparseNode::Extension { key, .. }) + if key == &Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4, 0x5]) + ); + + // Verify the branch at 0x12345 hasn't been modified + assert_matches!( + lower_subtrie.nodes.get(&Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4, 0x5])), + Some(SparseNode::Branch { state_mask, .. }) + if state_mask == &TrieMask::new(0b0011) + ); + } + + #[test] + fn test_remove_leaf_remaining_child_needs_reveal() { + // + // 0x: Branch (Mask = 0011) + // 0x0: ├── 0 -> Leaf (Key = 12) + // 0x1: └── 1 -> Hash (blinded leaf) + // + // After removing 0x012, the hash node needs to be revealed to collapse the branch + // + let mut trie = new_test_trie( + [ + (Nibbles::default(), SparseNode::new_branch(TrieMask::new(0b0011))), + ( + Nibbles::from_nibbles([0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x1, 0x2])), + ), + (Nibbles::from_nibbles([0x1]), SparseNode::Hash(B256::repeat_byte(0xab))), + ] + .into_iter(), + ); + + // Create a mock provider that will reveal the blinded leaf + let mut provider = MockTrieNodeProvider::new(); + let revealed_leaf = create_leaf_node([0x3, 0x4], 42); + let mut encoded = Vec::new(); + revealed_leaf.encode(&mut encoded); + provider.add_revealed_node( + Nibbles::from_nibbles([0x1]), + RevealedNode { node: encoded.into(), tree_mask: None, hash_mask: None }, + ); - /// Mock blinded provider for testing that allows pre-setting nodes at specific paths. - /// - /// This provider can be used in tests to simulate blinded nodes that need to be revealed - /// during trie operations, particularly when collapsing branch nodes during leaf removal. 
- #[derive(Debug, Clone)] - struct MockBlindedProvider { - /// Mapping from path to revealed node data - nodes: HashMap<Nibbles, RevealedNode>, - } + // Remove the leaf with a full path of 0x012 + let leaf_full_path = Nibbles::from_nibbles([0x0, 0x1, 0x2]); + trie.remove_leaf(&leaf_full_path, provider).unwrap(); - impl MockBlindedProvider { - /// Creates a new empty mock provider - fn new() -> Self { - Self { nodes: HashMap::with_hasher(RandomState::default()) } - } + let upper_subtrie = &trie.upper_subtrie; - /// Adds a revealed node at the specified path - fn add_revealed_node(&mut self, path: Nibbles, node: RevealedNode) { - self.nodes.insert(path, node); - } - } + // Check that the leaf value was removed + assert_matches!(upper_subtrie.inner.values.get(&leaf_full_path), None); - impl BlindedProvider for MockBlindedProvider { - fn blinded_node(&self, path: &Nibbles) -> Result<Option<RevealedNode>, SparseTrieError> { - Ok(self.nodes.get(path).cloned()) - } - } + // Check that the branch node collapsed into a leaf node with the revealed child's key + assert_matches!( + upper_subtrie.nodes.get(&Nibbles::default()), + Some(SparseNode::Leaf{ key, ..}) + if key == &Nibbles::from_nibbles([0x1, 0x3, 0x4]) + ); - fn create_account(nonce: u64) -> Account { - Account { nonce, ..Default::default() } + // Check that the remaining child node was removed (since it was merged) + assert_matches!(upper_subtrie.nodes.get(&Nibbles::from_nibbles([0x1])), None); } - fn encode_account_value(nonce: u64) -> Vec<u8> { - let account = Account { nonce, ..Default::default() }; - let trie_account = account.into_trie_account(EMPTY_ROOT_HASH); - let mut buf = Vec::new(); - trie_account.encode(&mut buf); - buf - } + #[test] + fn test_remove_leaf_root() { + // + // 0x: Leaf (Key = 123) + // + // After removing 0x123, the trie becomes empty + // + let mut trie = new_test_trie(std::iter::once(( + Nibbles::default(), + SparseNode::new_leaf(Nibbles::from_nibbles([0x1, 0x2, 0x3])), + ))); - fn create_leaf_node(key: impl AsRef<[u8]>, value_nonce: u64) -> TrieNode { - TrieNode::Leaf(LeafNode::new(Nibbles::from_nibbles(key), encode_account_value(value_nonce))) - } + let provider = MockTrieNodeProvider::new(); - fn create_extension_node(key: impl AsRef<[u8]>, child_hash: B256) -> TrieNode { - TrieNode::Extension(ExtensionNode::new( - Nibbles::from_nibbles(key), - RlpNode::word_rlp(&child_hash), - )) - } + // Remove the leaf with a full key of 0x123 + let leaf_full_path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); + trie.remove_leaf(&leaf_full_path, provider).unwrap(); - fn create_branch_node_with_children( - children_indices: &[u8], - child_hashes: impl IntoIterator<Item = RlpNode>, - ) -> TrieNode { - let mut stack = Vec::new(); - let mut state_mask = TrieMask::default(); + let upper_subtrie = &trie.upper_subtrie; - for (&idx, hash) in children_indices.iter().zip(child_hashes.into_iter()) { - state_mask.set_bit(idx); - stack.push(hash); - } + // Check that the leaf value was removed + assert_matches!(upper_subtrie.inner.values.get(&leaf_full_path), None); - TrieNode::Branch(BranchNode::new(stack, state_mask)) + // Check that the root node was changed to Empty + assert_matches!(upper_subtrie.nodes.get(&Nibbles::default()), Some(SparseNode::Empty)); } - /// Calculate the state root by feeding the provided state to the hash builder and retaining the - /// proofs for the provided targets. - /// - /// Returns the state root and the retained proof nodes.
- fn run_hash_builder( - state: impl IntoIterator<Item = (Nibbles, Account)> + Clone, - trie_cursor: impl TrieCursor, - destroyed_accounts: B256Set, - proof_targets: impl IntoIterator<Item = Nibbles>, - ) -> (B256, TrieUpdates, ProofNodes, HashMap<Nibbles, TrieMask>, HashMap<Nibbles, TrieMask>) - { - let mut account_rlp = Vec::new(); - - let mut hash_builder = HashBuilder::default() - .with_updates(true) - .with_proof_retainer(ProofRetainer::from_iter(proof_targets)); + #[test] + fn test_remove_leaf_unsets_hash_along_path() { + // + // Creates a trie structure: + // 0x: Branch (with hash set) + // 0x0: ├── Extension (with hash set) + // 0x01: │ └── Branch (with hash set) + // 0x012: │ ├── Leaf (Key = 34, with hash set) + // 0x013: │ ├── Leaf (Key = 56, with hash set) + // 0x014: │ └── Leaf (Key = 67, with hash set) + // 0x1: └── Leaf (Key = 78, with hash set) + // + // When removing leaf at 0x01234, all nodes along the path (root branch, + // extension at 0x0, branch at 0x01) should have their hash field unset + // - let mut prefix_set = PrefixSetMut::default(); - prefix_set.extend_keys(state.clone().into_iter().map(|(nibbles, _)| nibbles)); - prefix_set.extend_keys(destroyed_accounts.iter().map(Nibbles::unpack)); - let walker = - TrieWalker::state_trie(trie_cursor, prefix_set.freeze()).with_deletions_retained(true); - let hashed_post_state = HashedPostState::default() - .with_accounts(state.into_iter().map(|(nibbles, account)| { - (nibbles.pack().into_inner().unwrap().into(), Some(account)) - })) - .into_sorted(); - let mut node_iter = TrieNodeIter::state_trie( - walker, - HashedPostStateAccountCursor::new( - NoopHashedAccountCursor::default(), - hashed_post_state.accounts(), - ), + let mut trie = new_test_trie( + [ + ( + Nibbles::default(), + SparseNode::Branch { + state_mask: TrieMask::new(0b0011), + hash: Some(B256::repeat_byte(0x10)), + store_in_db_trie: None, + }, + ), + ( + Nibbles::from_nibbles([0x0]), + SparseNode::Extension { + key: Nibbles::from_nibbles([0x1]), + hash: Some(B256::repeat_byte(0x20)), + store_in_db_trie: None, + }, + ), + ( + Nibbles::from_nibbles([0x0, 0x1]), + SparseNode::Branch { + state_mask: TrieMask::new(0b11100), + hash: Some(B256::repeat_byte(0x30)), + store_in_db_trie: None, + }, + ), + ( + Nibbles::from_nibbles([0x0, 0x1, 0x2]), + SparseNode::Leaf { + key: Nibbles::from_nibbles([0x3, 0x4]), + hash: Some(B256::repeat_byte(0x40)), + }, + ), + ( + Nibbles::from_nibbles([0x0, 0x1, 0x3]), + SparseNode::Leaf { + key: Nibbles::from_nibbles([0x5, 0x6]), + hash: Some(B256::repeat_byte(0x50)), + }, + ), + ( + Nibbles::from_nibbles([0x0, 0x1, 0x4]), + SparseNode::Leaf { + key: Nibbles::from_nibbles([0x6, 0x7]), + hash: Some(B256::repeat_byte(0x60)), + }, + ), + ( + Nibbles::from_nibbles([0x1]), + SparseNode::Leaf { + key: Nibbles::from_nibbles([0x7, 0x8]), + hash: Some(B256::repeat_byte(0x70)), + }, + ), + ] + .into_iter(), ); - while let Some(node) = node_iter.try_next().unwrap() { - match node { - TrieElement::Branch(branch) => { - hash_builder.add_branch(branch.key, branch.value, branch.children_are_in_trie); - } - TrieElement::Leaf(key, account) => { - let account = account.into_trie_account(EMPTY_ROOT_HASH); - account.encode(&mut account_rlp); + let provider = MockTrieNodeProvider::new(); - hash_builder.add_leaf(Nibbles::unpack(key), &account_rlp); - account_rlp.clear(); - } - } + // Remove a leaf which does not exist; this should have no effect.
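+ // The loop below walks all_nodes(), which should cover the upper subtrie and every + // revealed lower subtrie, confirming the failed removal left every hash intact.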
+ trie.remove_leaf(&Nibbles::from_nibbles([0x0, 0x1, 0x2, 0x3, 0x4, 0xF]), &provider) + .unwrap(); + for (path, node) in trie.all_nodes() { + assert!(node.hash().is_some(), "path {path:?} should still have a hash"); } - let root = hash_builder.root(); - let proof_nodes = hash_builder.take_proof_nodes(); - let branch_node_hash_masks = hash_builder - .updated_branch_nodes - .clone() - .unwrap_or_default() - .iter() - .map(|(path, node)| (*path, node.hash_mask)) - .collect(); - let branch_node_tree_masks = hash_builder - .updated_branch_nodes - .clone() - .unwrap_or_default() - .iter() - .map(|(path, node)| (*path, node.tree_mask)) - .collect(); - let mut trie_updates = TrieUpdates::default(); - let removed_keys = node_iter.walker.take_removed_keys(); - trie_updates.finalize(hash_builder, removed_keys, destroyed_accounts); + // Remove the leaf at path 0x01234 + let leaf_full_path = Nibbles::from_nibbles([0x0, 0x1, 0x2, 0x3, 0x4]); + trie.remove_leaf(&leaf_full_path, &provider).unwrap(); - (root, trie_updates, proof_nodes, branch_node_hash_masks, branch_node_tree_masks) + let upper_subtrie = &trie.upper_subtrie; + let lower_subtrie_10 = trie.lower_subtries[0x01].as_revealed_ref().unwrap(); + + // Verify that hash fields are unset for all nodes along the path to the removed leaf + assert_matches!( + upper_subtrie.nodes.get(&Nibbles::default()), + Some(SparseNode::Branch { hash: None, .. }) + ); + assert_matches!( + upper_subtrie.nodes.get(&Nibbles::from_nibbles([0x0])), + Some(SparseNode::Extension { hash: None, .. }) + ); + assert_matches!( + lower_subtrie_10.nodes.get(&Nibbles::from_nibbles([0x0, 0x1])), + Some(SparseNode::Branch { hash: None, .. }) + ); + + // Verify that nodes not on the path still have their hashes + assert_matches!( + upper_subtrie.nodes.get(&Nibbles::from_nibbles([0x1])), + Some(SparseNode::Leaf { hash: Some(_), .. }) + ); + assert_matches!( + lower_subtrie_10.nodes.get(&Nibbles::from_nibbles([0x0, 0x1, 0x3])), + Some(SparseNode::Leaf { hash: Some(_), .. }) + ); + assert_matches!( + lower_subtrie_10.nodes.get(&Nibbles::from_nibbles([0x0, 0x1, 0x4])), + Some(SparseNode::Leaf { hash: Some(_), .. }) + ); } - /// Returns a `ParallelSparseTrie` pre-loaded with the given nodes, as well as leaf values - /// inferred from any provided leaf nodes. - fn new_test_trie(nodes: Nodes) -> ParallelSparseTrie - where - Nodes: Iterator<Item = (Nibbles, SparseNode)>, - { - let mut trie = ParallelSparseTrie::default().with_updates(true); + #[test] + fn test_parallel_sparse_trie_root() { + // Step 1: Create the trie structure + // Extension node at 0x with key 0x2 (goes to upper subtrie) + let extension_path = Nibbles::new(); + let extension_key = Nibbles::from_nibbles([0x2]); + + // Branch node at 0x2 with children 0 and 1 (goes to upper subtrie) + let branch_path = Nibbles::from_nibbles([0x2]); - for (path, node) in nodes { - let subtrie = trie.subtrie_for_path(&path); - if let SparseNode::Leaf { key, .. } = &node { - let mut full_key = path; - full_key.extend(key); - subtrie.inner.values.insert(full_key, "LEAF VALUE".into()); - } - subtrie.nodes.insert(path, node); - } - trie - } + // Leaf nodes at 0x20 and 0x21 (go to lower subtries) + let leaf_1_path = Nibbles::from_nibbles([0x2, 0x0]); + let leaf_1_key = Nibbles::from_nibbles(vec![0; 62]); // Remaining key + let leaf_1_full_path = Nibbles::from_nibbles([vec![0x2, 0x0], vec![0; 62]].concat()); - /// Assert that the parallel sparse trie nodes and the proof nodes from the hash builder are - /// equal.
- #[allow(unused)] - fn assert_eq_parallel_sparse_trie_proof_nodes( - sparse_trie: &ParallelSparseTrie, - proof_nodes: ProofNodes, - ) { - let proof_nodes = proof_nodes - .into_nodes_sorted() - .into_iter() - .map(|(path, node)| (path, TrieNode::decode(&mut node.as_ref()).unwrap())); + let leaf_2_path = Nibbles::from_nibbles([0x2, 0x1]); + let leaf_2_key = Nibbles::from_nibbles(vec![0; 62]); // Remaining key + let leaf_2_full_path = Nibbles::from_nibbles([vec![0x2, 0x1], vec![0; 62]].concat()); - let lower_sparse_nodes = sparse_trie - .lower_subtries - .iter() - .filter_map(Option::as_ref) - .flat_map(|subtrie| subtrie.nodes.iter()); + // Create accounts + let account_1 = create_account(1); + let account_2 = create_account(2); - let upper_sparse_nodes = sparse_trie.upper_subtrie.nodes.iter(); + // Create leaf nodes + let leaf_1 = create_leaf_node(leaf_1_key.to_vec(), account_1.nonce); + let leaf_2 = create_leaf_node(leaf_2_key.to_vec(), account_2.nonce); - let all_sparse_nodes = - lower_sparse_nodes.chain(upper_sparse_nodes).sorted_by_key(|(path, _)| *path); + // Create branch node with children at indices 0 and 1 + let branch = create_branch_node_with_children( + &[0, 1], + vec![ + RlpNode::from_rlp(&alloy_rlp::encode(&leaf_1)), + RlpNode::from_rlp(&alloy_rlp::encode(&leaf_2)), + ], + ); - for ((proof_node_path, proof_node), (sparse_node_path, sparse_node)) in - proof_nodes.zip(all_sparse_nodes) - { - assert_eq!(&proof_node_path, sparse_node_path); + // Create extension node pointing to branch + let extension = create_extension_node( + extension_key.to_vec(), + RlpNode::from_rlp(&alloy_rlp::encode(&branch)).as_hash().unwrap(), + ); - let equals = match (&proof_node, &sparse_node) { - // Both nodes are empty - (TrieNode::EmptyRoot, SparseNode::Empty) => true, - // Both nodes are branches and have the same state mask - ( - TrieNode::Branch(BranchNode { state_mask: proof_state_mask, .. }), - SparseNode::Branch { state_mask: sparse_state_mask, .. }, - ) => proof_state_mask == sparse_state_mask, - // Both nodes are extensions and have the same key - ( - TrieNode::Extension(ExtensionNode { key: proof_key, .. }), - SparseNode::Extension { key: sparse_key, .. }, - ) | - // Both nodes are leaves and have the same key - ( - TrieNode::Leaf(LeafNode { key: proof_key, .. }), - SparseNode::Leaf { key: sparse_key, .. 
}, - ) => proof_key == sparse_key, - // Empty and hash nodes are specific to the sparse trie, skip them - (_, SparseNode::Empty | SparseNode::Hash(_)) => continue, - _ => false, - }; - assert!( - equals, - "path: {proof_node_path:?}\nproof node: {proof_node:?}\nsparse node: {sparse_node:?}" - ); - } - } + // Step 2: Reveal nodes in the trie + let mut trie = ParallelSparseTrie::from_root(extension, TrieMasks::none(), true).unwrap(); + trie.reveal_nodes(vec![ + RevealedSparseNode { path: branch_path, node: branch, masks: TrieMasks::none() }, + RevealedSparseNode { path: leaf_1_path, node: leaf_1, masks: TrieMasks::none() }, + RevealedSparseNode { path: leaf_2_path, node: leaf_2, masks: TrieMasks::none() }, + ]) + .unwrap(); - #[test] - fn test_get_changed_subtries_empty() { - let mut trie = ParallelSparseTrie::default(); - let mut prefix_set = PrefixSetMut::from([Nibbles::default()]).freeze(); + // Step 3: Reset hashes for all revealed nodes to test actual hash calculation + // Reset upper subtrie node hashes + trie.upper_subtrie.nodes.get_mut(&extension_path).unwrap().set_hash(None); + trie.upper_subtrie.nodes.get_mut(&branch_path).unwrap().set_hash(None); - let (subtries, unchanged_prefix_set) = trie.take_changed_lower_subtries(&mut prefix_set); - assert!(subtries.is_empty()); - assert_eq!(unchanged_prefix_set, PrefixSetMut::from(prefix_set.iter().copied())); - } + // Reset lower subtrie node hashes + let leaf_1_subtrie_idx = path_subtrie_index_unchecked(&leaf_1_path); + let leaf_2_subtrie_idx = path_subtrie_index_unchecked(&leaf_2_path); - #[test] - fn test_get_changed_subtries() { - // Create a trie with three subtries - let mut trie = ParallelSparseTrie::default(); - let subtrie_1 = Box::new(SparseSubtrie::new(Nibbles::from_nibbles([0x0, 0x0]))); - let subtrie_1_index = path_subtrie_index_unchecked(&subtrie_1.path); - let subtrie_2 = Box::new(SparseSubtrie::new(Nibbles::from_nibbles([0x1, 0x0]))); - let subtrie_2_index = path_subtrie_index_unchecked(&subtrie_2.path); - let subtrie_3 = Box::new(SparseSubtrie::new(Nibbles::from_nibbles([0x3, 0x0]))); - let subtrie_3_index = path_subtrie_index_unchecked(&subtrie_3.path); + trie.lower_subtries[leaf_1_subtrie_idx] + .as_revealed_mut() + .unwrap() + .nodes + .get_mut(&leaf_1_path) + .unwrap() + .set_hash(None); + trie.lower_subtries[leaf_2_subtrie_idx] + .as_revealed_mut() + .unwrap() + .nodes + .get_mut(&leaf_2_path) + .unwrap() + .set_hash(None); - // Add subtries at specific positions - trie.lower_subtries[subtrie_1_index] = Some(subtrie_1.clone()); - trie.lower_subtries[subtrie_2_index] = Some(subtrie_2.clone()); - trie.lower_subtries[subtrie_3_index] = Some(subtrie_3); + // Step 4: Add changed leaf node paths to prefix set + trie.prefix_set.insert(leaf_1_full_path); + trie.prefix_set.insert(leaf_2_full_path); - let unchanged_prefix_set = PrefixSetMut::from([ - Nibbles::from_nibbles([0x0]), - Nibbles::from_nibbles([0x2, 0x0, 0x0]), - ]); - // Create a prefix set with the keys that match only the second subtrie - let mut prefix_set = PrefixSetMut::from([ - // Match second subtrie - Nibbles::from_nibbles([0x1, 0x0, 0x0]), - Nibbles::from_nibbles([0x1, 0x0, 0x1, 0x0]), - ]); - prefix_set.extend(unchanged_prefix_set); - let mut prefix_set = prefix_set.freeze(); + // Step 5: Calculate root using our implementation + let root = trie.root(); - // Second subtrie should be removed and returned - let (subtries, unchanged_prefix_set) = trie.take_changed_lower_subtries(&mut prefix_set); - assert_eq!( - subtries - .into_iter() - .map(|ChangedSubtrie { 
index, subtrie, prefix_set }| { - (index, subtrie, prefix_set.iter().copied().collect::<Vec<_>>()) - }) - .collect::<Vec<_>>(), - vec![( - subtrie_2_index, - subtrie_2, - vec![ - Nibbles::from_nibbles([0x1, 0x0, 0x0]), - Nibbles::from_nibbles([0x1, 0x0, 0x1, 0x0]) - ] - )] + // Step 6: Calculate root using HashBuilder for comparison + let (hash_builder_root, _, _proof_nodes, _, _) = run_hash_builder( + [(leaf_1_full_path, account_1), (leaf_2_full_path, account_2)], + NoopAccountTrieCursor::default(), + Default::default(), + [extension_path, branch_path, leaf_1_full_path, leaf_2_full_path], ); - assert_eq!(unchanged_prefix_set, unchanged_prefix_set); - assert!(trie.lower_subtries[subtrie_2_index].is_none()); - // First subtrie should remain unchanged - assert_eq!(trie.lower_subtries[subtrie_1_index], Some(subtrie_1)); + // Step 7: Verify the roots match + assert_eq!(root, hash_builder_root); + + // Verify hashes were computed + let leaf_1_subtrie = trie.lower_subtries[leaf_1_subtrie_idx].as_revealed_ref().unwrap(); + let leaf_2_subtrie = trie.lower_subtries[leaf_2_subtrie_idx].as_revealed_ref().unwrap(); + assert!(trie.upper_subtrie.nodes.get(&extension_path).unwrap().hash().is_some()); + assert!(trie.upper_subtrie.nodes.get(&branch_path).unwrap().hash().is_some()); + assert!(leaf_1_subtrie.nodes.get(&leaf_1_path).unwrap().hash().is_some()); + assert!(leaf_2_subtrie.nodes.get(&leaf_2_path).unwrap().hash().is_some()); } #[test] - fn test_get_changed_subtries_all() { - // Create a trie with three subtries - let mut trie = ParallelSparseTrie::default(); - let subtrie_1 = Box::new(SparseSubtrie::new(Nibbles::from_nibbles([0x0, 0x0]))); - let subtrie_1_index = path_subtrie_index_unchecked(&subtrie_1.path); - let subtrie_2 = Box::new(SparseSubtrie::new(Nibbles::from_nibbles([0x1, 0x0]))); - let subtrie_2_index = path_subtrie_index_unchecked(&subtrie_2.path); - let subtrie_3 = Box::new(SparseSubtrie::new(Nibbles::from_nibbles([0x3, 0x0]))); - let subtrie_3_index = path_subtrie_index_unchecked(&subtrie_3.path); - - // Add subtries at specific positions - trie.lower_subtries[subtrie_1_index] = Some(subtrie_1.clone()); - trie.lower_subtries[subtrie_2_index] = Some(subtrie_2.clone()); - trie.lower_subtries[subtrie_3_index] = Some(subtrie_3.clone()); + fn sparse_trie_empty_update_one() { + let ctx = ParallelSparseTrieTestContext; + + let key = Nibbles::unpack(B256::with_last_byte(42)); + let value = || Account::default(); + let value_encoded = || { + let mut account_rlp = Vec::new(); + value().into_trie_account(EMPTY_ROOT_HASH).encode(&mut account_rlp); + account_rlp + }; - // Create a prefix set that matches any key - let mut prefix_set = PrefixSetMut::all().freeze(); + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _, _) = + run_hash_builder( + [(key, value())], + NoopAccountTrieCursor::default(), + Default::default(), + [key], + ); - // All subtries should be removed and returned - let (subtries, unchanged_prefix_set) = trie.take_changed_lower_subtries(&mut prefix_set); - assert_eq!( - subtries - .into_iter() - .map(|ChangedSubtrie { index, subtrie, prefix_set }| { - (index, subtrie, prefix_set.all()) - }) - .collect::<Vec<_>>(), - vec![ - (subtrie_1_index, subtrie_1, true), - (subtrie_2_index, subtrie_2, true), - (subtrie_3_index, subtrie_3, true) - ] + let mut sparse = ParallelSparseTrie::default().with_updates(true); + ctx.update_leaves(&mut sparse, [(key, value_encoded())]); + ctx.assert_with_hash_builder( + &mut sparse, + hash_builder_root, + hash_builder_updates, +
hash_builder_proof_nodes, ); - assert_eq!(unchanged_prefix_set, PrefixSetMut::all()); - - assert!(trie.lower_subtries.iter().all(Option::is_none)); } #[test] - fn test_sparse_subtrie_type() { - assert_eq!(SparseSubtrieType::from_path(&Nibbles::new()), SparseSubtrieType::Upper); - assert_eq!( - SparseSubtrieType::from_path(&Nibbles::from_nibbles([0])), - SparseSubtrieType::Upper - ); - assert_eq!( - SparseSubtrieType::from_path(&Nibbles::from_nibbles([15])), - SparseSubtrieType::Upper - ); - assert_eq!( - SparseSubtrieType::from_path(&Nibbles::from_nibbles([0, 0])), - SparseSubtrieType::Lower(0) - ); - assert_eq!( - SparseSubtrieType::from_path(&Nibbles::from_nibbles([0, 0, 0])), - SparseSubtrieType::Lower(0) - ); - assert_eq!( - SparseSubtrieType::from_path(&Nibbles::from_nibbles([0, 1])), - SparseSubtrieType::Lower(1) - ); - assert_eq!( - SparseSubtrieType::from_path(&Nibbles::from_nibbles([0, 1, 0])), - SparseSubtrieType::Lower(1) - ); - assert_eq!( - SparseSubtrieType::from_path(&Nibbles::from_nibbles([0, 15])), - SparseSubtrieType::Lower(15) - ); - assert_eq!( - SparseSubtrieType::from_path(&Nibbles::from_nibbles([15, 0])), - SparseSubtrieType::Lower(240) - ); - assert_eq!( - SparseSubtrieType::from_path(&Nibbles::from_nibbles([15, 1])), - SparseSubtrieType::Lower(241) - ); - assert_eq!( - SparseSubtrieType::from_path(&Nibbles::from_nibbles([15, 15])), - SparseSubtrieType::Lower(255) + fn sparse_trie_empty_update_multiple_lower_nibbles() { + let ctx = ParallelSparseTrieTestContext; + + let paths = (0..=16).map(|b| Nibbles::unpack(B256::with_last_byte(b))).collect::<Vec<_>>(); + let value = || Account::default(); + let value_encoded = || { + let mut account_rlp = Vec::new(); + value().into_trie_account(EMPTY_ROOT_HASH).encode(&mut account_rlp); + account_rlp + }; + + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _, _) = + run_hash_builder( + paths.iter().copied().zip(std::iter::repeat_with(value)), + NoopAccountTrieCursor::default(), + Default::default(), + paths.clone(), + ); + + let mut sparse = ParallelSparseTrie::default().with_updates(true); + ctx.update_leaves( + &mut sparse, + paths.into_iter().zip(std::iter::repeat_with(value_encoded)), ); - assert_eq!( - SparseSubtrieType::from_path(&Nibbles::from_nibbles([15, 15, 15])), - SparseSubtrieType::Lower(255) + + ctx.assert_with_hash_builder( + &mut sparse, + hash_builder_root, + hash_builder_updates, + hash_builder_proof_nodes, ); } #[test] - fn test_reveal_node_leaves() { - let mut trie = ParallelSparseTrie::default(); - - // Reveal leaf in the upper trie - { - let path = Nibbles::from_nibbles([0x1]); - let node = create_leaf_node([0x2, 0x3], 42); - let masks = TrieMasks::none(); - - trie.reveal_node(path, node, masks).unwrap(); + fn sparse_trie_empty_update_multiple_upper_nibbles() { + let paths = (239..=255).map(|b| Nibbles::unpack(B256::repeat_byte(b))).collect::<Vec<_>>(); + let value = || Account::default(); + let value_encoded = || { + let mut account_rlp = Vec::new(); + value().into_trie_account(EMPTY_ROOT_HASH).encode(&mut account_rlp); + account_rlp + }; - assert_matches!( - trie.upper_subtrie.nodes.get(&path), - Some(SparseNode::Leaf { key, hash: None }) - if key == &Nibbles::from_nibbles([0x2, 0x3]) - ); + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _, _) = + run_hash_builder( + paths.iter().copied().zip(std::iter::repeat_with(value)), + NoopAccountTrieCursor::default(), + Default::default(), + paths.clone(), + ); - let full_path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); - assert_eq!( -
trie.upper_subtrie.inner.values.get(&full_path), - Some(&encode_account_value(42)) - ); + let provider = DefaultTrieNodeProvider; + let mut sparse = ParallelSparseTrie::default().with_updates(true); + for path in &paths { + sparse.update_leaf(*path, value_encoded(), &provider).unwrap(); } + let sparse_root = sparse.root(); + let sparse_updates = sparse.take_updates(); - // Reveal leaf in a lower trie - { - let path = Nibbles::from_nibbles([0x1, 0x2]); - let node = create_leaf_node([0x3, 0x4], 42); - let masks = TrieMasks::none(); + assert_eq!(sparse_root, hash_builder_root); + assert_eq!(sparse_updates.updated_nodes, hash_builder_updates.account_nodes); + assert_eq_parallel_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); + } - trie.reveal_node(path, node, masks).unwrap(); + #[test] + fn sparse_trie_empty_update_multiple() { + let ctx = ParallelSparseTrieTestContext; - // Check that the lower subtrie was created - let idx = path_subtrie_index_unchecked(&path); - assert!(trie.lower_subtries[idx].is_some()); + let paths = (0..=255) + .map(|b| { + Nibbles::unpack(if b % 2 == 0 { + B256::repeat_byte(b) + } else { + B256::with_last_byte(b) + }) + }) + .collect::<Vec<_>>(); + let value = || Account::default(); + let value_encoded = || { + let mut account_rlp = Vec::new(); + value().into_trie_account(EMPTY_ROOT_HASH).encode(&mut account_rlp); + account_rlp + }; - let lower_subtrie = trie.lower_subtries[idx].as_ref().unwrap(); - assert_matches!( - lower_subtrie.nodes.get(&path), - Some(SparseNode::Leaf { key, hash: None }) - if key == &Nibbles::from_nibbles([0x3, 0x4]) + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _, _) = + run_hash_builder( + paths.iter().sorted_unstable().copied().zip(std::iter::repeat_with(value)), + NoopAccountTrieCursor::default(), + Default::default(), + paths.clone(), ); - } + + let mut sparse = ParallelSparseTrie::default().with_updates(true); + ctx.update_leaves( + &mut sparse, + paths.iter().copied().zip(std::iter::repeat_with(value_encoded)), + ); + ctx.assert_with_hash_builder( + &mut sparse, + hash_builder_root, + hash_builder_updates, + hash_builder_proof_nodes, + ); } #[test] - fn test_reveal_node_extension_all_upper() { - let mut trie = ParallelSparseTrie::default(); - let path = Nibbles::new(); - let child_hash = B256::repeat_byte(0xab); - let node = create_extension_node([0x1], child_hash); - let masks = TrieMasks::none(); + fn sparse_trie_empty_update_repeated() { + let ctx = ParallelSparseTrieTestContext; + + let paths = (0..=255).map(|b| Nibbles::unpack(B256::repeat_byte(b))).collect::<Vec<_>>(); + let old_value = Account { nonce: 1, ..Default::default() }; + let old_value_encoded = { + let mut account_rlp = Vec::new(); + old_value.into_trie_account(EMPTY_ROOT_HASH).encode(&mut account_rlp); + account_rlp + }; + let new_value = Account { nonce: 2, ..Default::default() }; + let new_value_encoded = { + let mut account_rlp = Vec::new(); + new_value.into_trie_account(EMPTY_ROOT_HASH).encode(&mut account_rlp); + account_rlp + }; - trie.reveal_node(path, node, masks).unwrap(); + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _, _) = + run_hash_builder( + paths.iter().copied().zip(std::iter::repeat_with(|| old_value)), + NoopAccountTrieCursor::default(), + Default::default(), + paths.clone(), + ); - assert_matches!( - trie.upper_subtrie.nodes.get(&path), - Some(SparseNode::Extension { key, hash: None, ..
}) - if key == &Nibbles::from_nibbles([0x1]) + let mut sparse = ParallelSparseTrie::default().with_updates(true); + ctx.update_leaves( + &mut sparse, + paths.iter().copied().zip(std::iter::repeat(old_value_encoded)), + ); + ctx.assert_with_hash_builder( + &mut sparse, + hash_builder_root, + hash_builder_updates, + hash_builder_proof_nodes, ); - // Child path should be in upper trie - let child_path = Nibbles::from_nibbles([0x1]); - assert_eq!(trie.upper_subtrie.nodes.get(&child_path), Some(&SparseNode::Hash(child_hash))); + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _, _) = + run_hash_builder( + paths.iter().copied().zip(std::iter::repeat(new_value)), + NoopAccountTrieCursor::default(), + Default::default(), + paths.clone(), + ); + + ctx.update_leaves( + &mut sparse, + paths.iter().copied().zip(std::iter::repeat(new_value_encoded)), + ); + ctx.assert_with_hash_builder( + &mut sparse, + hash_builder_root, + hash_builder_updates, + hash_builder_proof_nodes, + ); } #[test] - fn test_reveal_node_extension_cross_level() { - let mut trie = ParallelSparseTrie::default(); - let path = Nibbles::new(); - let child_hash = B256::repeat_byte(0xcd); - let node = create_extension_node([0x1, 0x2, 0x3], child_hash); - let masks = TrieMasks::none(); + fn sparse_trie_remove_leaf() { + let ctx = ParallelSparseTrieTestContext; + let provider = DefaultTrieNodeProvider; + let mut sparse = ParallelSparseTrie::default(); - trie.reveal_node(path, node, masks).unwrap(); + let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); - // Extension node should be in upper trie - assert_matches!( - trie.upper_subtrie.nodes.get(&path), - Some(SparseNode::Extension { key, hash: None, .. }) - if key == &Nibbles::from_nibbles([0x1, 0x2, 0x3]) + ctx.update_leaves( + &mut sparse, + [ + (Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone()), + (Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), value.clone()), + (Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone()), + (Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value.clone()), + (Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]), value.clone()), + (Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), value), + ], ); - // Child path (0x1, 0x2, 0x3) should be in lower trie - let child_path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); - let idx = path_subtrie_index_unchecked(&child_path); - assert!(trie.lower_subtries[idx].is_some()); + // Extension (Key = 5) + // └── Branch (Mask = 1011) + // ├── 0 -> Extension (Key = 23) + // │ └── Branch (Mask = 0101) + // │ ├── 1 -> Leaf (Key = 1, Path = 50231) + // │ └── 3 -> Leaf (Key = 3, Path = 50233) + // ├── 2 -> Leaf (Key = 013, Path = 52013) + // └── 3 -> Branch (Mask = 0101) + // ├── 1 -> Leaf (Key = 3102, Path = 53102) + // └── 3 -> Branch (Mask = 1010) + // ├── 0 -> Leaf (Key = 3302, Path = 53302) + // └── 2 -> Leaf (Key = 3320, Path = 53320) + pretty_assertions::assert_eq!( + parallel_sparse_trie_nodes(&sparse) + .into_iter() + .map(|(k, v)| (*k, v.clone())) + .collect::<BTreeMap<_, _>>(), + BTreeMap::from_iter([ + (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), + (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1101.into())), + ( + Nibbles::from_nibbles([0x5, 0x0]), + SparseNode::new_ext(Nibbles::from_nibbles([0x2, 0x3])) + ), + ( + Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3]), + SparseNode::new_branch(0b1010.into()) + ), + ( + Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), + SparseNode::new_leaf(Nibbles::default()) + ), + ( + Nibbles::from_nibbles([0x5,
0x0, 0x2, 0x3, 0x3]), + SparseNode::new_leaf(Nibbles::default()) + ), + ( + Nibbles::from_nibbles([0x5, 0x2]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x0, 0x1, 0x3])) + ), + (Nibbles::from_nibbles([0x5, 0x3]), SparseNode::new_branch(0b1010.into())), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x1]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x0, 0x2])) + ), + (Nibbles::from_nibbles([0x5, 0x3, 0x3]), SparseNode::new_branch(0b0101.into())), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x2])) + ), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x0])) + ) + ]) + ); - let lower_subtrie = trie.lower_subtries[idx].as_ref().unwrap(); - assert_eq!(lower_subtrie.nodes.get(&child_path), Some(&SparseNode::Hash(child_hash))); - } + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), &provider).unwrap(); + + // Extension (Key = 5) + // └── Branch (Mask = 1001) + // ├── 0 -> Extension (Key = 23) + // │ └── Branch (Mask = 0101) + // │ ├── 1 -> Leaf (Key = 0231, Path = 50231) + // │ └── 3 -> Leaf (Key = 0233, Path = 50233) + // └── 3 -> Branch (Mask = 0101) + // ├── 1 -> Leaf (Key = 3102, Path = 53102) + // └── 3 -> Branch (Mask = 1010) + // ├── 0 -> Leaf (Key = 3302, Path = 53302) + // └── 2 -> Leaf (Key = 3320, Path = 53320) + pretty_assertions::assert_eq!( + parallel_sparse_trie_nodes(&sparse) + .into_iter() + .map(|(k, v)| (*k, v.clone())) + .collect::<BTreeMap<_, _>>(), + BTreeMap::from_iter([ + (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), + (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1001.into())), + ( + Nibbles::from_nibbles([0x5, 0x0]), + SparseNode::new_ext(Nibbles::from_nibbles([0x2, 0x3])) + ), + ( + Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3]), + SparseNode::new_branch(0b1010.into()) + ), + ( + Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), + SparseNode::new_leaf(Nibbles::default()) + ), + ( + Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), + SparseNode::new_leaf(Nibbles::default()) + ), + (Nibbles::from_nibbles([0x5, 0x3]), SparseNode::new_branch(0b1010.into())), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x1]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x0, 0x2])) + ), + (Nibbles::from_nibbles([0x5, 0x3, 0x3]), SparseNode::new_branch(0b0101.into())), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x2])) + ), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x0])) + ) + ]) + ); - #[test] - fn test_reveal_node_extension_cross_level_boundary() { - let mut trie = ParallelSparseTrie::default(); - let path = Nibbles::from_nibbles([0x1]); - let child_hash = B256::repeat_byte(0xcd); - let node = create_extension_node([0x2], child_hash); - let masks = TrieMasks::none(); + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), &provider).unwrap(); + + // Extension (Key = 5) + // └── Branch (Mask = 1001) + // ├── 0 -> Leaf (Key = 0233, Path = 50233) + // └── 3 -> Branch (Mask = 0101) + // ├── 1 -> Leaf (Key = 3102, Path = 53102) + // └── 3 -> Branch (Mask = 1010) + // ├── 0 -> Leaf (Key = 3302, Path = 53302) + // └── 2 -> Leaf (Key = 3320, Path = 53320) + pretty_assertions::assert_eq!( + parallel_sparse_trie_nodes(&sparse) + .into_iter() + .map(|(k, v)| (*k, v.clone())) + .collect::<BTreeMap<_, _>>(), + BTreeMap::from_iter([ + (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), + (Nibbles::from_nibbles([0x5]),
SparseNode::new_branch(0b1001.into())), + ( + Nibbles::from_nibbles([0x5, 0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x2, 0x3, 0x3])) + ), + (Nibbles::from_nibbles([0x5, 0x3]), SparseNode::new_branch(0b1010.into())), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x1]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x0, 0x2])) + ), + (Nibbles::from_nibbles([0x5, 0x3, 0x3]), SparseNode::new_branch(0b0101.into())), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x2])) + ), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x0])) + ) + ]) + ); - trie.reveal_node(path, node, masks).unwrap(); + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), &provider).unwrap(); - // Extension node should be in upper trie - assert_matches!( - trie.upper_subtrie.nodes.get(&path), - Some(SparseNode::Extension { key, hash: None, .. }) - if key == &Nibbles::from_nibbles([0x2]) + // Extension (Key = 5) + // └── Branch (Mask = 1001) + // ├── 0 -> Leaf (Key = 0233, Path = 50233) + // └── 3 -> Branch (Mask = 1010) + // ├── 0 -> Leaf (Key = 3302, Path = 53302) + // └── 2 -> Leaf (Key = 3320, Path = 53320) + pretty_assertions::assert_eq!( + parallel_sparse_trie_nodes(&sparse) + .into_iter() + .map(|(k, v)| (*k, v.clone())) + .collect::>(), + BTreeMap::from_iter([ + (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), + (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1001.into())), + ( + Nibbles::from_nibbles([0x5, 0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x2, 0x3, 0x3])) + ), + ( + Nibbles::from_nibbles([0x5, 0x3]), + SparseNode::new_ext(Nibbles::from_nibbles([0x3])) + ), + (Nibbles::from_nibbles([0x5, 0x3, 0x3]), SparseNode::new_branch(0b0101.into())), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x2])) + ), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x0])) + ) + ]) ); - // Child path (0x1, 0x2) should be in lower trie - let child_path = Nibbles::from_nibbles([0x1, 0x2]); - let idx = path_subtrie_index_unchecked(&child_path); - assert!(trie.lower_subtries[idx].is_some()); + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), &provider).unwrap(); - let lower_subtrie = trie.lower_subtries[idx].as_ref().unwrap(); - assert_eq!(lower_subtrie.nodes.get(&child_path), Some(&SparseNode::Hash(child_hash))); + // Extension (Key = 5) + // └── Branch (Mask = 1001) + // ├── 0 -> Leaf (Key = 0233, Path = 50233) + // └── 3 -> Leaf (Key = 3302, Path = 53302) + pretty_assertions::assert_eq!( + parallel_sparse_trie_nodes(&sparse) + .into_iter() + .map(|(k, v)| (*k, v.clone())) + .collect::>(), + BTreeMap::from_iter([ + (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), + (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1001.into())), + ( + Nibbles::from_nibbles([0x5, 0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x2, 0x3, 0x3])) + ), + ( + Nibbles::from_nibbles([0x5, 0x3]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x3, 0x0, 0x2])) + ), + ]) + ); + + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), &provider).unwrap(); + + // Leaf (Key = 53302) + pretty_assertions::assert_eq!( + parallel_sparse_trie_nodes(&sparse) + .into_iter() + .map(|(k, v)| (*k, v.clone())) + .collect::>(), + BTreeMap::from_iter([( + Nibbles::default(), + SparseNode::new_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2])) + 
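+ // A small sanity sketch of the mask arithmetic behind the assertions above, + // assuming `TrieMask`'s bit-per-child-nibble encoding as used throughout this + // file: a branch with children {0, 3} carries mask 0b1001 and one with + // children {1, 3} carries 0b1010, i.e. + // assert_eq!(TrieMask::new((1 << 0) | (1 << 3)), TrieMask::new(0b1001)); + // assert_eq!(TrieMask::new((1 << 1) | (1 << 3)), TrieMask::new(0b1010));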
#[test] - fn test_reveal_node_branch_all_upper() { - let mut trie = ParallelSparseTrie::default(); - let path = Nibbles::new(); - let child_hashes = [ - RlpNode::word_rlp(&B256::repeat_byte(0x11)), - RlpNode::word_rlp(&B256::repeat_byte(0x22)), - ]; - let node = create_branch_node_with_children(&[0x0, 0x5], child_hashes.clone()); - let masks = TrieMasks::none(); - - trie.reveal_node(path, node, masks).unwrap(); + fn sparse_trie_remove_leaf_blinded() { + let leaf = LeafNode::new( + Nibbles::default(), + alloy_rlp::encode_fixed_size(&U256::from(1)).to_vec(), + ); + let branch = TrieNode::Branch(BranchNode::new( + vec![ + RlpNode::word_rlp(&B256::repeat_byte(1)), + RlpNode::from_raw_rlp(&alloy_rlp::encode(leaf.clone())).unwrap(), + ], + TrieMask::new(0b11), + )); + + let provider = DefaultTrieNodeProvider; + let mut sparse = ParallelSparseTrie::from_root( + branch.clone(), + TrieMasks { hash_mask: Some(TrieMask::new(0b01)), tree_mask: None }, + false, + ) + .unwrap(); + + // Reveal a branch node and one of its children + // + // Branch (Mask = 11) + // ├── 0 -> Hash (Path = 0) + // └── 1 -> Leaf (Path = 1) + sparse + .reveal_nodes(vec![ + RevealedSparseNode { + path: Nibbles::default(), + node: branch, + masks: TrieMasks { hash_mask: None, tree_mask: Some(TrieMask::new(0b01)) }, + }, + RevealedSparseNode { + path: Nibbles::from_nibbles([0x1]), + node: TrieNode::Leaf(leaf), + masks: TrieMasks::none(), + }, + ]) + .unwrap(); - // Branch node should be in upper trie + // Removing a blinded leaf should result in an error assert_matches!( - trie.upper_subtrie.nodes.get(&path), - Some(SparseNode::Branch { state_mask, hash: None, ..
}) - if *state_mask == 0b0000000000100001.into() + sparse.remove_leaf(&Nibbles::from_nibbles([0x0]), &provider).map_err(|e| e.into_kind()), + Err(SparseTrieErrorKind::BlindedNode { path, hash }) if path == Nibbles::from_nibbles([0x0]) && hash == B256::repeat_byte(1) ); + } - // Children should be in upper trie (paths of length 2) - let child_path_0 = Nibbles::from_nibbles([0x0]); - let child_path_5 = Nibbles::from_nibbles([0x5]); - assert_eq!( - trie.upper_subtrie.nodes.get(&child_path_0), - Some(&SparseNode::Hash(child_hashes[0].as_hash().unwrap())) - ); - assert_eq!( - trie.upper_subtrie.nodes.get(&child_path_5), - Some(&SparseNode::Hash(child_hashes[1].as_hash().unwrap())) + #[test] + fn sparse_trie_remove_leaf_non_existent() { + let leaf = LeafNode::new( + Nibbles::default(), + alloy_rlp::encode_fixed_size(&U256::from(1)).to_vec(), ); + let branch = TrieNode::Branch(BranchNode::new( + vec![ + RlpNode::word_rlp(&B256::repeat_byte(1)), + RlpNode::from_raw_rlp(&alloy_rlp::encode(leaf.clone())).unwrap(), + ], + TrieMask::new(0b11), + )); + + let provider = DefaultTrieNodeProvider; + let mut sparse = ParallelSparseTrie::from_root( + branch.clone(), + TrieMasks { hash_mask: Some(TrieMask::new(0b01)), tree_mask: None }, + false, + ) + .unwrap(); + + // Reveal a branch node and one of its children + // + // Branch (Mask = 11) + // ├── 0 -> Hash (Path = 0) + // └── 1 -> Leaf (Path = 1) + sparse + .reveal_nodes(vec![ + RevealedSparseNode { + path: Nibbles::default(), + node: branch, + masks: TrieMasks { hash_mask: None, tree_mask: Some(TrieMask::new(0b01)) }, + }, + RevealedSparseNode { + path: Nibbles::from_nibbles([0x1]), + node: TrieNode::Leaf(leaf), + masks: TrieMasks::none(), + }, + ]) + .unwrap(); + + // Removing a non-existent leaf should be a noop + let sparse_old = sparse.clone(); + assert_matches!(sparse.remove_leaf(&Nibbles::from_nibbles([0x2]), &provider), Ok(())); + assert_eq!(sparse, sparse_old); } #[test] - fn test_reveal_node_branch_cross_level() { - let mut trie = ParallelSparseTrie::default(); - let path = Nibbles::from_nibbles([0x1]); // Exactly 1 nibbles - boundary case - let child_hashes = [ - RlpNode::word_rlp(&B256::repeat_byte(0x33)), - RlpNode::word_rlp(&B256::repeat_byte(0x44)), - RlpNode::word_rlp(&B256::repeat_byte(0x55)), - ]; - let node = create_branch_node_with_children(&[0x0, 0x7, 0xf], child_hashes.clone()); - let masks = TrieMasks::none(); + fn sparse_trie_fuzz() { + // Having only the first 3 nibbles set, we narrow down the range of keys + // to 4096 different hashes, making collisions more likely and exercising + // sparse trie updates more thoroughly. + const KEY_NIBBLES_LEN: usize = 3; + + fn test(updates: Vec<(BTreeMap<Nibbles, Account>, BTreeSet<Nibbles>)>) { + { + let mut state = BTreeMap::default(); + let default_provider = DefaultTrieNodeProvider; + let provider_factory = create_test_provider_factory(); + let mut sparse = ParallelSparseTrie::default().with_updates(true); + + for (update, keys_to_delete) in updates { + // Insert state updates into the sparse trie and calculate the root + for (key, account) in update.clone() { + let account = account.into_trie_account(EMPTY_ROOT_HASH); + let mut account_rlp = Vec::new(); + account.encode(&mut account_rlp); + sparse.update_leaf(key, account_rlp, &default_provider).unwrap(); + } + // We need to clone the sparse trie, so that all updated branch nodes are + // preserved, and not only those that were changed after the last call to + // `root()`.
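+ // In sketch form (names ours, not part of this API), each verification + // round below is: + // let mut snapshot = sparse.clone(); // `sparse` keeps accumulating rounds + // let root = snapshot.root(); // computes hashes for pending changes + // let updates = snapshot.take_updates(); // branch updates seen this round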
+ let mut updated_sparse = sparse.clone(); + let sparse_root = updated_sparse.root(); + let sparse_updates = updated_sparse.take_updates(); + + // Insert state updates into the hash builder and calculate the root + state.extend(update); + let provider = provider_factory.provider().unwrap(); + let trie_cursor = DatabaseTrieCursorFactory::new(provider.tx_ref()); + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _, _) = + run_hash_builder( + state.clone(), + trie_cursor.account_trie_cursor().unwrap(), + Default::default(), + state.keys().copied().collect::<Vec<_>>(), + ); + + // Write trie updates to the database + let provider_rw = provider_factory.provider_rw().unwrap(); + provider_rw.write_trie_updates(&hash_builder_updates).unwrap(); + provider_rw.commit().unwrap(); + + // Assert that the sparse trie root matches the hash builder root + assert_eq!(sparse_root, hash_builder_root); + // Assert that the sparse trie updates match the hash builder updates + pretty_assertions::assert_eq!( + BTreeMap::from_iter(sparse_updates.updated_nodes), + BTreeMap::from_iter(hash_builder_updates.account_nodes) + ); + // Assert that the sparse trie nodes match the hash builder proof nodes + assert_eq_parallel_sparse_trie_proof_nodes( + &updated_sparse, + hash_builder_proof_nodes, + ); + + // Delete some keys from both the hash builder and the sparse trie and check + // that the sparse trie root still matches the hash builder root + for key in &keys_to_delete { + state.remove(key).unwrap(); + sparse.remove_leaf(key, &default_provider).unwrap(); + } + + // We need to clone the sparse trie, so that all updated branch nodes are + // preserved, and not only those that were changed after the last call to + // `root()`. + let mut updated_sparse = sparse.clone(); + let sparse_root = updated_sparse.root(); + let sparse_updates = updated_sparse.take_updates(); + + let provider = provider_factory.provider().unwrap(); + let trie_cursor = DatabaseTrieCursorFactory::new(provider.tx_ref()); + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _, _) = + run_hash_builder( + state.clone(), + trie_cursor.account_trie_cursor().unwrap(), + keys_to_delete + .iter() + .map(|nibbles| B256::from_slice(&nibbles.pack())) + .collect(), + state.keys().copied().collect::<Vec<_>>(), + ); + + // Write trie updates to the database + let provider_rw = provider_factory.provider_rw().unwrap(); + provider_rw.write_trie_updates(&hash_builder_updates).unwrap(); + provider_rw.commit().unwrap(); + + // Assert that the sparse trie root matches the hash builder root + assert_eq!(sparse_root, hash_builder_root); + // Assert that the sparse trie updates match the hash builder updates + pretty_assertions::assert_eq!( + BTreeMap::from_iter(sparse_updates.updated_nodes), + BTreeMap::from_iter(hash_builder_updates.account_nodes) + ); + // Assert that the sparse trie nodes match the hash builder proof nodes + assert_eq_parallel_sparse_trie_proof_nodes( + &updated_sparse, + hash_builder_proof_nodes, + ); + } + } + } + + fn transform_updates( + updates: Vec<BTreeMap<Nibbles, Account>>, + mut rng: impl rand::Rng, + ) -> Vec<(BTreeMap<Nibbles, Account>, BTreeSet<Nibbles>)> { + let mut keys = BTreeSet::new(); + updates + .into_iter() + .map(|update| { + keys.extend(update.keys().copied()); + + let keys_to_delete_len = update.len() / 2; + let keys_to_delete = (0..keys_to_delete_len) + .map(|_| { + let key = + *rand::seq::IteratorRandom::choose(keys.iter(), &mut rng).unwrap(); + keys.take(&key).unwrap() + }) + .collect(); + + (update, keys_to_delete) + }) + .collect::<Vec<_>>() + } + +
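+ // Shape of the generated input, for reference: the strategy below yields + // batches of key/account maps, and `prop_perturb(transform_updates)` pairs + // each batch with a pseudo-random set of `update.len() / 2` previously + // inserted keys to delete, so every round exercises both the insertion and + // the removal paths against the hash builder.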
proptest!(ProptestConfig::with_cases(10), |( + updates in proptest::collection::vec( + proptest::collection::btree_map( + any_with::<Nibbles>(SizeRange::new(KEY_NIBBLES_LEN..=KEY_NIBBLES_LEN)).prop_map(pad_nibbles_right), + arb::<Account>(), + 1..50, + ), + 1..50, + ).prop_perturb(transform_updates) + )| { + test(updates) + }); + } - trie.reveal_node(path, node, masks).unwrap(); + #[test] + fn sparse_trie_fuzz_vs_serial() { + // Having only the first 3 nibbles set, we narrow down the range of keys + // to 4096 different hashes, making collisions more likely and exercising + // sparse trie updates more thoroughly. + const KEY_NIBBLES_LEN: usize = 3; + + fn test(updates: Vec<(BTreeMap<Nibbles, Account>, BTreeSet<Nibbles>)>) { + let default_provider = DefaultTrieNodeProvider; + let mut serial = SerialSparseTrie::default().with_updates(true); + let mut parallel = ParallelSparseTrie::default().with_updates(true); + + for (update, keys_to_delete) in updates { + // Perform leaf updates on both tries + for (key, account) in update.clone() { + let account = account.into_trie_account(EMPTY_ROOT_HASH); + let mut account_rlp = Vec::new(); + account.encode(&mut account_rlp); + serial.update_leaf(key, account_rlp.clone(), &default_provider).unwrap(); + parallel.update_leaf(key, account_rlp, &default_provider).unwrap(); + } - // Branch node should be in upper trie - assert_matches!( - trie.upper_subtrie.nodes.get(&path), - Some(SparseNode::Branch { state_mask, hash: None, .. }) - if *state_mask == 0b1000000010000001.into() - ); + // Calculate roots and assert their equality + let serial_root = serial.root(); + let parallel_root = parallel.root(); + assert_eq!(parallel_root, serial_root); + + // Assert that both tries produce the same updates + let serial_updates = serial.take_updates(); + let parallel_updates = parallel.take_updates(); + pretty_assertions::assert_eq!( + BTreeMap::from_iter(parallel_updates.updated_nodes), + BTreeMap::from_iter(serial_updates.updated_nodes), + ); + pretty_assertions::assert_eq!( + BTreeSet::from_iter(parallel_updates.removed_nodes), + BTreeSet::from_iter(serial_updates.removed_nodes), + ); - // All children should be in lower tries since they have paths of length 3 - let child_paths = [ - Nibbles::from_nibbles([0x1, 0x0]), - Nibbles::from_nibbles([0x1, 0x7]), - Nibbles::from_nibbles([0x1, 0xf]), - ]; + // Perform leaf removals on both tries + for key in &keys_to_delete { + parallel.remove_leaf(key, &default_provider).unwrap(); + serial.remove_leaf(key, &default_provider).unwrap(); + } - for (i, child_path) in child_paths.iter().enumerate() { - let idx = path_subtrie_index_unchecked(child_path); - let lower_subtrie = trie.lower_subtries[idx].as_ref().unwrap(); - assert_eq!( - lower_subtrie.nodes.get(child_path), - Some(&SparseNode::Hash(child_hashes[i].as_hash().unwrap())), - ); + // Calculate roots and assert their equality + let serial_root = serial.root(); + let parallel_root = parallel.root(); + assert_eq!(parallel_root, serial_root); + + // Assert that both tries produce the same updates + let serial_updates = serial.take_updates(); + let parallel_updates = parallel.take_updates(); + pretty_assertions::assert_eq!( + BTreeMap::from_iter(parallel_updates.updated_nodes), + BTreeMap::from_iter(serial_updates.updated_nodes), + ); + pretty_assertions::assert_eq!( + BTreeSet::from_iter(parallel_updates.removed_nodes), + BTreeSet::from_iter(serial_updates.removed_nodes), + ); + } }
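+ // Here the serial trie serves as the reference implementation: the parallel + // variant must match it on roots, updated nodes, and removed nodes after + // every batch, pinning down behavioral parity rather than root equality + // alone.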
- } - #[test] - fn test_update_subtrie_hashes() { - // Create a trie with three subtries - let mut trie = ParallelSparseTrie::default(); - let mut subtrie_1 = Box::new(SparseSubtrie::new(Nibbles::from_nibbles([0x0, 0x0]))); - let subtrie_1_index = path_subtrie_index_unchecked(&subtrie_1.path); - let mut subtrie_2 = Box::new(SparseSubtrie::new(Nibbles::from_nibbles([0x1, 0x0]))); - let subtrie_2_index = path_subtrie_index_unchecked(&subtrie_2.path); - let mut subtrie_3 = Box::new(SparseSubtrie::new(Nibbles::from_nibbles([0x3, 0x0]))); - let subtrie_3_index = path_subtrie_index_unchecked(&subtrie_3.path); + fn transform_updates( + updates: Vec<BTreeMap<Nibbles, Account>>, + mut rng: impl rand::Rng, + ) -> Vec<(BTreeMap<Nibbles, Account>, BTreeSet<Nibbles>)> { + let mut keys = BTreeSet::new(); + updates + .into_iter() + .map(|update| { + keys.extend(update.keys().copied()); + + let keys_to_delete_len = update.len() / 2; + let keys_to_delete = (0..keys_to_delete_len) + .map(|_| { + let key = + *rand::seq::IteratorRandom::choose(keys.iter(), &mut rng).unwrap(); + keys.take(&key).unwrap() + }) + .collect(); + + (update, keys_to_delete) + }) + .collect::<Vec<_>>() + } - // Reveal dummy leaf nodes that form an incorrect trie structure but enough to test the - // method - let leaf_1_full_path = Nibbles::from_nibbles([0; 64]); - let leaf_1_path = leaf_1_full_path.slice(..2); - let leaf_1_key = leaf_1_full_path.slice(2..); - let leaf_2_full_path = Nibbles::from_nibbles([vec![1, 0], vec![0; 62]].concat()); - let leaf_2_path = leaf_2_full_path.slice(..2); - let leaf_2_key = leaf_2_full_path.slice(2..); - let leaf_3_full_path = Nibbles::from_nibbles([vec![3, 0], vec![0; 62]].concat()); - let leaf_3_path = leaf_3_full_path.slice(..2); - let leaf_3_key = leaf_3_full_path.slice(2..); - let leaf_1 = create_leaf_node(leaf_1_key.to_vec(), 1); - let leaf_2 = create_leaf_node(leaf_2_key.to_vec(), 2); - let leaf_3 = create_leaf_node(leaf_3_key.to_vec(), 3); - subtrie_1.reveal_node(leaf_1_path, &leaf_1, TrieMasks::none()).unwrap(); - subtrie_2.reveal_node(leaf_2_path, &leaf_2, TrieMasks::none()).unwrap(); - subtrie_3.reveal_node(leaf_3_path, &leaf_3, TrieMasks::none()).unwrap(); + proptest!(ProptestConfig::with_cases(10), |( + updates in proptest::collection::vec( + proptest::collection::btree_map( + any_with::<Nibbles>(SizeRange::new(KEY_NIBBLES_LEN..=KEY_NIBBLES_LEN)).prop_map(pad_nibbles_right), + arb::<Account>(), + 1..50, + ), + 1..50, + ).prop_perturb(transform_updates) + )| { + test(updates) + }); + } - // Add subtries at specific positions - trie.lower_subtries[subtrie_1_index] = Some(subtrie_1); - trie.lower_subtries[subtrie_2_index] = Some(subtrie_2); - trie.lower_subtries[subtrie_3_index] = Some(subtrie_3); + #[test] + fn sparse_trie_two_leaves_at_lower_roots() { + let provider = DefaultTrieNodeProvider; + let mut trie = ParallelSparseTrie::default().with_updates(true); + let key_50 = Nibbles::unpack(hex!( + "0x5000000000000000000000000000000000000000000000000000000000000000" + )); + let key_51 = Nibbles::unpack(hex!( + "0x5100000000000000000000000000000000000000000000000000000000000000" + )); + + let account = Account::default().into_trie_account(EMPTY_ROOT_HASH); + let mut account_rlp = Vec::new(); + account.encode(&mut account_rlp); - let unchanged_prefix_set = PrefixSetMut::from([ - Nibbles::from_nibbles([0x0]), - Nibbles::from_nibbles([0x2, 0x0, 0x0]), - ]); - // Create a prefix set with the keys that match only the second subtrie - let mut prefix_set = PrefixSetMut::from([ - // Match second subtrie - Nibbles::from_nibbles([0x1, 0x0, 0x0]), - Nibbles::from_nibbles([0x1, 0x0, 0x1, 0x0]), - ]); - prefix_set.extend(unchanged_prefix_set.clone()); - trie.prefix_set = prefix_set;
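+ // Keys 0x50... and 0x51... diverge at the second nibble, so (given the + // two-nibble upper/lower split used by `ParallelSparseTrie`) each leaf ends + // up as the root node of its own lower subtrie, which is the case this test + // targets.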
+ // Add a leaf and calculate the root. + trie.update_leaf(key_50, account_rlp.clone(), &provider).unwrap(); + trie.root(); - // Update subtrie hashes - trie.update_lower_subtrie_hashes(); + // Add a second leaf and assert that the root is the expected value. + trie.update_leaf(key_51, account_rlp.clone(), &provider).unwrap(); - // Check that the prefix set was updated - assert_eq!(trie.prefix_set, unchanged_prefix_set); - // Check that subtries were returned back to the array - assert!(trie.lower_subtries[subtrie_1_index].is_some()); - assert!(trie.lower_subtries[subtrie_2_index].is_some()); - assert!(trie.lower_subtries[subtrie_3_index].is_some()); + let expected_root = + hex!("0xdaf0ef9f91a2f179bb74501209effdb5301db1697bcab041eca2234b126e25de"); + let root = trie.root(); + assert_eq!(root, expected_root); + assert_eq!(SparseTrieUpdates::default(), trie.take_updates()); } + /// We have three leaves that share the same prefix: 0x00, 0x01 and 0x02. Hash builder trie has + /// only nodes 0x00 and 0x02, and we have proofs for them. Leaf 0x01 is new and is inserted + /// into the sparse trie first. + /// + /// 1. Reveal the hash builder proof to leaf 0x00 in the sparse trie. + /// 2. Insert leaf 0x01 into the sparse trie. + /// 3. Reveal the hash builder proof to leaf 0x02 in the sparse trie. + /// + /// The hash builder proof to the leaf 0x02 didn't have the leaf 0x01 at the corresponding + /// nibble of the branch node, so we need to adjust the branch node instead of fully + /// replacing it. #[test] - fn test_subtrie_update_hashes() { - let mut subtrie = - Box::new(SparseSubtrie::new(Nibbles::from_nibbles([0x0, 0x0])).with_updates(true)); + fn sparse_trie_reveal_node_1() { + let key1 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x00])); + let key2 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x01])); + let key3 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x02])); + let value = || Account::default(); + let value_encoded = || { + let mut account_rlp = Vec::new(); + value().into_trie_account(EMPTY_ROOT_HASH).encode(&mut account_rlp); + account_rlp + }; - // Create leaf nodes with paths 0x0...0, 0x00001...0, 0x0010...0 - let leaf_1_full_path = Nibbles::from_nibbles([0; 64]); - let leaf_1_path = leaf_1_full_path.slice(..5); - let leaf_1_key = leaf_1_full_path.slice(5..); - let leaf_2_full_path = Nibbles::from_nibbles([vec![0, 0, 0, 0, 1], vec![0; 59]].concat()); - let leaf_2_path = leaf_2_full_path.slice(..5); - let leaf_2_key = leaf_2_full_path.slice(5..); - let leaf_3_full_path = Nibbles::from_nibbles([vec![0, 0, 1], vec![0; 61]].concat()); - let leaf_3_path = leaf_3_full_path.slice(..3); - let leaf_3_key = leaf_3_full_path.slice(3..); + // Generate the proof for the root node and initialize the sparse trie with it + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks, branch_node_tree_masks) = + run_hash_builder( + [(key1(), value()), (key3(), value())], + NoopAccountTrieCursor::default(), + Default::default(), + [Nibbles::default()], + ); - let account_1 = create_account(1); - let account_2 = create_account(2); - let account_3 = create_account(3); - let leaf_1 = create_leaf_node(leaf_1_key.to_vec(), account_1.nonce); - let leaf_2 = create_leaf_node(leaf_2_key.to_vec(), account_2.nonce); - let leaf_3 = create_leaf_node(leaf_3_key.to_vec(), account_3.nonce); + let provider = DefaultTrieNodeProvider; + let mut sparse = ParallelSparseTrie::from_root( + TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + TrieMasks { + hash_mask:
branch_node_hash_masks.get(&Nibbles::default()).copied(), + tree_mask: branch_node_tree_masks.get(&Nibbles::default()).copied(), + }, + false, + ) + .unwrap(); + + // Generate the proof for the first key and reveal it in the sparse trie + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks, branch_node_tree_masks) = + run_hash_builder( + [(key1(), value()), (key3(), value())], + NoopAccountTrieCursor::default(), + Default::default(), + [key1()], + ); + let revealed_nodes: Vec<RevealedSparseNode> = hash_builder_proof_nodes + .nodes_sorted() + .into_iter() + .map(|(path, node)| { + let hash_mask = branch_node_hash_masks.get(&path).copied(); + let tree_mask = branch_node_tree_masks.get(&path).copied(); + RevealedSparseNode { + path, + node: TrieNode::decode(&mut &node[..]).unwrap(), + masks: TrieMasks { hash_mask, tree_mask }, + } + }) + .collect(); + sparse.reveal_nodes(revealed_nodes).unwrap(); - // Create bottom branch node - let branch_1_path = Nibbles::from_nibbles([0, 0, 0, 0]); - let branch_1 = create_branch_node_with_children( - &[0, 1], - vec![ - RlpNode::from_rlp(&alloy_rlp::encode(&leaf_1)), - RlpNode::from_rlp(&alloy_rlp::encode(&leaf_2)), - ], + // Check that the branch node exists with only two nibbles set + assert_eq!( + sparse.upper_subtrie.nodes.get(&Nibbles::default()), + Some(&SparseNode::new_branch(0b101.into())) ); - // Create an extension node - let extension_path = Nibbles::from_nibbles([0, 0, 0]); - let extension_key = Nibbles::from_nibbles([0]); - let extension = create_extension_node( - extension_key.to_vec(), - RlpNode::from_rlp(&alloy_rlp::encode(&branch_1)).as_hash().unwrap(), - ); + // Insert the leaf for the second key + sparse.update_leaf(key2(), value_encoded(), &provider).unwrap(); - // Create top branch node - let branch_2_path = Nibbles::from_nibbles([0, 0]); - let branch_2 = create_branch_node_with_children( - &[0, 1], - vec![ - RlpNode::from_rlp(&alloy_rlp::encode(&extension)), - RlpNode::from_rlp(&alloy_rlp::encode(&leaf_3)), - ], + // Check that the branch node was updated and another nibble was set + assert_eq!( + sparse.upper_subtrie.nodes.get(&Nibbles::default()), + Some(&SparseNode::new_branch(0b111.into())) ); - // Reveal nodes - subtrie.reveal_node(branch_2_path, &branch_2, TrieMasks::none()).unwrap(); - subtrie.reveal_node(leaf_1_path, &leaf_1, TrieMasks::none()).unwrap(); - subtrie.reveal_node(extension_path, &extension, TrieMasks::none()).unwrap(); - subtrie.reveal_node(branch_1_path, &branch_1, TrieMasks::none()).unwrap(); - subtrie.reveal_node(leaf_2_path, &leaf_2, TrieMasks::none()).unwrap(); - subtrie.reveal_node(leaf_3_path, &leaf_3, TrieMasks::none()).unwrap(); + // Generate the proof for the third key and reveal it in the sparse trie + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks, branch_node_tree_masks) = + run_hash_builder( + [(key1(), value()), (key3(), value())], + NoopAccountTrieCursor::default(), + Default::default(), + [key3()], + ); + let revealed_nodes: Vec<RevealedSparseNode> = hash_builder_proof_nodes + .nodes_sorted() + .into_iter() + .map(|(path, node)| { + let hash_mask = branch_node_hash_masks.get(&path).copied(); + let tree_mask = branch_node_tree_masks.get(&path).copied(); + RevealedSparseNode { + path, + node: TrieNode::decode(&mut &node[..]).unwrap(), + masks: TrieMasks { hash_mask, tree_mask }, + } + }) + .collect(); + sparse.reveal_nodes(revealed_nodes).unwrap();
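+ // Worked mask check for the sequence above: the initial proof revealed the + // root branch over children {0, 2} (0b101), inserting leaf 0x01 set bit 1 + // (0b111), and re-revealing the stale proof for 0x02 must keep 0b111 rather + // than resetting the branch to the proof's 0b101, as asserted below.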
- // Run hash builder for two leaf nodes - let (_, _, proof_nodes, _, _) = run_hash_builder( - [ - (leaf_1_full_path, account_1), - (leaf_2_full_path, account_2), - (leaf_3_full_path, account_3), - ], + // Check that nothing changed in the branch node + assert_eq!( + sparse.upper_subtrie.nodes.get(&Nibbles::default()), + Some(&SparseNode::new_branch(0b111.into())) + ); + + // Generate the nodes for the full trie with all three keys using the hash builder, and + // compare them to the sparse trie + let (_, _, hash_builder_proof_nodes, _, _) = run_hash_builder( + [(key1(), value()), (key2(), value()), (key3(), value())], NoopAccountTrieCursor::default(), Default::default(), - [ - branch_1_path, - extension_path, - branch_2_path, - leaf_1_full_path, - leaf_2_full_path, - leaf_3_full_path, - ], + [key1(), key2(), key3()], ); - // Update hashes for the subtrie - subtrie.update_hashes( - &mut PrefixSetMut::from([leaf_1_full_path, leaf_2_full_path, leaf_3_full_path]) - .freeze(), - ); + assert_eq_parallel_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); + } - // Compare hashes between hash builder and subtrie + /// We have three leaves: 0x0000, 0x0101, and 0x0102. Hash builder trie has all nodes, and we + /// have proofs for them. + /// + /// 1. Reveal the hash builder proof to leaf 0x0000 in the sparse trie. + /// 2. Remove leaf 0x0000 from the sparse trie (that will remove the branch node and create an + /// extension node with the key 0x01). + /// 3. Reveal the hash builder proof to leaf 0x0101 in the sparse trie. + /// + /// The hash builder proof to the leaf 0x0101 had a branch node in the path, but we turned it + /// into an extension node, so it should ignore this node. #[test] + fn sparse_trie_reveal_node_2() { + let key1 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x00, 0x00])); + let key2 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x01, 0x01])); + let key3 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x01, 0x02])); + let value = || Account::default(); + + // Generate the proof for the root node and initialize the sparse trie with it + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks, branch_node_tree_masks) = + run_hash_builder( + [(key1(), value()), (key2(), value()), (key3(), value())], + NoopAccountTrieCursor::default(), + Default::default(), + [Nibbles::default()], + ); - let hash_builder_branch_1_hash = - RlpNode::from_rlp(proof_nodes.get(&branch_1_path).unwrap().as_ref()).as_hash().unwrap(); - let subtrie_branch_1_hash = subtrie.nodes.get(&branch_1_path).unwrap().hash().unwrap(); - assert_eq!(hash_builder_branch_1_hash, subtrie_branch_1_hash); + let provider = DefaultTrieNodeProvider; + let mut sparse = ParallelSparseTrie::from_root( + TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + TrieMasks { + hash_mask: branch_node_hash_masks.get(&Nibbles::default()).copied(), + tree_mask: branch_node_tree_masks.get(&Nibbles::default()).copied(), + }, + false, + ) + .unwrap(); + + // Generate the proof for the children of the root branch node and reveal it in the sparse + // trie + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks, branch_node_tree_masks) = + run_hash_builder( + [(key1(), value()), (key2(), value()), (key3(), value())], + NoopAccountTrieCursor::default(), + Default::default(), + [key1(), Nibbles::from_nibbles_unchecked([0x01])], + ); + let revealed_nodes: Vec<RevealedSparseNode> = hash_builder_proof_nodes + .nodes_sorted() + .into_iter() + .map(|(path, node)| { + let hash_mask = branch_node_hash_masks.get(&path).copied(); + let tree_mask = branch_node_tree_masks.get(&path).copied(); + RevealedSparseNode { + path, + node: TrieNode::decode(&mut
&node[..]).unwrap(), + masks: TrieMasks { hash_mask, tree_mask }, + } + }) + .collect(); + sparse.reveal_nodes(revealed_nodes).unwrap(); - let hash_builder_extension_hash = - RlpNode::from_rlp(proof_nodes.get(&extension_path).unwrap().as_ref()) - .as_hash() - .unwrap(); - let subtrie_extension_hash = subtrie.nodes.get(&extension_path).unwrap().hash().unwrap(); - assert_eq!(hash_builder_extension_hash, subtrie_extension_hash); + // Check that the branch node exists + assert_eq!( + sparse.upper_subtrie.nodes.get(&Nibbles::default()), + Some(&SparseNode::new_branch(0b11.into())) + ); - let hash_builder_branch_2_hash = - RlpNode::from_rlp(proof_nodes.get(&branch_2_path).unwrap().as_ref()).as_hash().unwrap(); - let subtrie_branch_2_hash = subtrie.nodes.get(&branch_2_path).unwrap().hash().unwrap(); - assert_eq!(hash_builder_branch_2_hash, subtrie_branch_2_hash); + // Remove the leaf for the first key + sparse.remove_leaf(&key1(), &provider).unwrap(); - let subtrie_leaf_1_hash = subtrie.nodes.get(&leaf_1_path).unwrap().hash().unwrap(); - let hash_builder_leaf_1_hash = - RlpNode::from_rlp(proof_nodes.get(&leaf_1_path).unwrap().as_ref()).as_hash().unwrap(); - assert_eq!(hash_builder_leaf_1_hash, subtrie_leaf_1_hash); + // Check that the branch node was turned into an extension node + assert_eq!( + sparse.upper_subtrie.nodes.get(&Nibbles::default()), + Some(&SparseNode::new_ext(Nibbles::from_nibbles_unchecked([0x01]))) + ); - let hash_builder_leaf_2_hash = - RlpNode::from_rlp(proof_nodes.get(&leaf_2_path).unwrap().as_ref()).as_hash().unwrap(); - let subtrie_leaf_2_hash = subtrie.nodes.get(&leaf_2_path).unwrap().hash().unwrap(); - assert_eq!(hash_builder_leaf_2_hash, subtrie_leaf_2_hash); + // Generate the proof for the third key and reveal it in the sparse trie + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks, branch_node_tree_masks) = + run_hash_builder( + [(key1(), value()), (key2(), value()), (key3(), value())], + NoopAccountTrieCursor::default(), + Default::default(), + [key2()], + ); + let revealed_nodes: Vec<RevealedSparseNode> = hash_builder_proof_nodes + .nodes_sorted() + .into_iter() + .map(|(path, node)| { + let hash_mask = branch_node_hash_masks.get(&path).copied(); + let tree_mask = branch_node_tree_masks.get(&path).copied(); + RevealedSparseNode { + path, + node: TrieNode::decode(&mut &node[..]).unwrap(), + masks: TrieMasks { hash_mask, tree_mask }, + } + }) + .collect(); + sparse.reveal_nodes(revealed_nodes).unwrap(); - let hash_builder_leaf_3_hash = - RlpNode::from_rlp(proof_nodes.get(&leaf_3_path).unwrap().as_ref()).as_hash().unwrap(); - let subtrie_leaf_3_hash = subtrie.nodes.get(&leaf_3_path).unwrap().hash().unwrap(); - assert_eq!(hash_builder_leaf_3_hash, subtrie_leaf_3_hash); + // Check that nothing changed in the extension node + assert_eq!( + sparse.upper_subtrie.nodes.get(&Nibbles::default()), + Some(&SparseNode::new_ext(Nibbles::from_nibbles_unchecked([0x01]))) + ); } + /// We have two leaves that share the same prefix: 0x0001 and 0x0002, and a leaf with a + /// different prefix: 0x0100. Hash builder trie has only the first two leaves, and we have + /// proofs for them. + /// + /// 1. Insert the leaf 0x0100 into the sparse trie, and check that the root extension node was + /// turned into a branch node. + /// 2. Reveal the leaf 0x0001 in the sparse trie, and check that the root branch node wasn't + /// overwritten with the extension node from the proof.
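+ /// + /// In mask terms (same bit-per-nibble convention as the assertions below), + /// step 1 turns the root extension into Branch { state_mask: 0b11 }, and + /// step 2 must leave that 0b11 branch untouched.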
#[test] - fn test_remove_leaf_branch_becomes_extension() { - // - // 0x: Extension (Key = 5) - // 0x5: └── Branch (Mask = 1001) - // 0x50: ├── 0 -> Extension (Key = 23) - // 0x5023: │ └── Branch (Mask = 0101) - // 0x50231: │ ├── 1 -> Leaf - // 0x50233: │ └── 3 -> Leaf - // 0x53: └── 3 -> Leaf (Key = 7) - // - // After removing 0x53, extension+branch+extension become a single extension - // - let mut trie = new_test_trie( - [ - (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), - (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(TrieMask::new(0b1001))), - ( - Nibbles::from_nibbles([0x5, 0x0]), - SparseNode::new_ext(Nibbles::from_nibbles([0x2, 0x3])), - ), - ( - Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3]), - SparseNode::new_branch(TrieMask::new(0b0101)), - ), - ( - Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), - SparseNode::new_leaf(Nibbles::new()), - ), - ( - Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), - SparseNode::new_leaf(Nibbles::new()), - ), - ( - Nibbles::from_nibbles([0x5, 0x3]), - SparseNode::new_leaf(Nibbles::from_nibbles([0x7])), - ), - ] - .into_iter(), - ); + fn sparse_trie_reveal_node_3() { + let key1 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x00, 0x01])); + let key2 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x00, 0x02])); + let key3 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x01, 0x00])); + let value = || Account::default(); + let value_encoded = || { + let mut account_rlp = Vec::new(); + value().into_trie_account(EMPTY_ROOT_HASH).encode(&mut account_rlp); + account_rlp + }; - let provider = MockBlindedProvider::new(); + // Generate the proof for the root node and initialize the sparse trie with it + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks, branch_node_tree_masks) = + run_hash_builder( + [(key1(), value()), (key2(), value())], + NoopAccountTrieCursor::default(), + Default::default(), + [Nibbles::default()], + ); - // Remove the leaf with a full path of 0x537 - let leaf_full_path = Nibbles::from_nibbles([0x5, 0x3, 0x7]); - trie.remove_leaf(&leaf_full_path, provider).unwrap(); + let provider = DefaultTrieNodeProvider; + let mut sparse = ParallelSparseTrie::from_root( + TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + TrieMasks { + hash_mask: branch_node_hash_masks.get(&Nibbles::default()).copied(), + tree_mask: branch_node_tree_masks.get(&Nibbles::default()).copied(), + }, + false, + ) + .unwrap(); - let upper_subtrie = &trie.upper_subtrie; - let lower_subtrie_50 = trie.lower_subtries[0x50].as_ref().unwrap(); - let lower_subtrie_53 = trie.lower_subtries[0x53].as_ref().unwrap(); + // Check that the root extension node exists + assert_matches!( + sparse.upper_subtrie.nodes.get(&Nibbles::default()), + Some(SparseNode::Extension { key, hash: None, store_in_db_trie: None }) if *key == Nibbles::from_nibbles([0x00]) + ); - // Check that the leaf value was removed from the appropriate `SparseSubtrie`. - assert_matches!(lower_subtrie_53.inner.values.get(&leaf_full_path), None); + // Insert the leaf with a different prefix + sparse.update_leaf(key3(), value_encoded(), &provider).unwrap(); - // Check that the leaf node was removed, and that its parent/grandparent were modified - // appropriately. 
+ // Check that the extension node was turned into a branch node assert_matches!( - upper_subtrie.nodes.get(&Nibbles::from_nibbles([])), - Some(SparseNode::Extension{ key, ..}) - if key == &Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3]) + sparse.upper_subtrie.nodes.get(&Nibbles::default()), + Some(SparseNode::Branch { state_mask, hash: None, store_in_db_trie: None }) if *state_mask == TrieMask::new(0b11) ); - assert_matches!(upper_subtrie.nodes.get(&Nibbles::from_nibbles([0x5])), None); - assert_matches!(lower_subtrie_50.nodes.get(&Nibbles::from_nibbles([0x5, 0x0])), None); + + // Generate the proof for the first key and reveal it in the sparse trie + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks, branch_node_tree_masks) = + run_hash_builder( + [(key1(), value()), (key2(), value())], + NoopAccountTrieCursor::default(), + Default::default(), + [key1()], + ); + let revealed_nodes: Vec<RevealedSparseNode> = hash_builder_proof_nodes + .nodes_sorted() + .into_iter() + .map(|(path, node)| { + let hash_mask = branch_node_hash_masks.get(&path).copied(); + let tree_mask = branch_node_tree_masks.get(&path).copied(); + RevealedSparseNode { + path, + node: TrieNode::decode(&mut &node[..]).unwrap(), + masks: TrieMasks { hash_mask, tree_mask }, + } + }) + .collect(); + sparse.reveal_nodes(revealed_nodes).unwrap(); + + // Check that the branch node wasn't overwritten by the extension node in the proof assert_matches!( - lower_subtrie_50.nodes.get(&Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3])), - Some(SparseNode::Branch{ state_mask, .. }) - if *state_mask == 0b0101.into() + sparse.upper_subtrie.nodes.get(&Nibbles::default()), + Some(SparseNode::Branch { state_mask, hash: None, store_in_db_trie: None }) if *state_mask == TrieMask::new(0b11) ); - assert_matches!(lower_subtrie_53.nodes.get(&Nibbles::from_nibbles([0x5, 0x3])), None); } #[test] - fn test_remove_leaf_branch_becomes_leaf() { + fn test_update_leaf_cross_level() { + let ctx = ParallelSparseTrieTestContext; + let mut trie = + ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap(); + + // Test adding leaves that demonstrate the cross-level behavior + // Based on the example: leaves 0x1234, 0x1245, 0x1334, 0x1345 // - // 0x: Branch (Mask = 0011) - // 0x0: ├── 0 -> Leaf (Key = 12) - // 0x1: └── 1 -> Leaf (Key = 34) + // Final trie structure: + // Upper trie: + // 0x: Extension { key: 0x1 } + // └── 0x1: Branch { state_mask: 0b1100 } + // └── Subtrie (0x12): pointer to lower subtrie + // └── Subtrie (0x13): pointer to lower subtrie // - // After removing 0x012, branch becomes a leaf + // Lower subtrie (0x12): + // 0x12: Branch { state_mask: 0x8 | 0x10 } + // ├── 0x123: Leaf { key: 0x4 } + // └── 0x124: Leaf { key: 0x5 } // - let mut trie = new_test_trie( + // Lower subtrie (0x13): + // 0x13: Branch { state_mask: 0x8 | 0x10 } + // ├── 0x133: Leaf { key: 0x4 } + // └── 0x134: Leaf { key: 0x5 } + + // First add leaf 0x1345 - this should create a leaf in upper trie at 0x + let (leaf1_path, value1) = ctx.create_test_leaf([0x1, 0x3, 0x4, 0x5], 1); + trie.update_leaf(leaf1_path, value1.clone(), DefaultTrieNodeProvider).unwrap(); + + // Verify upper trie has a leaf at the root with key 1345 + ctx.assert_upper_subtrie(&trie) + .has_leaf(&Nibbles::default(), &Nibbles::from_nibbles([0x1, 0x3, 0x4, 0x5])) + .has_value(&leaf1_path, &value1); + + // Add leaf 0x1234 - this should go first in the upper subtrie + let (leaf2_path, value2) = ctx.create_test_leaf([0x1, 0x2, 0x3, 0x4], 2); + trie.update_leaf(leaf2_path, value2.clone(),
DefaultTrieNodeProvider).unwrap(); + + // Upper trie should now have a branch at 0x1 + ctx.assert_upper_subtrie(&trie) + .has_branch(&Nibbles::from_nibbles([0x1]), &[0x2, 0x3]) + .has_no_value(&leaf1_path) + .has_no_value(&leaf2_path); + + // Add leaf 0x1245 - this should cause a branch and create the 0x12 subtrie + let (leaf3_path, value3) = ctx.create_test_leaf([0x1, 0x2, 0x4, 0x5], 3); + trie.update_leaf(leaf3_path, value3.clone(), DefaultTrieNodeProvider).unwrap(); + + // Verify lower subtrie at 0x12 exists with correct structure + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x1, 0x2])) + .has_branch(&Nibbles::from_nibbles([0x1, 0x2]), &[0x3, 0x4]) + .has_leaf(&Nibbles::from_nibbles([0x1, 0x2, 0x3]), &Nibbles::from_nibbles([0x4])) + .has_leaf(&Nibbles::from_nibbles([0x1, 0x2, 0x4]), &Nibbles::from_nibbles([0x5])) + .has_value(&leaf2_path, &value2) + .has_value(&leaf3_path, &value3); + + // Add leaf 0x1334 - this should create another lower subtrie + let (leaf4_path, value4) = ctx.create_test_leaf([0x1, 0x3, 0x3, 0x4], 4); + trie.update_leaf(leaf4_path, value4.clone(), DefaultTrieNodeProvider).unwrap(); + + // Verify lower subtrie at 0x13 exists with correct values + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x1, 0x3])) + .has_value(&leaf1_path, &value1) + .has_value(&leaf4_path, &value4); + + // Verify the 0x12 subtrie still has its values + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x1, 0x2])) + .has_value(&leaf2_path, &value2) + .has_value(&leaf3_path, &value3); + + // Upper trie has no values + ctx.assert_upper_subtrie(&trie) + .has_extension(&Nibbles::default(), &Nibbles::from_nibbles([0x1])) + .has_branch(&Nibbles::from_nibbles([0x1]), &[0x2, 0x3]) + .has_no_value(&leaf1_path) + .has_no_value(&leaf2_path) + .has_no_value(&leaf3_path) + .has_no_value(&leaf4_path); + } + + #[test] + fn test_update_leaf_split_at_level_boundary() { + let ctx = ParallelSparseTrieTestContext; + let mut trie = + ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap(); + + // This test demonstrates what happens when we insert leaves that cause + // splitting exactly at the upper/lower trie boundary (2 nibbles). 
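+ // (Reminder of the boundary rule, as used throughout this file: paths of up + // to two nibbles live in the upper trie, while longer paths are grouped into + // lower subtries keyed by their first two nibbles, e.g. path 0x1234 belongs + // to the subtrie at 0x12.)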
+ // + // Final trie structure: + // Upper trie: + // 0x: Extension { key: 0x12 } + // └── Subtrie (0x12): pointer to lower subtrie + // + // Lower subtrie (0x12): + // 0x12: Branch { state_mask: 0x4 | 0x8 } + // ├── 0x122: Leaf { key: 0x4 } + // └── 0x123: Leaf { key: 0x4 } + + // First insert a leaf that ends exactly at the boundary (2 nibbles) + let (first_leaf_path, first_value) = ctx.create_test_leaf([0x1, 0x2, 0x2, 0x4], 1); + + trie.update_leaf(first_leaf_path, first_value.clone(), DefaultTrieNodeProvider).unwrap(); + + // In an empty trie, the first leaf becomes the root, regardless of path length + ctx.assert_upper_subtrie(&trie) + .has_leaf(&Nibbles::default(), &Nibbles::from_nibbles([0x1, 0x2, 0x2, 0x4])) + .has_value(&first_leaf_path, &first_value); + + // Now insert another leaf that shares the same 2-nibble prefix + let (second_leaf_path, second_value) = ctx.create_test_leaf([0x1, 0x2, 0x3, 0x4], 2); + + trie.update_leaf(second_leaf_path, second_value.clone(), DefaultTrieNodeProvider).unwrap(); + + // Now both leaves should be in a lower subtrie at index [0x1, 0x2] + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x1, 0x2])) + .has_branch(&Nibbles::from_nibbles([0x1, 0x2]), &[0x2, 0x3]) + .has_leaf(&Nibbles::from_nibbles([0x1, 0x2, 0x2]), &Nibbles::from_nibbles([0x4])) + .has_leaf(&Nibbles::from_nibbles([0x1, 0x2, 0x3]), &Nibbles::from_nibbles([0x4])) + .has_value(&first_leaf_path, &first_value) + .has_value(&second_leaf_path, &second_value); + + // Upper subtrie should no longer have these values + ctx.assert_upper_subtrie(&trie) + .has_no_value(&first_leaf_path) + .has_no_value(&second_leaf_path); + } + + #[test] + fn test_update_subtrie_with_multiple_leaves() { + let ctx = ParallelSparseTrieTestContext; + let mut trie = + ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap(); + + // First, add multiple leaves that will create a subtrie structure + // All leaves share the prefix [0x1, 0x2] to ensure they create a subtrie + // + // This should result in a trie with the following structure: + // 0x: Extension { key: 0x12 } + // └── Subtrie (0x12): + // 0x12: Branch { state_mask: 0x8 | 0x10 } + // ├── 0x123: Branch { state_mask: 0x10 | 0x20 } + // │ ├── 0x1234: Leaf { key: 0x } + // │ └── 0x1235: Leaf { key: 0x } + // └── 0x124: Branch { state_mask: 0x40 | 0x80 } + // ├── 0x1246: Leaf { key: 0x } + // └── 0x1247: Leaf { key: 0x } + let leaves = ctx.create_test_leaves(&[ + &[0x1, 0x2, 0x3, 0x4], + &[0x1, 0x2, 0x3, 0x5], + &[0x1, 0x2, 0x4, 0x6], + &[0x1, 0x2, 0x4, 0x7], + ]); + + // Insert all leaves + ctx.update_leaves(&mut trie, leaves.clone()); + + // Verify the upper subtrie has an extension node at the root with key 0x12 + ctx.assert_upper_subtrie(&trie) + .has_extension(&Nibbles::default(), &Nibbles::from_nibbles([0x1, 0x2])); + + // Verify the subtrie structure using fluent assertions + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x1, 0x2])) + .has_branch(&Nibbles::from_nibbles([0x1, 0x2]), &[0x3, 0x4]) + .has_branch(&Nibbles::from_nibbles([0x1, 0x2, 0x3]), &[0x4, 0x5]) + .has_branch(&Nibbles::from_nibbles([0x1, 0x2, 0x4]), &[0x6, 0x7]) + .has_value(&leaves[0].0, &leaves[0].1) + .has_value(&leaves[1].0, &leaves[1].1) + .has_value(&leaves[2].0, &leaves[2].1) + .has_value(&leaves[3].0, &leaves[3].1); + + // Now update one of the leaves with a new value + let updated_path = Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4]); + let (_, updated_value) = ctx.create_test_leaf([0x1, 0x2, 0x3, 0x4], 100);
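+ // Updating an already-present key should only overwrite its stored value; + // the node topology asserted above is expected to stay unchanged.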
+ trie.update_leaf(updated_path, updated_value.clone(), DefaultTrieNodeProvider).unwrap(); + + // Verify the subtrie structure is maintained and value is updated + // The branch structure should remain the same and all values should be present + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x1, 0x2])) + .has_branch(&Nibbles::from_nibbles([0x1, 0x2]), &[0x3, 0x4]) + .has_value(&updated_path, &updated_value) + .has_value(&leaves[1].0, &leaves[1].1) + .has_value(&leaves[2].0, &leaves[2].1) + .has_value(&leaves[3].0, &leaves[3].1); + + // Add a new leaf that extends an existing branch + let (new_leaf_path, new_leaf_value) = ctx.create_test_leaf([0x1, 0x2, 0x3, 0x6], 200); + + trie.update_leaf(new_leaf_path, new_leaf_value.clone(), DefaultTrieNodeProvider).unwrap(); + + // Verify the branch at [0x1, 0x2, 0x3] now has an additional child + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x1, 0x2])) + .has_branch(&Nibbles::from_nibbles([0x1, 0x2, 0x3]), &[0x4, 0x5, 0x6]) + .has_value(&new_leaf_path, &new_leaf_value); + } + + #[test] + fn test_update_subtrie_extension_node_subtrie() { + let ctx = ParallelSparseTrieTestContext; + let mut trie = + ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap(); + + // All leaves share the prefix [0x1, 0x2] to ensure they create a subtrie + // + // This should result in a trie with the following structure + // 0x: Extension { key: 0x123 } + // └── Subtrie (0x12): + // 0x123: Branch { state_mask: 0x10 | 0x20 } + // ├── 0x1234: Leaf { key: 0x } + // └── 0x1235: Leaf { key: 0x } + let leaves = ctx.create_test_leaves(&[&[0x1, 0x2, 0x3, 0x4], &[0x1, 0x2, 0x3, 0x5]]); + + // Insert all leaves + ctx.update_leaves(&mut trie, leaves.clone()); + + // Verify the upper subtrie has an extension node at the root with key 0x123 + ctx.assert_upper_subtrie(&trie) + .has_extension(&Nibbles::default(), &Nibbles::from_nibbles([0x1, 0x2, 0x3])); + + // Verify the lower subtrie structure + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x1, 0x2])) + .has_branch(&Nibbles::from_nibbles([0x1, 0x2, 0x3]), &[0x4, 0x5]) + .has_value(&leaves[0].0, &leaves[0].1) + .has_value(&leaves[1].0, &leaves[1].1); + } + + #[test] + fn update_subtrie_extension_node_cross_level() { + let ctx = ParallelSparseTrieTestContext; + let mut trie = + ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap(); + + // First, add multiple leaves that will create a subtrie structure + // All leaves share the prefix [0x1, 0x2] to ensure they create a branch node and subtrie + // + // This should result in a trie with the following structure + // 0x: Extension { key: 0x12 } + // └── Subtrie (0x12): + // 0x12: Branch { state_mask: 0x8 | 0x10 } + // ├── 0x123: Leaf { key: 0x4 } + // └── 0x124: Leaf { key: 0x5 } + let leaves = ctx.create_test_leaves(&[&[0x1, 0x2, 0x3, 0x4], &[0x1, 0x2, 0x4, 0x5]]); + + // Insert all leaves + ctx.update_leaves(&mut trie, leaves.clone()); + + // Verify the upper subtrie has an extension node at the root with key 0x12 + ctx.assert_upper_subtrie(&trie) + .has_extension(&Nibbles::default(), &Nibbles::from_nibbles([0x1, 0x2])); + + // Verify the lower subtrie structure + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x1, 0x2])) + .has_branch(&Nibbles::from_nibbles([0x1, 0x2]), &[0x3, 0x4]) + .has_leaf(&Nibbles::from_nibbles([0x1, 0x2, 0x3]), &Nibbles::from_nibbles([0x4])) + .has_leaf(&Nibbles::from_nibbles([0x1, 0x2, 0x4]), &Nibbles::from_nibbles([0x5])) + .has_value(&leaves[0].0, &leaves[0].1) + .has_value(&leaves[1].0, &leaves[1].1); + } + + #[test] + fn
test_update_single_nibble_paths() { + let ctx = ParallelSparseTrieTestContext; + let mut trie = + ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap(); + + // Test edge case: single nibble paths that create branches in upper trie + // + // Final trie structure: + // Upper trie: + // 0x: Branch { state_mask: 0x1 | 0x2 | 0x4 | 0x8 } + // ├── 0x0: Leaf { key: 0x } + // ├── 0x1: Leaf { key: 0x } + // ├── 0x2: Leaf { key: 0x } + // └── 0x3: Leaf { key: 0x } + + // Insert leaves with single nibble paths + let (leaf1_path, value1) = ctx.create_test_leaf([0x0], 1); + let (leaf2_path, value2) = ctx.create_test_leaf([0x1], 2); + let (leaf3_path, value3) = ctx.create_test_leaf([0x2], 3); + let (leaf4_path, value4) = ctx.create_test_leaf([0x3], 4); + + ctx.update_leaves( + &mut trie, [ - (Nibbles::default(), SparseNode::new_branch(TrieMask::new(0b0011))), - ( - Nibbles::from_nibbles([0x0]), - SparseNode::new_leaf(Nibbles::from_nibbles([0x1, 0x2])), - ), - ( - Nibbles::from_nibbles([0x1]), - SparseNode::new_leaf(Nibbles::from_nibbles([0x3, 0x4])), - ), - ] - .into_iter(), + (leaf1_path, value1.clone()), + (leaf2_path, value2.clone()), + (leaf3_path, value3.clone()), + (leaf4_path, value4.clone()), + ], ); - // Add the branch node to updated_nodes to simulate it being modified earlier - if let Some(updates) = trie.updates.as_mut() { - updates - .updated_nodes - .insert(Nibbles::default(), BranchNodeCompact::new(0b11, 0, 0, vec![], None)); - } + // Verify upper trie has a branch at root with 4 children + ctx.assert_upper_subtrie(&trie) + .has_branch(&Nibbles::default(), &[0x0, 0x1, 0x2, 0x3]) + .has_leaf(&Nibbles::from_nibbles([0x0]), &Nibbles::default()) + .has_leaf(&Nibbles::from_nibbles([0x1]), &Nibbles::default()) + .has_leaf(&Nibbles::from_nibbles([0x2]), &Nibbles::default()) + .has_leaf(&Nibbles::from_nibbles([0x3]), &Nibbles::default()) + .has_value(&leaf1_path, &value1) + .has_value(&leaf2_path, &value2) + .has_value(&leaf3_path, &value3) + .has_value(&leaf4_path, &value4); + } - let provider = MockBlindedProvider::new(); + #[test] + fn test_update_deep_extension_chain() { + let ctx = ParallelSparseTrieTestContext; + let mut trie = + ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap(); - // Remove the leaf with a full path of 0x012 - let leaf_full_path = Nibbles::from_nibbles([0x0, 0x1, 0x2]); - trie.remove_leaf(&leaf_full_path, provider).unwrap(); + // Test edge case: deep extension chains that span multiple levels + // + // Final trie structure: + // Upper trie: + // 0x: Extension { key: 0x111111 } + // └── Subtrie (0x11): pointer to lower subtrie + // + // Lower subtrie (0x11): + // 0x111111: Branch { state_mask: 0x1 | 0x2 } + // ├── 0x1111110: Leaf { key: 0x } + // └── 0x1111111: Leaf { key: 0x } - let upper_subtrie = &trie.upper_subtrie; + // Create leaves with a long common prefix + let (leaf1_path, value1) = ctx.create_test_leaf([0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x0], 1); + let (leaf2_path, value2) = ctx.create_test_leaf([0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1], 2); - // Check that the leaf's value was removed - assert_matches!(upper_subtrie.inner.values.get(&leaf_full_path), None); + ctx.update_leaves(&mut trie, [(leaf1_path, value1.clone()), (leaf2_path, value2.clone())]); - // Check that the branch node collapsed into a leaf node with the remaining child's key - assert_matches!( - upper_subtrie.nodes.get(&Nibbles::default()), - Some(SparseNode::Leaf{ key, ..}) - if key == &Nibbles::from_nibbles([0x1, 0x3, 0x4]) + // Verify upper 
trie has extension with the full common prefix + ctx.assert_upper_subtrie(&trie).has_extension( + &Nibbles::default(), + &Nibbles::from_nibbles([0x1, 0x1, 0x1, 0x1, 0x1, 0x1]), ); - // Check that the remaining child node was removed - assert_matches!(upper_subtrie.nodes.get(&Nibbles::from_nibbles([0x1])), None); - // Check that the removed child node was also removed - assert_matches!(upper_subtrie.nodes.get(&Nibbles::from_nibbles([0x0])), None); + // Verify lower subtrie has branch structure + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x1, 0x1])) + .has_branch(&Nibbles::from_nibbles([0x1, 0x1, 0x1, 0x1, 0x1, 0x1]), &[0x0, 0x1]) + .has_leaf( + &Nibbles::from_nibbles([0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x0]), + &Nibbles::default(), + ) + .has_leaf( + &Nibbles::from_nibbles([0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1]), + &Nibbles::default(), + ) + .has_value(&leaf1_path, &value1) + .has_value(&leaf2_path, &value2); + } - // Check that updates were tracked correctly when branch collapsed - let updates = trie.updates.as_ref().unwrap(); + #[test] + fn test_update_branch_with_all_nibbles() { + let ctx = ParallelSparseTrieTestContext; + let mut trie = + ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap(); - // The branch at root should be marked as removed since it collapsed - assert!(updates.removed_nodes.contains(&Nibbles::default())); + // Test edge case: branch node with all 16 possible nibble children + // + // Final trie structure: + // Upper trie: + // 0x: Extension { key: 0xA } + // └── Subtrie (0xA0): pointer to lower subtrie + // + // Lower subtrie (0xA0): + // 0xA0: Branch { state_mask: 0xFFFF } (all 16 children) + // ├── 0xA00: Leaf { key: 0x } + // ├── 0xA01: Leaf { key: 0x } + // ├── 0xA02: Leaf { key: 0x } + // ... 
(all nibbles 0x0 through 0xF) + // └── 0xA0F: Leaf { key: 0x } + + // Create leaves for all 16 possible nibbles + let mut leaves = Vec::new(); + for nibble in 0x0..=0xF { + let (path, value) = ctx.create_test_leaf([0xA, 0x0, nibble], nibble as u64 + 1); + leaves.push((path, value)); + } - // The branch should no longer be in updated_nodes - assert!(!updates.updated_nodes.contains_key(&Nibbles::default())); + // Insert all leaves + ctx.update_leaves(&mut trie, leaves.iter().cloned()); + + // Verify upper trie structure + ctx.assert_upper_subtrie(&trie) + .has_extension(&Nibbles::default(), &Nibbles::from_nibbles([0xA, 0x0])); + + // Verify lower subtrie has branch with all 16 children + let mut subtrie_assert = + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0xA, 0x0])).has_branch( + &Nibbles::from_nibbles([0xA, 0x0]), + &[0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xA, 0xB, 0xC, 0xD, 0xE, 0xF], + ); + + // Verify all leaves exist + for (i, (path, value)) in leaves.iter().enumerate() { + subtrie_assert = subtrie_assert + .has_leaf(&Nibbles::from_nibbles([0xA, 0x0, i as u8]), &Nibbles::default()) + .has_value(path, value); + } } #[test] - fn test_remove_leaf_extension_becomes_leaf() { + fn test_update_creates_multiple_subtries() { + let ctx = ParallelSparseTrieTestContext; + let mut trie = + ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap(); + + // Test edge case: updates that create multiple subtries at once // - // 0x: Extension (Key = 5) - // 0x5: └── Branch (Mask = 0011) - // 0x50: ├── 0 -> Leaf (Key = 12) - // 0x51: └── 1 -> Leaf (Key = 34) + // Final trie structure: + // Upper trie: + // 0x: Extension { key: 0x0 } + // └── 0x0: Branch { state_mask: 0xF } + // ├── Subtrie (0x00): pointer + // ├── Subtrie (0x01): pointer + // ├── Subtrie (0x02): pointer + // └── Subtrie (0x03): pointer // - // After removing 0x5012, extension+branch becomes a leaf + // Each lower subtrie has leaves: + // 0xXY: Leaf { key: 0xZ... 
} + + // Create leaves that will force multiple subtries + let leaves = vec![ + ctx.create_test_leaf([0x0, 0x0, 0x1, 0x2], 1), + ctx.create_test_leaf([0x0, 0x1, 0x3, 0x4], 2), + ctx.create_test_leaf([0x0, 0x2, 0x5, 0x6], 3), + ctx.create_test_leaf([0x0, 0x3, 0x7, 0x8], 4), + ]; + + // Insert all leaves + ctx.update_leaves(&mut trie, leaves.iter().cloned()); + + // Verify upper trie has extension then branch + ctx.assert_upper_subtrie(&trie) + .has_extension(&Nibbles::default(), &Nibbles::from_nibbles([0x0])) + .has_branch(&Nibbles::from_nibbles([0x0]), &[0x0, 0x1, 0x2, 0x3]); + + // Verify each subtrie exists and contains its leaf + for (i, (leaf_path, leaf_value)) in leaves.iter().enumerate() { + let subtrie_path = Nibbles::from_nibbles([0x0, i as u8]); + ctx.assert_subtrie(&trie, subtrie_path) + .has_leaf( + &subtrie_path, + &Nibbles::from_nibbles(match i { + 0 => vec![0x1, 0x2], + 1 => vec![0x3, 0x4], + 2 => vec![0x5, 0x6], + 3 => vec![0x7, 0x8], + _ => unreachable!(), + }), + ) + .has_value(leaf_path, leaf_value); + } + } + + #[test] + fn test_update_extension_to_branch_transformation() { + let ctx = ParallelSparseTrieTestContext; + let mut trie = + ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap(); + + // Test edge case: extension node transforms to branch when split + // + // Initial state after first two leaves: + // Upper trie: + // 0x: Extension { key: 0xFF0 } + // └── Subtrie (0xFF): pointer + // + // After third leaf (0xF0...): + // Upper trie: + // 0x: Extension { key: 0xF } + // └── 0xF: Branch { state_mask: 0x10 | 0x8000 } + // ├── Subtrie (0xF0): pointer + // └── Subtrie (0xFF): pointer + + // First two leaves share prefix 0xFF0 + let (leaf1_path, value1) = ctx.create_test_leaf([0xF, 0xF, 0x0, 0x1], 1); + let (leaf2_path, value2) = ctx.create_test_leaf([0xF, 0xF, 0x0, 0x2], 2); + let (leaf3_path, value3) = ctx.create_test_leaf([0xF, 0x0, 0x0, 0x3], 3); + + ctx.update_leaves(&mut trie, [(leaf1_path, value1.clone()), (leaf2_path, value2.clone())]); + + // Verify initial extension structure + ctx.assert_upper_subtrie(&trie) + .has_extension(&Nibbles::default(), &Nibbles::from_nibbles([0xF, 0xF, 0x0])); + + // Add leaf that splits the extension + ctx.update_leaves(&mut trie, [(leaf3_path, value3.clone())]); + + // Verify transformed structure + ctx.assert_upper_subtrie(&trie) + .has_extension(&Nibbles::default(), &Nibbles::from_nibbles([0xF])) + .has_branch(&Nibbles::from_nibbles([0xF]), &[0x0, 0xF]); + + // Verify subtries + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0xF, 0xF])) + .has_branch(&Nibbles::from_nibbles([0xF, 0xF, 0x0]), &[0x1, 0x2]) + .has_leaf(&Nibbles::from_nibbles([0xF, 0xF, 0x0, 0x1]), &Nibbles::default()) + .has_leaf(&Nibbles::from_nibbles([0xF, 0xF, 0x0, 0x2]), &Nibbles::default()) + .has_value(&leaf1_path, &value1) + .has_value(&leaf2_path, &value2); + + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0xF, 0x0])) + .has_leaf(&Nibbles::from_nibbles([0xF, 0x0]), &Nibbles::from_nibbles([0x0, 0x3])) + .has_value(&leaf3_path, &value3); + } + + #[test] + fn test_update_upper_extension_reveal_lower_hash_node() { + let ctx = ParallelSparseTrieTestContext; + + // Test edge case: extension pointing to hash node that gets updated to branch + // and reveals the hash node from lower trie + // + // Setup: + // Upper trie: + // 0x: Extension { key: 0xAB } + // └── Subtrie (0xAB): pointer + // Lower trie (0xAB): + // 0xAB: Hash // + // After update: + // Upper trie: + // 0x: Extension { key: 0xA } + // └── 0xA: Branch { state_mask: 
0b100000000001 } + // ├── 0xA0: Leaf { value: ... } + // └── 0xAB: pointer + // Lower trie (0xAB): + // 0xAB: Branch { state_mask: 0b11 } + // ├── 0xAB1: Hash + // └── 0xAB2: Hash + + // Create a mock provider that will provide the hash node + let mut provider = MockTrieNodeProvider::new(); + + // Create revealed branch which will get revealed and add it to the mock provider + let child_hashes = [ + RlpNode::word_rlp(&B256::repeat_byte(0x11)), + RlpNode::word_rlp(&B256::repeat_byte(0x22)), + ]; + let revealed_branch = create_branch_node_with_children(&[0x1, 0x2], child_hashes); + let mut encoded = Vec::new(); + revealed_branch.encode(&mut encoded); + provider.add_revealed_node( + Nibbles::from_nibbles([0xA, 0xB]), + RevealedNode { node: encoded.into(), tree_mask: None, hash_mask: None }, + ); + let mut trie = new_test_trie( [ - (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), - (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(TrieMask::new(0b0011))), - ( - Nibbles::from_nibbles([0x5, 0x0]), - SparseNode::new_leaf(Nibbles::from_nibbles([0x1, 0x2])), - ), - ( - Nibbles::from_nibbles([0x5, 0x1]), - SparseNode::new_leaf(Nibbles::from_nibbles([0x3, 0x4])), - ), + (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0xA, 0xB]))), + (Nibbles::from_nibbles([0xA, 0xB]), SparseNode::Hash(B256::repeat_byte(0x42))), ] .into_iter(), ); - let provider = MockBlindedProvider::new(); + // Now add a leaf that will force the hash node to become a branch + let (leaf_path, value) = ctx.create_test_leaf([0xA, 0x0], 1); + trie.update_leaf(leaf_path, value, provider).unwrap(); - // Remove the leaf with a full path of 0x5012 - let leaf_full_path = Nibbles::from_nibbles([0x5, 0x0, 0x1, 0x2]); - trie.remove_leaf(&leaf_full_path, provider).unwrap(); + // Verify the structure: extension should now terminate in a branch on the upper trie + ctx.assert_upper_subtrie(&trie) + .has_extension(&Nibbles::default(), &Nibbles::from_nibbles([0xA])) + .has_branch(&Nibbles::from_nibbles([0xA]), &[0x0, 0xB]); - let upper_subtrie = &trie.upper_subtrie; - let lower_subtrie_50 = trie.lower_subtries[0x50].as_ref().unwrap(); - let lower_subtrie_51 = trie.lower_subtries[0x51].as_ref().unwrap(); + // Verify the lower trie now has a branch structure + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0xA, 0xB])) + .has_branch(&Nibbles::from_nibbles([0xA, 0xB]), &[0x1, 0x2]) + .has_hash(&Nibbles::from_nibbles([0xA, 0xB, 0x1]), &B256::repeat_byte(0x11)) + .has_hash(&Nibbles::from_nibbles([0xA, 0xB, 0x2]), &B256::repeat_byte(0x22)); + } - // Check that the full key was removed - assert_matches!(lower_subtrie_50.inner.values.get(&leaf_full_path), None); + #[test] + fn test_update_long_shared_prefix_at_boundary() { + let ctx = ParallelSparseTrieTestContext; + let mut trie = + ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap(); - // Check that the other leaf's value was moved to the upper trie - let other_leaf_full_value = Nibbles::from_nibbles([0x5, 0x1, 0x3, 0x4]); - assert_matches!(lower_subtrie_51.inner.values.get(&other_leaf_full_value), None); - assert_matches!(upper_subtrie.inner.values.get(&other_leaf_full_value), Some(_)); + // Test edge case: leaves with long shared prefix that ends exactly at 2-nibble boundary + // + // Final trie structure: + // Upper trie: + // 0x: Extension { key: 0xAB } + // └── Subtrie (0xAB): pointer to lower subtrie + // + // Lower subtrie (0xAB): + // 0xAB: Branch { state_mask: 0x1000 | 0x2000 } + // ├── 0xABC: Leaf { key: 0xDEF } + // 
└── 0xABD: Leaf { key: 0xEF0 } + + // Create leaves that share exactly 2 nibbles + let (leaf1_path, value1) = ctx.create_test_leaf([0xA, 0xB, 0xC, 0xD, 0xE, 0xF], 1); + let (leaf2_path, value2) = ctx.create_test_leaf([0xA, 0xB, 0xD, 0xE, 0xF, 0x0], 2); + + trie.update_leaf(leaf1_path, value1.clone(), DefaultTrieNodeProvider).unwrap(); + trie.update_leaf(leaf2_path, value2.clone(), DefaultTrieNodeProvider).unwrap(); + + // Verify upper trie structure + ctx.assert_upper_subtrie(&trie) + .has_extension(&Nibbles::default(), &Nibbles::from_nibbles([0xA, 0xB])); + + // Verify lower subtrie structure + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0xA, 0xB])) + .has_branch(&Nibbles::from_nibbles([0xA, 0xB]), &[0xC, 0xD]) + .has_leaf( + &Nibbles::from_nibbles([0xA, 0xB, 0xC]), + &Nibbles::from_nibbles([0xD, 0xE, 0xF]), + ) + .has_leaf( + &Nibbles::from_nibbles([0xA, 0xB, 0xD]), + &Nibbles::from_nibbles([0xE, 0xF, 0x0]), + ) + .has_value(&leaf1_path, &value1) + .has_value(&leaf2_path, &value2); + } - // Check that the extension node collapsed into a leaf node - assert_matches!( - upper_subtrie.nodes.get(&Nibbles::default()), - Some(SparseNode::Leaf{ key, ..}) - if key == &Nibbles::from_nibbles([0x5, 0x1, 0x3, 0x4]) - ); + #[test] + fn test_update_branch_to_extension_collapse() { + let ctx = ParallelSparseTrieTestContext; + let mut trie = + ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap(); - // Check that intermediate nodes were removed - assert_matches!(upper_subtrie.nodes.get(&Nibbles::from_nibbles([0x5])), None); - assert_matches!(lower_subtrie_50.nodes.get(&Nibbles::from_nibbles([0x5, 0x0])), None); - assert_matches!(lower_subtrie_51.nodes.get(&Nibbles::from_nibbles([0x5, 0x1])), None); + // Test creating a trie with leaves that share a long common prefix + // + // Initial state with 3 leaves (0x1234, 0x2345, 0x2356): + // Upper trie: + // 0x: Branch { state_mask: 0x6 } + // ├── 0x1: Leaf { key: 0x234 } + // └── 0x2: Extension { key: 0x3 } + // └── Subtrie (0x23): pointer + // Lower subtrie (0x23): + // 0x23: Branch { state_mask: 0x30 } + // ├── 0x234: Leaf { key: 0x5 } + // └── 0x235: Leaf { key: 0x6 } + // + // Then we create a new trie with leaves (0x1234, 0x1235, 0x1236): + // Expected structure: + // Upper trie: + // 0x: Extension { key: 0x123 } + // └── Subtrie (0x12): pointer + // Lower subtrie (0x12): + // 0x123: Branch { state_mask: 0x70 } // bits 4, 5, 6 set + // ├── 0x1234: Leaf { key: 0x } + // ├── 0x1235: Leaf { key: 0x } + // └── 0x1236: Leaf { key: 0x } + + // Create initial leaves + let (leaf1_path, value1) = ctx.create_test_leaf([0x1, 0x2, 0x3, 0x4], 1); + let (leaf2_path, value2) = ctx.create_test_leaf([0x2, 0x3, 0x4, 0x5], 2); + let (leaf3_path, value3) = ctx.create_test_leaf([0x2, 0x3, 0x5, 0x6], 3); + + trie.update_leaf(leaf1_path, value1, DefaultTrieNodeProvider).unwrap(); + trie.update_leaf(leaf2_path, value2, DefaultTrieNodeProvider).unwrap(); + trie.update_leaf(leaf3_path, value3, DefaultTrieNodeProvider).unwrap(); + + // Verify initial structure has branch at root + ctx.assert_upper_subtrie(&trie).has_branch(&Nibbles::default(), &[0x1, 0x2]); + + // Now update to create a pattern where extension is more efficient + // Replace leaves to all share prefix 0x123 + let (new_leaf1_path, new_value1) = ctx.create_test_leaf([0x1, 0x2, 0x3, 0x4], 10); + let (new_leaf2_path, new_value2) = ctx.create_test_leaf([0x1, 0x2, 0x3, 0x5], 11); + let (new_leaf3_path, new_value3) = ctx.create_test_leaf([0x1, 0x2, 0x3, 0x6], 12); + + // Clear and add 
new leaves + let mut trie = + ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap(); + trie.update_leaf(new_leaf1_path, new_value1.clone(), DefaultTrieNodeProvider).unwrap(); + trie.update_leaf(new_leaf2_path, new_value2.clone(), DefaultTrieNodeProvider).unwrap(); + trie.update_leaf(new_leaf3_path, new_value3.clone(), DefaultTrieNodeProvider).unwrap(); + + // Verify new structure has extension + ctx.assert_upper_subtrie(&trie) + .has_extension(&Nibbles::default(), &Nibbles::from_nibbles([0x1, 0x2, 0x3])); + + // Verify lower subtrie path was correctly updated to 0x123 + ctx.assert_subtrie_path(&trie, [0x1, 0x2], [0x1, 0x2, 0x3]); + + // Verify lower subtrie - all three leaves should be properly inserted + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x1, 0x2])) + .has_branch(&Nibbles::from_nibbles([0x1, 0x2, 0x3]), &[0x4, 0x5, 0x6]) // All three children + .has_leaf(&Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4]), &Nibbles::default()) + .has_leaf(&Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x5]), &Nibbles::default()) + .has_leaf(&Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x6]), &Nibbles::default()) + .has_value(&new_leaf1_path, &new_value1) + .has_value(&new_leaf2_path, &new_value2) + .has_value(&new_leaf3_path, &new_value3); } #[test] - fn test_remove_leaf_branch_on_branch() { + fn test_update_shared_prefix_patterns() { + let ctx = ParallelSparseTrieTestContext; + let mut trie = + ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap(); + + // Test edge case: different patterns of shared prefixes // - // 0x: Branch (Mask = 0101) - // 0x0: ├── 0 -> Leaf (Key = 12) - // 0x2: └── 2 -> Branch (Mask = 0011) - // 0x20: ├── 0 -> Leaf (Key = 34) - // 0x21: └── 1 -> Leaf (Key = 56) + // Final trie structure: + // Upper trie: + // 0x: Branch { state_mask: 0x6 } + // ├── 0x1: Leaf { key: 0x234 } + // └── 0x2: Extension { key: 0x3 } + // └── Subtrie (0x23): pointer // - // After removing 0x2034, the inner branch becomes a leaf + // Lower subtrie (0x23): + // 0x23: Branch { state_mask: 0x10 | 0x20 } + // ├── 0x234: Leaf { key: 0x5 } + // └── 0x235: Leaf { key: 0x6 } + + // Create leaves with different shared prefix patterns + let (leaf1_path, value1) = ctx.create_test_leaf([0x1, 0x2, 0x3, 0x4], 1); + let (leaf2_path, value2) = ctx.create_test_leaf([0x2, 0x3, 0x4, 0x5], 2); + let (leaf3_path, value3) = ctx.create_test_leaf([0x2, 0x3, 0x5, 0x6], 3); + + trie.update_leaf(leaf1_path, value1, DefaultTrieNodeProvider).unwrap(); + trie.update_leaf(leaf2_path, value2.clone(), DefaultTrieNodeProvider).unwrap(); + trie.update_leaf(leaf3_path, value3.clone(), DefaultTrieNodeProvider).unwrap(); + + // Verify upper trie structure + ctx.assert_upper_subtrie(&trie) + .has_branch(&Nibbles::default(), &[0x1, 0x2]) + .has_leaf(&Nibbles::from_nibbles([0x1]), &Nibbles::from_nibbles([0x2, 0x3, 0x4])) + .has_extension(&Nibbles::from_nibbles([0x2]), &Nibbles::from_nibbles([0x3])); + + // Verify lower subtrie structure + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x2, 0x3])) + .has_branch(&Nibbles::from_nibbles([0x2, 0x3]), &[0x4, 0x5]) + .has_leaf(&Nibbles::from_nibbles([0x2, 0x3, 0x4]), &Nibbles::from_nibbles([0x5])) + .has_leaf(&Nibbles::from_nibbles([0x2, 0x3, 0x5]), &Nibbles::from_nibbles([0x6])) + .has_value(&leaf2_path, &value2) + .has_value(&leaf3_path, &value3); + } + + #[test] + fn test_progressive_branch_creation() { + let ctx = ParallelSparseTrieTestContext; + let mut trie = + ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), 
true).unwrap(); + + // Test starting with a single leaf and progressively adding leaves + // that create branch nodes at shorter and shorter paths // - let mut trie = new_test_trie( - [ - (Nibbles::default(), SparseNode::new_branch(TrieMask::new(0b0101))), - ( - Nibbles::from_nibbles([0x0]), - SparseNode::new_leaf(Nibbles::from_nibbles([0x1, 0x2])), - ), - (Nibbles::from_nibbles([0x2]), SparseNode::new_branch(TrieMask::new(0b0011))), - ( - Nibbles::from_nibbles([0x2, 0x0]), - SparseNode::new_leaf(Nibbles::from_nibbles([0x3, 0x4])), - ), - ( - Nibbles::from_nibbles([0x2, 0x1]), - SparseNode::new_leaf(Nibbles::from_nibbles([0x5, 0x6])), - ), - ] - .into_iter(), + // Step 1: Add leaf at 0x12345 + // Upper trie: + // 0x: Leaf { key: 0x12345 } + // + // Step 2: Add leaf at 0x12346 + // Upper trie: + // 0x: Extension { key: 0x1234 } + // └── Subtrie (0x12): pointer + // Lower subtrie (0x12): + // 0x1234: Branch { state_mask: 0x60 } // bits 5 and 6 set + // ├── 0x12345: Leaf { key: 0x } + // └── 0x12346: Leaf { key: 0x } + // + // Step 3: Add leaf at 0x1235 + // Lower subtrie (0x12) updates to: + // 0x123: Branch { state_mask: 0x30 } // bits 4 and 5 set + // ├── 0x1234: Branch { state_mask: 0x60 } + // │ ├── 0x12345: Leaf { key: 0x } + // │ └── 0x12346: Leaf { key: 0x } + // └── 0x1235: Leaf { key: 0x } + // + // Step 4: Add leaf at 0x124 + // Lower subtrie (0x12) updates to: + // 0x12: Branch { state_mask: 0x18 } // bits 3 and 4 set + // ├── 0x123: Branch { state_mask: 0x30 } + // │ ├── 0x1234: Branch { state_mask: 0x60 } + // │ │ ├── 0x12345: Leaf { key: 0x } + // │ │ └── 0x12346: Leaf { key: 0x } + // │ └── 0x1235: Leaf { key: 0x } + // └── 0x124: Leaf { key: 0x } + + // Step 1: Add first leaf - initially stored as leaf in upper trie + let (leaf1_path, value1) = ctx.create_test_leaf([0x1, 0x2, 0x3, 0x4, 0x5], 1); + trie.update_leaf(leaf1_path, value1.clone(), DefaultTrieNodeProvider).unwrap(); + + // Verify leaf node in upper trie (optimized single-leaf case) + ctx.assert_upper_subtrie(&trie) + .has_leaf(&Nibbles::default(), &Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4, 0x5])) + .has_value(&leaf1_path, &value1); + + // Step 2: Add leaf at 0x12346 - creates branch at 0x1234 + let (leaf2_path, value2) = ctx.create_test_leaf([0x1, 0x2, 0x3, 0x4, 0x6], 2); + trie.update_leaf(leaf2_path, value2.clone(), DefaultTrieNodeProvider).unwrap(); + + // Verify extension now goes to 0x1234 + ctx.assert_upper_subtrie(&trie) + .has_extension(&Nibbles::default(), &Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4])); + + // Verify subtrie path updated to 0x1234 + ctx.assert_subtrie_path(&trie, [0x1, 0x2], [0x1, 0x2, 0x3, 0x4]); + + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x1, 0x2])) + .has_branch(&Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4]), &[0x5, 0x6]) + .has_leaf(&Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4, 0x5]), &Nibbles::default()) + .has_leaf(&Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4, 0x6]), &Nibbles::default()) + .has_value(&leaf1_path, &value1) + .has_value(&leaf2_path, &value2); + + // Step 3: Add leaf at 0x1235 - creates branch at 0x123 + let (leaf3_path, value3) = ctx.create_test_leaf([0x1, 0x2, 0x3, 0x5], 3); + trie.update_leaf(leaf3_path, value3.clone(), DefaultTrieNodeProvider).unwrap(); + + // Verify extension now goes to 0x123 + ctx.assert_upper_subtrie(&trie) + .has_extension(&Nibbles::default(), &Nibbles::from_nibbles([0x1, 0x2, 0x3])); + + // Verify subtrie path updated to 0x123 + ctx.assert_subtrie_path(&trie, [0x1, 0x2], [0x1, 0x2, 0x3]); + + ctx.assert_subtrie(&trie, 
Nibbles::from_nibbles([0x1, 0x2])) + .has_branch(&Nibbles::from_nibbles([0x1, 0x2, 0x3]), &[0x4, 0x5]) + .has_branch(&Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4]), &[0x5, 0x6]) + .has_leaf(&Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x5]), &Nibbles::default()) + .has_value(&leaf1_path, &value1) + .has_value(&leaf2_path, &value2) + .has_value(&leaf3_path, &value3); + + // Step 4: Add leaf at 0x124 - creates branch at 0x12 (subtrie root) + let (leaf4_path, value4) = ctx.create_test_leaf([0x1, 0x2, 0x4], 4); + trie.update_leaf(leaf4_path, value4.clone(), DefaultTrieNodeProvider).unwrap(); + + // Verify extension now goes to 0x12 + ctx.assert_upper_subtrie(&trie) + .has_extension(&Nibbles::default(), &Nibbles::from_nibbles([0x1, 0x2])); + + // Verify subtrie path updated to 0x12 + ctx.assert_subtrie_path(&trie, [0x1, 0x2], [0x1, 0x2]); + + // Verify final structure + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x1, 0x2])) + .has_branch(&Nibbles::from_nibbles([0x1, 0x2]), &[0x3, 0x4]) + .has_branch(&Nibbles::from_nibbles([0x1, 0x2, 0x3]), &[0x4, 0x5]) + .has_branch(&Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4]), &[0x5, 0x6]) + .has_leaf(&Nibbles::from_nibbles([0x1, 0x2, 0x4]), &Nibbles::default()) + .has_value(&leaf1_path, &value1) + .has_value(&leaf2_path, &value2) + .has_value(&leaf3_path, &value3) + .has_value(&leaf4_path, &value4); + } + + #[test] + fn test_update_max_depth_paths() { + let ctx = ParallelSparseTrieTestContext; + let mut trie = + ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap(); + + // Test edge case: very long paths (64 nibbles - max for addresses/storage) + // + // Final trie structure: + // Upper trie: + // 0x: Extension { key: 0xFF } + // └── Subtrie (0xFF): pointer + // + // Lower subtrie (0xFF): + // Has very long paths with slight differences at the end + + // Create two 64-nibble paths that differ only in the last nibble + let mut path1_nibbles = vec![0xF; 63]; + path1_nibbles.push(0x0); + let mut path2_nibbles = vec![0xF; 63]; + path2_nibbles.push(0x1); + + let (leaf1_path, value1) = ctx.create_test_leaf(&path1_nibbles, 1); + let (leaf2_path, value2) = ctx.create_test_leaf(&path2_nibbles, 2); + + trie.update_leaf(leaf1_path, value1.clone(), DefaultTrieNodeProvider).unwrap(); + trie.update_leaf(leaf2_path, value2.clone(), DefaultTrieNodeProvider).unwrap(); + + // The common prefix of 63 F's will create a very long extension + let extension_key = vec![0xF; 63]; + ctx.assert_upper_subtrie(&trie) + .has_extension(&Nibbles::default(), &Nibbles::from_nibbles(&extension_key)); + + // Verify the subtrie has the branch at the end + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0xF, 0xF])) + .has_branch(&Nibbles::from_nibbles(&path1_nibbles[..63]), &[0x0, 0x1]) + .has_value(&leaf1_path, &value1) + .has_value(&leaf2_path, &value2); + } + + #[test] + fn test_hoodie_block_1_data() { + // Reveal node at path Nibbles(0x) - root branch node + let root_branch_stack = vec![ + hex!("a0550b6aba4dd4582a2434d2cbdad8d3007d09f622d7a6e6eaa7a49385823c2fa2"), + hex!("a04788a4975a9e1efd29b834fd80fdfe8a57cc1b1c5ace6d30ce5a36a15e0092b3"), + hex!("a093aeccf87da304e6f7d09edc5d7bd3a552808866d2149dd0940507a8f9bfa910"), + hex!("a08b5b423ba68d0dec2eca1f408076f9170678505eb4a5db2abbbd83bb37666949"), + hex!("a08592f62216af4218098a78acad7cf472a727fb55e6c27d3cfdf2774d4518eb83"), + hex!("a0ef02aeee845cb64c11f85edc1a3094227c26445952554b8a9248915d80c746c3"), + hex!("a0df2529ee3a1ce4df5a758cf17e6a86d0fb5ea22ab7071cf60af6412e9b0a428a"), + 
hex!("a0acaa1092db69cd5a63676685827b3484c4b80dc1d3361f6073bbb9240101e144"), + hex!("a09c3f2bb2a729d71f246a833353ade65667716bb330e0127a3299a42d11200f93"), + hex!("a0ce978470f4c0b1f8069570563a14d2b79d709add2db4bf22dd9b6aed3271c566"), + hex!("a095f783cd1d464a60e3c8adcadc28c6eb9fec7306664df39553be41dccc909606"), + hex!("a0a9083f5fb914b255e1feb5d951a4dfddacf3c8003ef1d1ec6a13bb6ba5b2ac62"), + hex!("a0fec113d537d8577cd361e0cabf5e95ef58f1cc34318292fdecce9fae57c3e094"), + hex!("a08b7465f5fe8b3e3c0d087cb7521310d4065ef2a0ee43bf73f68dee8a5742b3dd"), + hex!("a0c589aa1ae3d5fd87d8640957f7d5184a4ac06f393b453a8e8ed7e8fba0d385c8"), + hex!("a0b516d6f3352f87beab4ed6e7322f191fc7a147686500ef4de7dd290ad784ef51"), + ]; + + let root_branch_rlp_stack: Vec = root_branch_stack + .iter() + .map(|hex_str| RlpNode::from_raw_rlp(&hex_str[..]).unwrap()) + .collect(); + + let root_branch_node = BranchNode::new( + root_branch_rlp_stack, + TrieMask::new(0b1111111111111111), // state_mask: all 16 children present ); - let provider = MockBlindedProvider::new(); + let root_branch_masks = TrieMasks { + hash_mask: Some(TrieMask::new(0b1111111111111111)), + tree_mask: Some(TrieMask::new(0b1111111111111111)), + }; - // Remove the leaf with a full path of 0x2034 - let leaf_full_path = Nibbles::from_nibbles([0x2, 0x0, 0x3, 0x4]); - trie.remove_leaf(&leaf_full_path, provider).unwrap(); + let mut trie = ParallelSparseTrie::from_root( + TrieNode::Branch(root_branch_node), + root_branch_masks, + true, + ) + .unwrap(); + + // Reveal node at path Nibbles(0x3) - branch node + let branch_0x3_stack = vec![ + hex!("a09da7d9755fe0c558b3c3de9fdcdf9f28ae641f38c9787b05b73ab22ae53af3e2"), + hex!("a0d9990bf0b810d1145ecb2b011fd68c63cc85564e6724166fd4a9520180706e5f"), + hex!("a0f60eb4b12132a40df05d9bbdb88bbde0185a3f097f3c76bf4200c23eda26cf86"), + hex!("a0ca976997ddaf06f18992f6207e4f6a05979d07acead96568058789017cc6d06b"), + hex!("a04d78166b48044fdc28ed22d2fd39c8df6f8aaa04cb71d3a17286856f6893ff83"), + hex!("a021d4f90c34d3f1706e78463b6482bca77a3aa1cd059a3f326c42a1cfd30b9b60"), + hex!("a0fc3b71c33e2e6b77c5e494c1db7fdbb447473f003daf378c7a63ba9bf3f0049d"), + hex!("a0e33ed2be194a3d93d343e85642447c93a9d0cfc47a016c2c23d14c083be32a7c"), + hex!("a07b8e7a21c1178d28074f157b50fca85ee25c12568ff8e9706dcbcdacb77bf854"), + hex!("a0973274526811393ea0bf4811ca9077531db00d06b86237a2ecd683f55ba4bcb0"), + hex!("a03a93d726d7487874e51b52d8d534c63aa2a689df18e3b307c0d6cb0a388b00f3"), + hex!("a06aa67101d011d1c22fe739ef83b04b5214a3e2f8e1a2625d8bfdb116b447e86f"), + hex!("a02dd545b33c62d33a183e127a08a4767fba891d9f3b94fc20a2ca02600d6d1fff"), + hex!("a0fe6db87d00f06d53bff8169fa497571ff5af1addfb715b649b4d79dd3e394b04"), + hex!("a0d9240a9d2d5851d05a97ff3305334dfdb0101e1e321fc279d2bb3cad6afa8fc8"), + hex!("a01b69c6ab5173de8a8ec53a6ebba965713a4cc7feb86cb3e230def37c230ca2b2"), + ]; - let upper_subtrie = &trie.upper_subtrie; - let lower_subtrie_20 = trie.lower_subtries[0x20].as_ref().unwrap(); - let lower_subtrie_21 = trie.lower_subtries[0x21].as_ref().unwrap(); + let branch_0x3_rlp_stack: Vec = branch_0x3_stack + .iter() + .map(|hex_str| RlpNode::from_raw_rlp(&hex_str[..]).unwrap()) + .collect(); - // Check that the leaf's value was removed - assert_matches!(lower_subtrie_20.inner.values.get(&leaf_full_path), None); + let branch_0x3_node = BranchNode::new( + branch_0x3_rlp_stack, + TrieMask::new(0b1111111111111111), // state_mask: all 16 children present + ); - // Check that the other leaf's value was moved to the upper trie - let other_leaf_full_value = Nibbles::from_nibbles([0x2, 0x1, 
0x5, 0x6]); - assert_matches!(lower_subtrie_21.inner.values.get(&other_leaf_full_value), None); - assert_matches!(upper_subtrie.inner.values.get(&other_leaf_full_value), Some(_)); + let branch_0x3_masks = TrieMasks { + hash_mask: Some(TrieMask::new(0b0100010000010101)), + tree_mask: Some(TrieMask::new(0b0100000000000000)), + }; - // Check that the root branch still exists unchanged - assert_matches!( - upper_subtrie.nodes.get(&Nibbles::default()), - Some(SparseNode::Branch{ state_mask, .. }) - if *state_mask == 0b0101.into() + // Reveal node at path Nibbles(0x37) - leaf node + let leaf_path = Nibbles::from_nibbles([0x3, 0x7]); + let leaf_key = Nibbles::unpack( + &hex!("d65eaa92c6bc4c13a5ec45527f0c18ea8932588728769ec7aecfe6d9f32e42")[..], ); + let leaf_value = hex!("f8440180a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0f57acd40259872606d76197ef052f3d35588dadf919ee1f0e3cb9b62d3f4b02c").to_vec(); - // Check that the inner branch became an extension - assert_matches!( - upper_subtrie.nodes.get(&Nibbles::from_nibbles([0x2])), - Some(SparseNode::Leaf{ key, ..}) - if key == &Nibbles::from_nibbles([0x1, 0x5, 0x6]) + let leaf_node = LeafNode::new(leaf_key, leaf_value); + let leaf_masks = TrieMasks::none(); + + trie.reveal_nodes(vec![ + RevealedSparseNode { + path: Nibbles::from_nibbles([0x3]), + node: TrieNode::Branch(branch_0x3_node), + masks: branch_0x3_masks, + }, + RevealedSparseNode { + path: leaf_path, + node: TrieNode::Leaf(leaf_node), + masks: leaf_masks, + }, + ]) + .unwrap(); + + // Update leaf with its new value + let mut leaf_full_path = leaf_path; + leaf_full_path.extend(&leaf_key); + + let leaf_new_value = vec![ + 248, 68, 1, 128, 160, 224, 163, 152, 169, 122, 160, 155, 102, 53, 41, 0, 47, 28, 205, + 190, 199, 5, 215, 108, 202, 22, 138, 70, 196, 178, 193, 208, 18, 96, 95, 63, 238, 160, + 245, 122, 205, 64, 37, 152, 114, 96, 109, 118, 25, 126, 240, 82, 243, 211, 85, 136, + 218, 223, 145, 158, 225, 240, 227, 203, 155, 98, 211, 244, 176, 44, + ]; + + trie.update_leaf(leaf_full_path, leaf_new_value.clone(), DefaultTrieNodeProvider).unwrap(); + + // Sanity checks before calculating the root + assert_eq!( + Some(&leaf_new_value), + trie.lower_subtrie_for_path(&leaf_path).unwrap().inner.values.get(&leaf_full_path) ); + assert!(trie.upper_subtrie.inner.values.is_empty()); - // Check that the branch's child nodes were removed - assert_matches!(lower_subtrie_20.nodes.get(&Nibbles::from_nibbles([0x2, 0x0])), None); - assert_matches!(lower_subtrie_21.nodes.get(&Nibbles::from_nibbles([0x2, 0x1])), None); + // Assert the root hash matches the expected value + let expected_root = + b256!("29b07de8376e9ce7b3a69e9b102199869514d3f42590b5abc6f7d48ec9b8665c"); + assert_eq!(trie.root(), expected_root); } #[test] - fn test_remove_leaf_remaining_child_needs_reveal() { - // - // 0x: Branch (Mask = 0011) - // 0x0: ├── 0 -> Leaf (Key = 12) - // 0x1: └── 1 -> Hash (blinded leaf) - // - // After removing 0x012, the hash node needs to be revealed to collapse the branch - // - let mut trie = new_test_trie( - [ - (Nibbles::default(), SparseNode::new_branch(TrieMask::new(0b0011))), - ( - Nibbles::from_nibbles([0x0]), - SparseNode::new_leaf(Nibbles::from_nibbles([0x1, 0x2])), - ), - (Nibbles::from_nibbles([0x1]), SparseNode::Hash(B256::repeat_byte(0xab))), - ] - .into_iter(), - ); + fn find_leaf_existing_leaf() { + // Create a simple trie with one leaf + let provider = DefaultTrieNodeProvider; + let mut sparse = ParallelSparseTrie::default(); + let path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); 
+ let value = b"test_value".to_vec(); + + sparse.update_leaf(path, value.clone(), &provider).unwrap(); + + // Check that the leaf exists + let result = sparse.find_leaf(&path, None); + assert_matches!(result, Ok(LeafLookup::Exists)); + + // Check with expected value matching + let result = sparse.find_leaf(&path, Some(&value)); + assert_matches!(result, Ok(LeafLookup::Exists)); + } - // Create a mock provider that will reveal the blinded leaf - let mut provider = MockBlindedProvider::new(); - let revealed_leaf = create_leaf_node([0x3, 0x4], 42); - let mut encoded = Vec::new(); - revealed_leaf.encode(&mut encoded); - provider.add_revealed_node( - Nibbles::from_nibbles([0x1]), - RevealedNode { node: encoded.into(), tree_mask: None, hash_mask: None }, + #[test] + fn find_leaf_value_mismatch() { + // Create a simple trie with one leaf + let provider = DefaultTrieNodeProvider; + let mut sparse = ParallelSparseTrie::default(); + let path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); + let value = b"test_value".to_vec(); + let wrong_value = b"wrong_value".to_vec(); + + sparse.update_leaf(path, value, &provider).unwrap(); + + // Check with wrong expected value + let result = sparse.find_leaf(&path, Some(&wrong_value)); + assert_matches!( + result, + Err(LeafLookupError::ValueMismatch { path: p, expected: Some(e), actual: _a }) if p == path && e == wrong_value ); + } - // Remove the leaf with a full path of 0x012 - let leaf_full_path = Nibbles::from_nibbles([0x0, 0x1, 0x2]); - trie.remove_leaf(&leaf_full_path, provider).unwrap(); + #[test] + fn find_leaf_not_found_empty_trie() { + // Empty trie + let sparse = ParallelSparseTrie::default(); + let path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); + + // Leaf should not exist + let result = sparse.find_leaf(&path, None); + assert_matches!(result, Ok(LeafLookup::NonExistent)); + } - let upper_subtrie = &trie.upper_subtrie; + #[test] + fn find_leaf_empty_trie() { + let sparse = ParallelSparseTrie::default(); + let path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); - // Check that the leaf value was removed - assert_matches!(upper_subtrie.inner.values.get(&leaf_full_path), None); + let result = sparse.find_leaf(&path, None); + assert_matches!(result, Ok(LeafLookup::NonExistent)); + } - // Check that the branch node collapsed into a leaf node with the revealed child's key - assert_matches!( - upper_subtrie.nodes.get(&Nibbles::default()), - Some(SparseNode::Leaf{ key, ..}) - if key == &Nibbles::from_nibbles([0x1, 0x3, 0x4]) - ); + #[test] + fn find_leaf_exists_no_value_check() { + let provider = DefaultTrieNodeProvider; + let mut sparse = ParallelSparseTrie::default(); + let path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); + sparse.update_leaf(path, encode_account_value(0), &provider).unwrap(); + + let result = sparse.find_leaf(&path, None); + assert_matches!(result, Ok(LeafLookup::Exists)); + } - // Check that the remaining child node was removed (since it was merged) - assert_matches!(upper_subtrie.nodes.get(&Nibbles::from_nibbles([0x1])), None); + #[test] + fn find_leaf_exists_with_value_check_ok() { + let provider = DefaultTrieNodeProvider; + let mut sparse = ParallelSparseTrie::default(); + let path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); + let value = encode_account_value(0); + sparse.update_leaf(path, value.clone(), &provider).unwrap(); + + let result = sparse.find_leaf(&path, Some(&value)); + assert_matches!(result, Ok(LeafLookup::Exists)); } #[test] - fn test_remove_leaf_root() { - // - // 0x: Leaf (Key = 123) - 
// - // After removing 0x123, the trie becomes empty - // - let mut trie = new_test_trie(std::iter::once(( - Nibbles::default(), - SparseNode::new_leaf(Nibbles::from_nibbles([0x1, 0x2, 0x3])), - ))); + fn find_leaf_exclusion_branch_divergence() { + let provider = DefaultTrieNodeProvider; + let mut sparse = ParallelSparseTrie::default(); + let path1 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); // Creates branch at 0x12 + let path2 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x5, 0x6]); // Belongs to same branch + let search_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x7, 0x8]); // Diverges at nibble 7 + + sparse.update_leaf(path1, encode_account_value(0), &provider).unwrap(); + sparse.update_leaf(path2, encode_account_value(1), &provider).unwrap(); + + let result = sparse.find_leaf(&search_path, None); + assert_matches!(result, Ok(LeafLookup::NonExistent)) + } - let provider = MockBlindedProvider::new(); + #[test] + fn find_leaf_exclusion_extension_divergence() { + let provider = DefaultTrieNodeProvider; + let mut sparse = ParallelSparseTrie::default(); + // This will create an extension node at root with key 0x12 + let path1 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4, 0x5, 0x6]); + // This path diverges from the extension key + let search_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x7, 0x8]); + + sparse.update_leaf(path1, encode_account_value(0), &provider).unwrap(); + + let result = sparse.find_leaf(&search_path, None); + assert_matches!(result, Ok(LeafLookup::NonExistent)) + } - // Remove the leaf with a full key of 0x123 - let leaf_full_path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); - trie.remove_leaf(&leaf_full_path, provider).unwrap(); + #[test] + fn find_leaf_exclusion_leaf_divergence() { + let provider = DefaultTrieNodeProvider; + let mut sparse = ParallelSparseTrie::default(); + let existing_leaf_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); + let search_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4, 0x5, 0x6]); - let upper_subtrie = &trie.upper_subtrie; + sparse.update_leaf(existing_leaf_path, encode_account_value(0), &provider).unwrap(); - // Check that the leaf value was removed - assert_matches!(upper_subtrie.inner.values.get(&leaf_full_path), None); + let result = sparse.find_leaf(&search_path, None); + assert_matches!(result, Ok(LeafLookup::NonExistent)) + } - // Check that the root node was changed to Empty - assert_matches!(upper_subtrie.nodes.get(&Nibbles::default()), Some(SparseNode::Empty)); + #[test] + fn find_leaf_exclusion_path_ends_at_branch() { + let provider = DefaultTrieNodeProvider; + let mut sparse = ParallelSparseTrie::default(); + let path1 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); // Creates branch at 0x12 + let path2 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x5, 0x6]); + let search_path = Nibbles::from_nibbles_unchecked([0x1, 0x2]); // Path of the branch itself + + sparse.update_leaf(path1, encode_account_value(0), &provider).unwrap(); + sparse.update_leaf(path2, encode_account_value(1), &provider).unwrap(); + + let result = sparse.find_leaf(&search_path, None); + assert_matches!(result, Ok(LeafLookup::NonExistent)); } #[test] - fn test_remove_leaf_unsets_hash_along_path() { - // - // Creates a trie structure: - // 0x: Branch (with hash set) - // 0x0: ├── Extension (with hash set) - // 0x01: │ └── Branch (with hash set) - // 0x012: │ ├── Leaf (Key = 34, with hash set) - // 0x013: │ ├── Leaf (Key = 56, with hash set) - // 0x014: │ └── Leaf (Key = 78, with hash set) - // 0x1: 
└── Leaf (Key = 78, with hash set) - // - // When removing leaf at 0x01234, all nodes along the path (root branch, - // extension at 0x0, branch at 0x01) should have their hash field unset - // + fn find_leaf_error_blinded_node_at_leaf_path() { + // Scenario: The node *at* the leaf path is blinded. + let blinded_hash = B256::repeat_byte(0xBB); + let leaf_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); - let mut trie = new_test_trie( + let sparse = new_test_trie( [ ( + // Ext 0x12 Nibbles::default(), - SparseNode::Branch { - state_mask: TrieMask::new(0b0011), - hash: Some(B256::repeat_byte(0x10)), - store_in_db_trie: None, - }, + SparseNode::new_ext(Nibbles::from_nibbles_unchecked([0x1, 0x2])), ), ( - Nibbles::from_nibbles([0x0]), - SparseNode::Extension { - key: Nibbles::from_nibbles([0x1]), - hash: Some(B256::repeat_byte(0x20)), - store_in_db_trie: None, - }, - ), - ( - Nibbles::from_nibbles([0x0, 0x1]), - SparseNode::Branch { - state_mask: TrieMask::new(0b11100), - hash: Some(B256::repeat_byte(0x30)), - store_in_db_trie: None, - }, - ), - ( - Nibbles::from_nibbles([0x0, 0x1, 0x2]), - SparseNode::Leaf { - key: Nibbles::from_nibbles([0x3, 0x4]), - hash: Some(B256::repeat_byte(0x40)), - }, - ), - ( - Nibbles::from_nibbles([0x0, 0x1, 0x3]), - SparseNode::Leaf { - key: Nibbles::from_nibbles([0x5, 0x6]), - hash: Some(B256::repeat_byte(0x50)), - }, + // Ext 0x123 + Nibbles::from_nibbles_unchecked([0x1, 0x2]), + SparseNode::new_ext(Nibbles::from_nibbles_unchecked([0x3])), ), ( - Nibbles::from_nibbles([0x0, 0x1, 0x4]), - SparseNode::Leaf { - key: Nibbles::from_nibbles([0x6, 0x7]), - hash: Some(B256::repeat_byte(0x60)), - }, + // Branch at 0x123, child 4 + Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3]), + SparseNode::new_branch(TrieMask::new(0b10000)), ), ( - Nibbles::from_nibbles([0x1]), - SparseNode::Leaf { - key: Nibbles::from_nibbles([0x7, 0x8]), - hash: Some(B256::repeat_byte(0x70)), - }, + // Blinded node at 0x1234 + leaf_path, + SparseNode::Hash(blinded_hash), ), ] .into_iter(), ); - let provider = MockBlindedProvider::new(); - - // Remove the leaf at path 0x01234 - let leaf_full_path = Nibbles::from_nibbles([0x0, 0x1, 0x2, 0x3, 0x4]); - trie.remove_leaf(&leaf_full_path, provider).unwrap(); - - let upper_subtrie = &trie.upper_subtrie; - let lower_subtrie_10 = trie.lower_subtries[0x01].as_ref().unwrap(); - - // Verify that hash fields are unset for all nodes along the path to the removed leaf - assert_matches!( - upper_subtrie.nodes.get(&Nibbles::default()), - Some(SparseNode::Branch { hash: None, .. }) - ); - assert_matches!( - upper_subtrie.nodes.get(&Nibbles::from_nibbles([0x0])), - Some(SparseNode::Extension { hash: None, .. }) - ); - assert_matches!( - lower_subtrie_10.nodes.get(&Nibbles::from_nibbles([0x0, 0x1])), - Some(SparseNode::Branch { hash: None, .. }) - ); + let result = sparse.find_leaf(&leaf_path, None); - // Verify that nodes not on the path still have their hashes - assert_matches!( - upper_subtrie.nodes.get(&Nibbles::from_nibbles([0x1])), - Some(SparseNode::Leaf { hash: Some(_), .. }) - ); - assert_matches!( - lower_subtrie_10.nodes.get(&Nibbles::from_nibbles([0x0, 0x1, 0x3])), - Some(SparseNode::Leaf { hash: Some(_), .. }) - ); - assert_matches!( - lower_subtrie_10.nodes.get(&Nibbles::from_nibbles([0x0, 0x1, 0x4])), - Some(SparseNode::Leaf { hash: Some(_), .. 
}) + // Should error because it hit the blinded node exactly at the leaf path + assert_matches!(result, Err(LeafLookupError::BlindedNode { path, hash }) + if path == leaf_path && hash == blinded_hash ); } #[test] - fn test_parallel_sparse_trie_root() { - let mut trie = ParallelSparseTrie::default().with_updates(true); - - // Step 1: Create the trie structure - // Extension node at 0x with key 0x2 (goes to upper subtrie) - let extension_path = Nibbles::new(); - let extension_key = Nibbles::from_nibbles([0x2]); - - // Branch node at 0x2 with children 0 and 1 (goes to upper subtrie) - let branch_path = Nibbles::from_nibbles([0x2]); - - // Leaf nodes at 0x20 and 0x21 (go to lower subtries) - let leaf_1_path = Nibbles::from_nibbles([0x2, 0x0]); - let leaf_1_key = Nibbles::from_nibbles(vec![0; 62]); // Remaining key - let leaf_1_full_path = Nibbles::from_nibbles([vec![0x2, 0x0], vec![0; 62]].concat()); - - let leaf_2_path = Nibbles::from_nibbles([0x2, 0x1]); - let leaf_2_key = Nibbles::from_nibbles(vec![0; 62]); // Remaining key - let leaf_2_full_path = Nibbles::from_nibbles([vec![0x2, 0x1], vec![0; 62]].concat()); - - // Create accounts - let account_1 = create_account(1); - let account_2 = create_account(2); - - // Create leaf nodes - let leaf_1 = create_leaf_node(leaf_1_key.to_vec(), account_1.nonce); - let leaf_2 = create_leaf_node(leaf_2_key.to_vec(), account_2.nonce); - - // Create branch node with children at indices 0 and 1 - let branch = create_branch_node_with_children( - &[0, 1], - vec![ - RlpNode::from_rlp(&alloy_rlp::encode(&leaf_1)), - RlpNode::from_rlp(&alloy_rlp::encode(&leaf_2)), - ], - ); + fn find_leaf_error_blinded_node() { + let blinded_hash = B256::repeat_byte(0xAA); + let path_to_blind = Nibbles::from_nibbles_unchecked([0x1]); + let search_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); - // Create extension node pointing to branch - let extension = create_extension_node( - extension_key.to_vec(), - RlpNode::from_rlp(&alloy_rlp::encode(&branch)).as_hash().unwrap(), + let sparse = new_test_trie( + [ + // Root is a branch with child 0x1 (blinded) and 0x5 (revealed leaf) + // So we set Bit 1 and Bit 5 in the state_mask + (Nibbles::default(), SparseNode::new_branch(TrieMask::new(0b100010))), + (path_to_blind, SparseNode::Hash(blinded_hash)), + ( + Nibbles::from_nibbles_unchecked([0x5]), + SparseNode::new_leaf(Nibbles::from_nibbles_unchecked([0x6, 0x7, 0x8])), + ), + ] + .into_iter(), ); - // Step 2: Reveal nodes in the trie - trie.reveal_node(extension_path, extension, TrieMasks::none()).unwrap(); - trie.reveal_node(branch_path, branch, TrieMasks::none()).unwrap(); - trie.reveal_node(leaf_1_path, leaf_1, TrieMasks::none()).unwrap(); - trie.reveal_node(leaf_2_path, leaf_2, TrieMasks::none()).unwrap(); - - // Step 3: Reset hashes for all revealed nodes to test actual hash calculation - // Reset upper subtrie node hashes - trie.upper_subtrie.nodes.get_mut(&extension_path).unwrap().set_hash(None); - trie.upper_subtrie.nodes.get_mut(&branch_path).unwrap().set_hash(None); - - // Reset lower subtrie node hashes - let leaf_1_subtrie_idx = path_subtrie_index_unchecked(&leaf_1_path); - let leaf_2_subtrie_idx = path_subtrie_index_unchecked(&leaf_2_path); - - trie.lower_subtries[leaf_1_subtrie_idx] - .as_mut() - .unwrap() - .nodes - .get_mut(&leaf_1_path) - .unwrap() - .set_hash(None); - trie.lower_subtries[leaf_2_subtrie_idx] - .as_mut() - .unwrap() - .nodes - .get_mut(&leaf_2_path) - .unwrap() - .set_hash(None); - - // Step 4: Add changed leaf node paths to prefix set - 
-        trie.prefix_set.insert(leaf_1_full_path);
-        trie.prefix_set.insert(leaf_2_full_path);
-
-        // Step 5: Calculate root using our implementation
-        let root = trie.root();
+        let result = sparse.find_leaf(&search_path, None);

-        // Step 6: Calculate root using HashBuilder for comparison
-        let (hash_builder_root, _, _proof_nodes, _, _) = run_hash_builder(
-            [(leaf_1_full_path, account_1), (leaf_2_full_path, account_2)],
-            NoopAccountTrieCursor::default(),
-            Default::default(),
-            [extension_path, branch_path, leaf_1_full_path, leaf_2_full_path],
+        // Should error because it hit the blinded node at path 0x1
+        assert_matches!(result, Err(LeafLookupError::BlindedNode { path, hash })
+            if path == path_to_blind && hash == blinded_hash
         );
-
-        // Step 7: Verify the roots match
-        assert_eq!(root, hash_builder_root);
-
-        // Verify hashes were computed
-        let leaf_1_subtrie = trie.lower_subtries[leaf_1_subtrie_idx].as_ref().unwrap();
-        let leaf_2_subtrie = trie.lower_subtries[leaf_2_subtrie_idx].as_ref().unwrap();
-        assert!(trie.upper_subtrie.nodes.get(&extension_path).unwrap().hash().is_some());
-        assert!(trie.upper_subtrie.nodes.get(&branch_path).unwrap().hash().is_some());
-        assert!(leaf_1_subtrie.nodes.get(&leaf_1_path).unwrap().hash().is_some());
-        assert!(leaf_2_subtrie.nodes.get(&leaf_2_path).unwrap().hash().is_some());
     }
 }
diff --git a/crates/trie/sparse/benches/rlp_node.rs b/crates/trie/sparse/benches/rlp_node.rs
index 113392fca54..9f2337f31b8 100644
--- a/crates/trie/sparse/benches/rlp_node.rs
+++ b/crates/trie/sparse/benches/rlp_node.rs
@@ -7,7 +7,7 @@ use proptest::{prelude::*, test_runner::TestRunner};
 use rand::{seq::IteratorRandom, Rng};
 use reth_testing_utils::generators;
 use reth_trie::Nibbles;
-use reth_trie_sparse::RevealedSparseTrie;
+use reth_trie_sparse::{provider::DefaultTrieNodeProvider, SerialSparseTrie, SparseTrieInterface};

 fn update_rlp_node_level(c: &mut Criterion) {
     let mut rng = generators::rng();
@@ -22,10 +22,15 @@ fn update_rlp_node_level(c: &mut Criterion) {
         .current();

     // Create a sparse trie with `size` leaves
-    let mut sparse = RevealedSparseTrie::default();
+    let provider = DefaultTrieNodeProvider;
+    let mut sparse = SerialSparseTrie::default();
     for (key, value) in &state {
         sparse
-            .update_leaf(Nibbles::unpack(key), alloy_rlp::encode_fixed_size(value).to_vec())
+            .update_leaf(
+                Nibbles::unpack(key),
+                alloy_rlp::encode_fixed_size(value).to_vec(),
+                &provider,
+            )
             .unwrap();
     }
     sparse.root();
@@ -39,6 +44,7 @@ fn update_rlp_node_level(c: &mut Criterion) {
                 .update_leaf(
                     Nibbles::unpack(key),
                     alloy_rlp::encode_fixed_size(&rng.random::<U256>()).to_vec(),
+                    &provider,
                 )
                 .unwrap();
         }
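The bench above shows the migration pattern in miniature: `RevealedSparseTrie` is now `SerialSparseTrie`, its mutating methods come from the `SparseTrieInterface` trait, and every `update_leaf` call threads a node provider. A minimal sketch of the new calling convention, using only items imported in that file (the key and value literals below are placeholders, not from the diff):

    use reth_trie::Nibbles;
    use reth_trie_sparse::{provider::DefaultTrieNodeProvider, SerialSparseTrie, SparseTrieInterface};

    let provider = DefaultTrieNodeProvider; // never resolves a blinded node
    let mut sparse = SerialSparseTrie::default();
    // Insert one leaf and compute the root, exactly as the bench loop does.
    sparse.update_leaf(Nibbles::unpack([0u8; 32]), vec![0x01], &provider).unwrap();
    let _root = sparse.root();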
diff --git a/crates/trie/sparse/benches/root.rs b/crates/trie/sparse/benches/root.rs
index f4d461ae51a..ed88921ecf2 100644
--- a/crates/trie/sparse/benches/root.rs
+++ b/crates/trie/sparse/benches/root.rs
@@ -13,7 +13,7 @@ use reth_trie::{
     HashedStorage,
 };
 use reth_trie_common::{HashBuilder, Nibbles};
-use reth_trie_sparse::SparseTrie;
+use reth_trie_sparse::{provider::DefaultTrieNodeProvider, SerialSparseTrie, SparseTrie};

 fn calculate_root_from_leaves(c: &mut Criterion) {
     let mut group = c.benchmark_group("calculate root from leaves");
@@ -40,13 +40,15 @@ fn calculate_root_from_leaves(c: &mut Criterion) {
         });

         // sparse trie
+        let provider = DefaultTrieNodeProvider;
         group.bench_function(BenchmarkId::new("sparse trie", size), |b| {
-            b.iter_with_setup(SparseTrie::revealed_empty, |mut sparse| {
+            b.iter_with_setup(SparseTrie::<SerialSparseTrie>::revealed_empty, |mut sparse| {
                 for (key, value) in &state {
                     sparse
                         .update_leaf(
                             Nibbles::unpack(key),
                             alloy_rlp::encode_fixed_size(value).to_vec(),
+                            &provider,
                         )
                         .unwrap();
                 }
@@ -177,6 +179,7 @@ fn calculate_root_from_leaves_repeated(c: &mut Criterion) {
         });

         // sparse trie
+        let provider = DefaultTrieNodeProvider;
         let benchmark_id = BenchmarkId::new(
             "sparse trie",
             format!(
@@ -186,12 +189,13 @@ fn calculate_root_from_leaves_repeated(c: &mut Criterion) {
         group.bench_function(benchmark_id, |b| {
             b.iter_with_setup(
                 || {
-                    let mut sparse = SparseTrie::revealed_empty();
+                    let mut sparse = SparseTrie::<SerialSparseTrie>::revealed_empty();
                     for (key, value) in &init_state {
                         sparse
                             .update_leaf(
                                 Nibbles::unpack(key),
                                 alloy_rlp::encode_fixed_size(value).to_vec(),
+                                &provider,
                             )
                             .unwrap();
                     }
@@ -205,6 +209,7 @@ fn calculate_root_from_leaves_repeated(c: &mut Criterion) {
                             .update_leaf(
                                 Nibbles::unpack(key),
                                 alloy_rlp::encode_fixed_size(value).to_vec(),
+                                &provider,
                             )
                             .unwrap();
                     }
diff --git a/crates/trie/sparse/benches/update.rs b/crates/trie/sparse/benches/update.rs
index 4b2971c1e05..dff0260a9a4 100644
--- a/crates/trie/sparse/benches/update.rs
+++ b/crates/trie/sparse/benches/update.rs
@@ -5,7 +5,7 @@ use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion};
 use proptest::{prelude::*, strategy::ValueTree};
 use rand::seq::IteratorRandom;
 use reth_trie_common::Nibbles;
-use reth_trie_sparse::SparseTrie;
+use reth_trie_sparse::{provider::DefaultTrieNodeProvider, SerialSparseTrie, SparseTrie};

 const LEAF_COUNTS: [usize; 2] = [1_000, 5_000];
@@ -16,14 +16,16 @@ fn update_leaf(c: &mut Criterion) {
         group.bench_function(BenchmarkId::from_parameter(leaf_count), |b| {
             let leaves = generate_leaves(leaf_count);
             // Start with an empty trie
-            let mut trie = SparseTrie::revealed_empty();
-            // Pre-populate with data
-            for (path, value) in leaves.iter().cloned() {
-                trie.update_leaf(path, value).unwrap();
-            }
+            let provider = DefaultTrieNodeProvider;

             b.iter_batched(
                 || {
+                    let mut trie = SparseTrie::<SerialSparseTrie>::revealed_empty();
+                    // Pre-populate with data
+                    for (path, value) in leaves.iter().cloned() {
+                        trie.update_leaf(path, value, &provider).unwrap();
+                    }
+
                     let new_leaves = leaves
                         .iter()
                         // Update 10% of existing leaves with new values
@@ -37,11 +39,11 @@ fn update_leaf(c: &mut Criterion) {
                         })
                         .collect::<Vec<_>>();

-                    (trie.clone(), new_leaves)
+                    (trie, new_leaves)
                 },
                 |(mut trie, new_leaves)| {
                     for (path, new_value) in new_leaves {
-                        trie.update_leaf(*path, new_value).unwrap();
+                        trie.update_leaf(*path, new_value, &provider).unwrap();
                     }
                     trie
                 },
@@ -58,25 +60,27 @@ fn remove_leaf(c: &mut Criterion) {
         group.bench_function(BenchmarkId::from_parameter(leaf_count), |b| {
             let leaves = generate_leaves(leaf_count);
             // Start with an empty trie
-            let mut trie = SparseTrie::revealed_empty();
-            // Pre-populate with data
-            for (path, value) in leaves.iter().cloned() {
-                trie.update_leaf(path, value).unwrap();
-            }
+            let provider = DefaultTrieNodeProvider;

             b.iter_batched(
                 || {
+                    let mut trie = SparseTrie::<SerialSparseTrie>::revealed_empty();
+                    // Pre-populate with data
+                    for (path, value) in leaves.iter().cloned() {
+                        trie.update_leaf(path, value, &provider).unwrap();
+                    }
+
                     let delete_leaves = leaves
                         .iter()
                         .map(|(path, _)| path)
                         // Remove 10% leaves
                         .choose_multiple(&mut rand::rng(), leaf_count / 10);

-                    (trie.clone(), delete_leaves)
+                    (trie, delete_leaves)
                 },
                 |(mut trie, delete_leaves)| {
                     for path in delete_leaves {
-                        trie.remove_leaf(path).unwrap();
+                        trie.remove_leaf(path, &provider).unwrap();
                     }
                     trie
                 },
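One side effect of the provider parameter is visible in the `iter_batched` rewrites above: the pre-populated trie is now rebuilt inside the setup closure and moved into the measured closure, rather than cloned per batch, so the provider is the only piece shared across iterations. The same three-argument call shape applies to the `SparseTrie` wrapper used in these benches. In sketch form (path and value are placeholders):

    use reth_trie_common::Nibbles;
    use reth_trie_sparse::{provider::DefaultTrieNodeProvider, SerialSparseTrie, SparseTrie};

    let provider = DefaultTrieNodeProvider;
    // Pin the trie implementation with a turbofish, as the benches do.
    let mut trie = SparseTrie::<SerialSparseTrie>::revealed_empty();
    let path = Nibbles::unpack([0u8; 32]);
    trie.update_leaf(path, vec![0x01], &provider).unwrap();
    trie.remove_leaf(&path, &provider).unwrap();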
diff --git a/crates/trie/sparse/src/lib.rs b/crates/trie/sparse/src/lib.rs
index 617622d194f..6b175970481 100644
--- a/crates/trie/sparse/src/lib.rs
+++ b/crates/trie/sparse/src/lib.rs
@@ -11,7 +11,10 @@ pub use state::*;
 mod trie;
 pub use trie::*;

-pub mod blinded;
+mod traits;
+pub use traits::*;
+
+pub mod provider;

 #[cfg(feature = "metrics")]
 mod metrics;
diff --git a/crates/trie/sparse/src/blinded.rs b/crates/trie/sparse/src/provider.rs
similarity index 58%
rename from crates/trie/sparse/src/blinded.rs
rename to crates/trie/sparse/src/provider.rs
index b42012eb8ea..405b3a84747 100644
--- a/crates/trie/sparse/src/blinded.rs
+++ b/crates/trie/sparse/src/provider.rs
@@ -4,13 +4,13 @@ use alloy_primitives::{Bytes, B256};
 use reth_execution_errors::SparseTrieError;
 use reth_trie_common::{Nibbles, TrieMask};

-/// Factory for instantiating blinded node providers.
+/// Factory for instantiating trie node providers.
 #[auto_impl::auto_impl(&)]
-pub trait BlindedProviderFactory {
+pub trait TrieNodeProviderFactory {
     /// Type capable of fetching blinded account nodes.
-    type AccountNodeProvider: BlindedProvider;
+    type AccountNodeProvider: TrieNodeProvider;
     /// Type capable of fetching blinded storage nodes.
-    type StorageNodeProvider: BlindedProvider;
+    type StorageNodeProvider: TrieNodeProvider;

     /// Returns blinded account node provider.
     fn account_node_provider(&self) -> Self::AccountNodeProvider;
@@ -30,36 +30,36 @@ pub struct RevealedNode {
     pub hash_mask: Option<TrieMask>,
 }

-/// Trie node provider for retrieving blinded nodes.
+/// Trie node provider for retrieving trie nodes.
 #[auto_impl::auto_impl(&)]
-pub trait BlindedProvider {
-    /// Retrieve blinded node by path.
-    fn blinded_node(&self, path: &Nibbles) -> Result<Option<RevealedNode>, SparseTrieError>;
+pub trait TrieNodeProvider {
+    /// Retrieve trie node by path.
+    fn trie_node(&self, path: &Nibbles) -> Result<Option<RevealedNode>, SparseTrieError>;
 }

-/// Default blinded node provider factory that creates [`DefaultBlindedProvider`].
+/// Default trie node provider factory that creates [`DefaultTrieNodeProvider`].
 #[derive(PartialEq, Eq, Clone, Default, Debug)]
-pub struct DefaultBlindedProviderFactory;
+pub struct DefaultTrieNodeProviderFactory;

-impl BlindedProviderFactory for DefaultBlindedProviderFactory {
-    type AccountNodeProvider = DefaultBlindedProvider;
-    type StorageNodeProvider = DefaultBlindedProvider;
+impl TrieNodeProviderFactory for DefaultTrieNodeProviderFactory {
+    type AccountNodeProvider = DefaultTrieNodeProvider;
+    type StorageNodeProvider = DefaultTrieNodeProvider;

     fn account_node_provider(&self) -> Self::AccountNodeProvider {
-        DefaultBlindedProvider
+        DefaultTrieNodeProvider
     }

     fn storage_node_provider(&self, _account: B256) -> Self::StorageNodeProvider {
-        DefaultBlindedProvider
+        DefaultTrieNodeProvider
     }
 }
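The split between the two renamed traits mirrors how they are consumed: a `TrieNodeProviderFactory` hands out one provider for the account trie and one per storage trie, and each `TrieNodeProvider` answers point lookups by path. A caller with its own node source might plug in like this (a sketch only; the map-backed store is hypothetical, and it assumes `RevealedNode` is `Clone`):

    use alloy_primitives::map::HashMap;
    use reth_execution_errors::SparseTrieError;
    use reth_trie_common::Nibbles;
    use reth_trie_sparse::provider::{RevealedNode, TrieNodeProvider};

    struct MapProvider {
        /// Hypothetical caller-owned store of pre-fetched nodes.
        nodes: HashMap<Nibbles, RevealedNode>,
    }

    impl TrieNodeProvider for MapProvider {
        fn trie_node(&self, path: &Nibbles) -> Result<Option<RevealedNode>, SparseTrieError> {
            // Returning `Ok(None)` keeps the node blinded, exactly like the
            // default provider defined below.
            Ok(self.nodes.get(path).cloned())
        }
    }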
-/// Default blinded node provider that always returns `Ok(None)`.
+/// Default trie node provider that always returns `Ok(None)`.
 #[derive(PartialEq, Eq, Clone, Default, Debug)]
-pub struct DefaultBlindedProvider;
+pub struct DefaultTrieNodeProvider;

-impl BlindedProvider for DefaultBlindedProvider {
-    fn blinded_node(&self, _path: &Nibbles) -> Result<Option<RevealedNode>, SparseTrieError> {
+impl TrieNodeProvider for DefaultTrieNodeProvider {
+    fn trie_node(&self, _path: &Nibbles) -> Result<Option<RevealedNode>, SparseTrieError> {
         Ok(None)
     }
 }
diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs
index 66c3596363c..0739d6946a3 100644
--- a/crates/trie/sparse/src/state.rs
+++ b/crates/trie/sparse/src/state.rs
@@ -1,16 +1,15 @@
 use crate::{
-    blinded::{BlindedProvider, BlindedProviderFactory, DefaultBlindedProviderFactory},
-    LeafLookup, RevealedSparseTrie, SparseTrie, SparseTrieState, TrieMasks,
+    provider::{TrieNodeProvider, TrieNodeProviderFactory},
+    traits::SparseTrieInterface,
+    RevealedSparseNode, SerialSparseTrie, SparseTrie, TrieMasks,
 };
 use alloc::{collections::VecDeque, vec::Vec};
 use alloy_primitives::{
-    hex,
     map::{B256Map, HashMap, HashSet},
     Bytes, B256,
 };
 use alloy_rlp::{Decodable, Encodable};
 use alloy_trie::proof::DecodedProofNodes;
-use core::{fmt, iter::Peekable};
 use reth_execution_errors::{SparseStateTrieErrorKind, SparseStateTrieResult, SparseTrieErrorKind};
 use reth_primitives_traits::Account;
 use reth_trie_common::{
@@ -21,14 +20,48 @@ use reth_trie_common::{
 };
 use tracing::trace;

+/// Provides type-safe re-use of cleared [`SparseStateTrie`]s, which helps to save allocations
+/// across payload runs.
+#[derive(Debug)]
+pub struct ClearedSparseStateTrie<
+    A = SerialSparseTrie, // Account trie implementation
+    S = SerialSparseTrie, // Storage trie implementation
+>(SparseStateTrie<A, S>);
+
+impl<A, S> ClearedSparseStateTrie<A, S>
+where
+    A: SparseTrieInterface + Default,
+    S: SparseTrieInterface + Default,
+{
+    /// Creates a [`ClearedSparseStateTrie`] by clearing all the existing internal state of a
+    /// [`SparseStateTrie`] and then storing that instance for later re-use.
+    pub fn from_state_trie(mut trie: SparseStateTrie<A, S>) -> Self {
+        trie.state = trie.state.clear();
+        trie.cleared_storages.extend(trie.storages.drain().map(|(_, trie)| trie.clear()));
+        trie.revealed_account_paths.clear();
+        trie.revealed_storage_paths.clear();
+        trie.account_rlp_buf.clear();
+        Self(trie)
+    }
+
+    /// Returns the cleared [`SparseStateTrie`], consuming this instance.
+    pub fn into_inner(self) -> SparseStateTrie<A, S> {
+        self.0
+    }
+}
+
+#[derive(Debug)]
 /// Sparse state trie representing lazy-loaded Ethereum state trie.
-pub struct SparseStateTrie<F: BlindedProviderFactory = DefaultBlindedProviderFactory> {
-    /// Blinded node provider factory.
-    provider_factory: F,
+pub struct SparseStateTrie<
+    A = SerialSparseTrie, // Account trie implementation
+    S = SerialSparseTrie, // Storage trie implementation
+> {
     /// Sparse account trie.
-    state: SparseTrie<F::AccountNodeProvider>,
+    state: SparseTrie<A>,
     /// Sparse storage tries.
-    storages: B256Map<SparseTrie<F::StorageNodeProvider>>,
+    storages: B256Map<SparseTrie<S>>,
+    /// Cleared storage tries, kept for re-use
+    cleared_storages: Vec<SparseTrie<S>>,
     /// Collection of revealed account trie paths.
     revealed_account_paths: HashSet<Nibbles>,
     /// Collection of revealed storage trie paths, per account.
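The `ClearedSparseStateTrie` wrapper added above is the intended hand-off point between payload runs: clearing drops the contents of the account trie and every storage trie while keeping their buffers allocated. A sketch of the lifecycle, using only the methods from this diff (both type parameters left at their declared `SerialSparseTrie` defaults):

    use reth_trie_sparse::{ClearedSparseStateTrie, SparseStateTrie};

    let trie: SparseStateTrie = SparseStateTrie::new().with_updates(true);
    // ... reveal proof nodes and compute a state root for one payload ...

    // Clear the contents but keep the allocations, then recover the
    // instance for the next payload run.
    let cleared = ClearedSparseStateTrie::from_state_trie(trie);
    let trie: SparseStateTrie = cleared.into_inner();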
@@ -42,13 +75,16 @@ pub struct SparseStateTrie<F: BlindedProviderFactory = DefaultBlindedProviderFactory> {
 }

-impl Default for SparseStateTrie {
+impl<A, S> Default for SparseStateTrie<A, S>
+where
+    A: Default,
+    S: Default,
+{
     fn default() -> Self {
         Self {
-            provider_factory: DefaultBlindedProviderFactory,
             state: Default::default(),
             storages: Default::default(),
+            cleared_storages: Default::default(),
             revealed_account_paths: Default::default(),
             revealed_storage_paths: Default::default(),
             retain_updates: false,
@@ -59,19 +95,6 @@ impl Default for SparseStateTrie {
         }
     }
 }

-impl<F: BlindedProviderFactory> fmt::Debug for SparseStateTrie<F> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.debug_struct("SparseStateTrie")
-            .field("state", &self.state)
-            .field("storages", &self.storages)
-            .field("revealed_account_paths", &self.revealed_account_paths)
-            .field("revealed_storage_paths", &self.revealed_storage_paths)
-            .field("retain_updates", &self.retain_updates)
-            .field("account_rlp_buf", &hex::encode(&self.account_rlp_buf))
-            .finish_non_exhaustive()
-    }
-}
-
 #[cfg(test)]
 impl SparseStateTrie {
     /// Create state trie from state trie.
@@ -80,43 +103,35 @@ impl SparseStateTrie {
     }
 }

-impl<F: BlindedProviderFactory> SparseStateTrie<F> {
-    /// Create new [`SparseStateTrie`] with blinded node provider factory.
-    pub fn new(provider_factory: F) -> Self {
-        Self {
-            provider_factory,
-            state: Default::default(),
-            storages: Default::default(),
-            revealed_account_paths: Default::default(),
-            revealed_storage_paths: Default::default(),
-            retain_updates: false,
-            account_rlp_buf: Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE),
-            #[cfg(feature = "metrics")]
-            metrics: Default::default(),
-        }
-    }
-
+impl<A, S> SparseStateTrie<A, S> {
     /// Set the retention of branch node updates and deletions.
     pub const fn with_updates(mut self, retain_updates: bool) -> Self {
         self.retain_updates = retain_updates;
         self
     }

+    /// Set the accounts trie to the given `SparseTrie`.
+    pub fn with_accounts_trie(mut self, trie: SparseTrie<A>) -> Self {
+        self.state = trie;
+        self
+    }
+}
+
+impl<A, S> SparseStateTrie<A, S>
+where
+    A: SparseTrieInterface + Default,
+    S: SparseTrieInterface + Default,
+{
+    /// Create new [`SparseStateTrie`]
+    pub fn new() -> Self {
+        Self::default()
+    }
+
     /// Returns `true` if account was already revealed.
     pub fn is_account_revealed(&self, account: B256) -> bool {
         self.revealed_account_paths.contains(&Nibbles::unpack(account))
     }

-    /// Uses the input `SparseTrieState` to populate the backing data structures in the `state`
-    /// trie.
-    pub fn populate_from(&mut self, trie: SparseTrieState) {
-        if let Some(new_trie) = self.state.as_revealed_mut() {
-            new_trie.use_allocated_state(trie);
-        } else {
-            self.state = SparseTrie::AllocatedEmpty { allocated: trie };
-        }
-    }
-
     /// Was the account witness for `address` complete?
     pub fn check_valid_account_witness(&self, address: B256) -> bool {
         let path = Nibbles::unpack(address);
@@ -125,10 +140,7 @@ impl SparseStateTrie {
             None => return false,
         };

-        matches!(
-            trie.find_leaf(&path, None),
-            Ok(LeafLookup::Exists | LeafLookup::NonExistent { .. })
-        )
+        trie.find_leaf(&path, None).is_ok()
     }

     /// Was the storage-slot witness for (`address`,`slot`) complete?
@@ -139,10 +151,7 @@ impl SparseStateTrie {
             None => return false,
         };

-        matches!(
-            trie.find_leaf(&path, None),
-            Ok(LeafLookup::Exists | LeafLookup::NonExistent { .. })
-        )
+        trie.find_leaf(&path, None).is_ok()
     }

     /// Returns `true` if storage slot for account was already revealed.
@@ -163,131 +172,38 @@ impl SparseStateTrie {
     }

     /// Returns reference to state trie if it was revealed.
-    pub const fn state_trie_ref(&self) -> Option<&RevealedSparseTrie<F::AccountNodeProvider>> {
+    pub const fn state_trie_ref(&self) -> Option<&A> {
         self.state.as_revealed_ref()
     }

     /// Returns reference to storage trie if it was revealed.
-    pub fn storage_trie_ref(
-        &self,
-        address: &B256,
-    ) -> Option<&RevealedSparseTrie<F::StorageNodeProvider>> {
+    pub fn storage_trie_ref(&self, address: &B256) -> Option<&S> {
         self.storages.get(address).and_then(|e| e.as_revealed_ref())
     }
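The witness checks above collapse to `is_ok()` because `find_leaf` already encodes completeness in its `Result`: both an inclusion proof (`LeafLookup::Exists`) and a complete exclusion proof (`LeafLookup::NonExistent`) come back as `Ok`, while a path that dead-ends in an unrevealed node surfaces as `Err(LeafLookupError::BlindedNode { .. })`, as the `find_leaf_error_blinded_node` tests earlier in this diff exercise. In sketch form, for any trie implementation:

    use reth_trie_common::Nibbles;
    use reth_trie_sparse::SparseTrieInterface;

    fn witness_is_complete<T: SparseTrieInterface>(trie: &T, path: &Nibbles) -> bool {
        // Err(..) can only mean the walk hit a node the witness never revealed.
        trie.find_leaf(path, None).is_ok()
    }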
- pub fn storage_trie_mut( - &mut self, - address: &B256, - ) -> Option<&mut RevealedSparseTrie> { + pub fn storage_trie_mut(&mut self, address: &B256) -> Option<&mut S> { self.storages.get_mut(address).and_then(|e| e.as_revealed_mut()) } /// Takes the storage trie for the provided address. - pub fn take_storage_trie( - &mut self, - address: &B256, - ) -> Option> { + pub fn take_storage_trie(&mut self, address: &B256) -> Option> { self.storages.remove(address) } /// Inserts storage trie for the provided address. - pub fn insert_storage_trie( - &mut self, - address: B256, - storage_trie: SparseTrie, - ) { + pub fn insert_storage_trie(&mut self, address: B256, storage_trie: SparseTrie) { self.storages.insert(address, storage_trie); } - /// Reveal unknown trie paths from provided leaf path and its proof for the account. - /// - /// Panics if trie updates retention is enabled. + /// Retrieves the storage trie for the given address, creating a new one if it doesn't exist. /// - /// NOTE: This method does not extensively validate the proof. - pub fn reveal_account( - &mut self, - account: B256, - proof: impl IntoIterator, - ) -> SparseStateTrieResult<()> { - assert!(!self.retain_updates); - - if self.is_account_revealed(account) { - return Ok(()); - } - - let mut proof = proof.into_iter().peekable(); - - let Some(root_node) = self.validate_root_node(&mut proof)? else { return Ok(()) }; - - // Reveal root node if it wasn't already. - let trie = self.state.reveal_root_with_provider( - self.provider_factory.account_node_provider(), - root_node, - TrieMasks::none(), - self.retain_updates, - )?; - - // Reveal the remaining proof nodes. - for (path, bytes) in proof { - if self.revealed_account_paths.contains(&path) { - continue - } - let node = TrieNode::decode(&mut &bytes[..])?; - trie.reveal_node(path, node, TrieMasks::none())?; - - // Track the revealed path. - self.revealed_account_paths.insert(path); - } - - Ok(()) - } - - /// Reveal unknown trie paths from provided leaf path and its proof for the storage slot. - /// - /// Panics if trie updates retention is enabled. - /// - /// NOTE: This method does not extensively validate the proof. - pub fn reveal_storage_slot( - &mut self, - account: B256, - slot: B256, - proof: impl IntoIterator, - ) -> SparseStateTrieResult<()> { - assert!(!self.retain_updates); - - if self.is_storage_slot_revealed(account, slot) { - return Ok(()); - } - - let mut proof = proof.into_iter().peekable(); - - let Some(root_node) = self.validate_root_node(&mut proof)? else { return Ok(()) }; - - // Reveal root node if it wasn't already. - let trie = self.storages.entry(account).or_default().reveal_root_with_provider( - self.provider_factory.storage_node_provider(account), - root_node, - TrieMasks::none(), - self.retain_updates, - )?; - - let revealed_nodes = self.revealed_storage_paths.entry(account).or_default(); - - // Reveal the remaining proof nodes. - for (path, bytes) in proof { - // If the node is already revealed, skip it. - if revealed_nodes.contains(&path) { - continue - } - let node = TrieNode::decode(&mut &bytes[..])?; - trie.reveal_node(path, node, TrieMasks::none())?; - - // Track the revealed path. - revealed_nodes.insert(path); - } - - Ok(()) + /// This method should always be used to create a storage trie, as it will re-use previously + /// allocated and cleared storage tries when possible. 
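The doc comment above insists that `get_or_create_storage_trie` (next hunk) is the only sanctioned way to create a storage trie because of the new `cleared_storages` pool: a retired trie keeps its heap allocations and is handed back out before a fresh one is built. The same `pop().unwrap_or_default()` pattern in miniature, with toy types (hypothetical `Pool`) in place of `SparseTrie`:

```rust
use std::collections::HashMap;

/// Toy version of the `storages` / `cleared_storages` pair.
struct Pool {
    live: HashMap<u64, Vec<u8>>,
    cleared: Vec<Vec<u8>>,
}

impl Pool {
    /// Mirrors `get_or_create_storage_trie`: reuse a cleared value when available.
    fn get_or_create(&mut self, key: u64) -> &mut Vec<u8> {
        self.live.entry(key).or_insert_with(|| self.cleared.pop().unwrap_or_default())
    }

    /// Mirrors clearing a storage trie back into `cleared_storages`.
    fn retire(&mut self, key: u64) {
        if let Some(mut buf) = self.live.remove(&key) {
            buf.clear(); // keeps capacity: the allocation is what we are pooling
            self.cleared.push(buf);
        }
    }
}

fn main() {
    let mut pool = Pool { live: HashMap::new(), cleared: Vec::new() };
    pool.get_or_create(1).extend_from_slice(&[0u8; 1024]);
    let cap = pool.live[&1].capacity();
    pool.retire(1);

    // A request for a brand-new key is served from the pooled allocation.
    assert!(pool.get_or_create(2).capacity() >= cap);
}
```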
+ fn get_or_create_storage_trie(&mut self, address: B256) -> &mut SparseTrie { + self.storages + .entry(address) + .or_insert_with(|| self.cleared_storages.pop().unwrap_or_default()) } /// Reveal unknown trie paths from multiproof. @@ -351,48 +267,36 @@ impl SparseStateTrie { branch_node_hash_masks: HashMap, branch_node_tree_masks: HashMap, ) -> SparseStateTrieResult<()> { - let FilteredProofNodes { + let FilterMappedProofNodes { + root_node, nodes, - new_nodes: _, + new_nodes, total_nodes: _total_nodes, skipped_nodes: _skipped_nodes, - } = filter_revealed_nodes(account_subtree, &self.revealed_account_paths)?; + } = filter_map_revealed_nodes( + account_subtree, + &mut self.revealed_account_paths, + &branch_node_hash_masks, + &branch_node_tree_masks, + )?; #[cfg(feature = "metrics")] { self.metrics.increment_total_account_nodes(_total_nodes as u64); self.metrics.increment_skipped_account_nodes(_skipped_nodes as u64); } - let mut account_nodes = nodes.into_iter().peekable(); - if let Some(root_node) = Self::validate_root_node_decoded(&mut account_nodes)? { + if let Some(root_node) = root_node { // Reveal root node if it wasn't already. - let trie = self.state.reveal_root_with_provider( - self.provider_factory.account_node_provider(), - root_node, - TrieMasks { - hash_mask: branch_node_hash_masks.get(&Nibbles::default()).copied(), - tree_mask: branch_node_tree_masks.get(&Nibbles::default()).copied(), - }, - self.retain_updates, - )?; + trace!(target: "trie::sparse", ?root_node, "Revealing root account node"); + let trie = + self.state.reveal_root(root_node.node, root_node.masks, self.retain_updates)?; - // Reveal the remaining proof nodes. - for (path, node) in account_nodes { - let (hash_mask, tree_mask) = if let TrieNode::Branch(_) = node { - ( - branch_node_hash_masks.get(&path).copied(), - branch_node_tree_masks.get(&path).copied(), - ) - } else { - (None, None) - }; - - trace!(target: "trie::sparse", ?path, ?node, ?hash_mask, ?tree_mask, "Revealing account node"); - trie.reveal_node(path, node, TrieMasks { hash_mask, tree_mask })?; + // Reserve the capacity for new nodes ahead of time, if the trie implementation + // supports doing so. + trie.reserve_nodes(new_nodes); - // Track the revealed path. - self.revealed_account_paths.insert(path); - } + trace!(target: "trie::sparse", total_nodes = ?nodes.len(), "Revealing account nodes"); + trie.reveal_nodes(nodes)?; } Ok(()) @@ -417,57 +321,40 @@ impl SparseStateTrie { ) -> SparseStateTrieResult<()> { let revealed_nodes = self.revealed_storage_paths.entry(account).or_default(); - let FilteredProofNodes { + let FilterMappedProofNodes { + root_node, nodes, new_nodes, total_nodes: _total_nodes, skipped_nodes: _skipped_nodes, - } = filter_revealed_nodes(storage_subtree.subtree, revealed_nodes)?; + } = filter_map_revealed_nodes( + storage_subtree.subtree, + revealed_nodes, + &storage_subtree.branch_node_hash_masks, + &storage_subtree.branch_node_tree_masks, + )?; #[cfg(feature = "metrics")] { self.metrics.increment_total_storage_nodes(_total_nodes as u64); self.metrics.increment_skipped_storage_nodes(_skipped_nodes as u64); } - let mut nodes = nodes.into_iter().peekable(); - if let Some(root_node) = Self::validate_root_node_decoded(&mut nodes)? { + if let Some(root_node) = root_node { // Reveal root node if it wasn't already. 
- let trie = self.storages.entry(account).or_default().reveal_root_with_provider( - self.provider_factory.storage_node_provider(account), - root_node, - TrieMasks { - hash_mask: storage_subtree - .branch_node_hash_masks - .get(&Nibbles::default()) - .copied(), - tree_mask: storage_subtree - .branch_node_tree_masks - .get(&Nibbles::default()) - .copied(), - }, - self.retain_updates, + trace!(target: "trie::sparse", ?account, ?root_node, "Revealing root storage node"); + let retain_updates = self.retain_updates; + let trie = self.get_or_create_storage_trie(account).reveal_root( + root_node.node, + root_node.masks, + retain_updates, )?; - // Reserve the capacity for new nodes ahead of time. + // Reserve the capacity for new nodes ahead of time, if the trie implementation + // supports doing so. trie.reserve_nodes(new_nodes); - // Reveal the remaining proof nodes. - for (path, node) in nodes { - let (hash_mask, tree_mask) = if let TrieNode::Branch(_) = node { - ( - storage_subtree.branch_node_hash_masks.get(&path).copied(), - storage_subtree.branch_node_tree_masks.get(&path).copied(), - ) - } else { - (None, None) - }; - - trace!(target: "trie::sparse", ?account, ?path, ?node, ?hash_mask, ?tree_mask, "Revealing storage node"); - trie.reveal_node(path, node, TrieMasks { hash_mask, tree_mask })?; - - // Track the revealed path. - revealed_nodes.insert(path); - } + trace!(target: "trie::sparse", ?account, total_nodes = ?nodes.len(), "Revealing storage nodes"); + trie.reveal_nodes(nodes)?; } Ok(()) @@ -534,14 +421,14 @@ impl SparseStateTrie { .get(&account) .is_none_or(|paths| !paths.contains(&path)) { - let storage_trie_entry = self.storages.entry(account).or_default(); + let retain_updates = self.retain_updates; + let storage_trie_entry = self.get_or_create_storage_trie(account); if path.is_empty() { // Handle special storage state root node case. - storage_trie_entry.reveal_root_with_provider( - self.provider_factory.storage_node_provider(account), + storage_trie_entry.reveal_root( trie_node, TrieMasks::none(), - self.retain_updates, + retain_updates, )?; } else { // Reveal non-root storage trie node. @@ -559,12 +446,7 @@ impl SparseStateTrie { else if !self.revealed_account_paths.contains(&path) { if path.is_empty() { // Handle special state root node case. - self.state.reveal_root_with_provider( - self.provider_factory.account_node_provider(), - trie_node, - TrieMasks::none(), - self.retain_updates, - )?; + self.state.reveal_root(trie_node, TrieMasks::none(), self.retain_updates)?; } else { // Reveal non-root state trie node. self.state.as_revealed_mut().ok_or(SparseTrieErrorKind::Blind)?.reveal_node( @@ -582,52 +464,6 @@ impl SparseStateTrie { Ok(()) } - /// Validates the root node of the proof and returns it if it exists and is valid. - fn validate_root_node>( - &self, - proof: &mut Peekable, - ) -> SparseStateTrieResult> { - // Validate root node. - let Some((path, node)) = proof.next() else { return Ok(None) }; - if !path.is_empty() { - return Err(SparseStateTrieErrorKind::InvalidRootNode { path, node }.into()) - } - - // Decode root node and perform sanity check. - let root_node = TrieNode::decode(&mut &node[..])?; - if matches!(root_node, TrieNode::EmptyRoot) && proof.peek().is_some() { - return Err(SparseStateTrieErrorKind::InvalidRootNode { path, node }.into()) - } - - Ok(Some(root_node)) - } - - /// Validates the decoded root node of the proof and returns it if it exists and is valid. 
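Both reveal paths above now collect the filtered nodes and hand them to `reveal_nodes` as one batch, preceded by a `reserve_nodes` capacity hint; `SerialSparseTrie`'s implementation (later in this diff) sorts the batch by path and inserts in order, so ancestors are processed before descendants. The shape of that flow, with toy types:

```rust
use std::collections::HashMap;

/// Toy node sink mirroring the `reserve_nodes` + `reveal_nodes` flow.
#[derive(Default)]
struct Trie {
    nodes: HashMap<Vec<u8>, &'static str>,
}

impl Trie {
    fn reserve_nodes(&mut self, additional: usize) {
        self.nodes.reserve(additional);
    }

    fn reveal_nodes(&mut self, mut batch: Vec<(Vec<u8>, &'static str)>) {
        // Sort the unsorted batch by path so parents land before children,
        // then insert, skipping paths that are already present.
        batch.sort_unstable_by(|a, b| a.0.cmp(&b.0));
        for (path, node) in batch {
            self.nodes.entry(path).or_insert(node);
        }
    }
}

fn main() {
    let mut trie = Trie::default();
    let batch = vec![
        (vec![0x1, 0x2], "leaf"),
        (vec![], "root"),
        (vec![0x1], "branch"),
    ];
    trie.reserve_nodes(batch.len()); // capacity hint, as in the hunks above
    trie.reveal_nodes(batch);
    assert_eq!(trie.nodes.len(), 3);
}
```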
- fn validate_root_node_decoded>( - proof: &mut Peekable, - ) -> SparseStateTrieResult> { - // Validate root node. - let Some((path, root_node)) = proof.next() else { return Ok(None) }; - if !path.is_empty() { - return Err(SparseStateTrieErrorKind::InvalidRootNode { - path, - node: alloy_rlp::encode(&root_node).into(), - } - .into()) - } - - // Perform sanity check. - if matches!(root_node, TrieNode::EmptyRoot) && proof.peek().is_some() { - return Err(SparseStateTrieErrorKind::InvalidRootNode { - path, - node: alloy_rlp::encode(&root_node).into(), - } - .into()) - } - - Ok(Some(root_node)) - } - /// Wipe the storage trie at the provided address. pub fn wipe_storage(&mut self, address: B256) -> SparseStateTrieResult<()> { if let Some(trie) = self.storages.get_mut(&address) { @@ -636,12 +472,12 @@ impl SparseStateTrie { Ok(()) } - /// Calculates the hashes of the nodes below the provided level. + /// Calculates the hashes of subtries. /// /// If the trie has not been revealed, this function does nothing. - pub fn calculate_below_level(&mut self, level: usize) { + pub fn calculate_subtries(&mut self) { if let SparseTrie::Revealed(trie) = &mut self.state { - trie.update_rlp_node_level(level); + trie.update_subtrie_hashes(); } } @@ -650,18 +486,18 @@ impl SparseStateTrie { self.storages.get_mut(&account).and_then(|trie| trie.root()) } - /// Returns mutable reference to the revealed sparse trie. + /// Returns mutable reference to the revealed account sparse trie. /// - /// If the trie is not revealed yet, its root will be revealed using the blinded node provider. + /// If the trie is not revealed yet, its root will be revealed using the trie node provider. fn revealed_trie_mut( &mut self, - ) -> SparseStateTrieResult<&mut RevealedSparseTrie> { + provider_factory: impl TrieNodeProviderFactory, + ) -> SparseStateTrieResult<&mut A> { match self.state { - SparseTrie::Blind | SparseTrie::AllocatedEmpty { .. } => { - let (root_node, hash_mask, tree_mask) = self - .provider_factory + SparseTrie::Blind(_) => { + let (root_node, hash_mask, tree_mask) = provider_factory .account_node_provider() - .blinded_node(&Nibbles::default())? + .trie_node(&Nibbles::default())? .map(|node| { TrieNode::decode(&mut &node.node[..]) .map(|decoded| (decoded, node.hash_mask, node.tree_mask)) @@ -669,12 +505,7 @@ impl SparseStateTrie { .transpose()? .unwrap_or((TrieNode::EmptyRoot, None, None)); self.state - .reveal_root_with_provider( - self.provider_factory.account_node_provider(), - root_node, - TrieMasks { hash_mask, tree_mask }, - self.retain_updates, - ) + .reveal_root(root_node, TrieMasks { hash_mask, tree_mask }, self.retain_updates) .map_err(Into::into) } SparseTrie::Revealed(ref mut trie) => Ok(trie), @@ -684,22 +515,28 @@ impl SparseStateTrie { /// Returns sparse trie root. /// /// If the trie has not been revealed, this function reveals the root node and returns its hash. - pub fn root(&mut self) -> SparseStateTrieResult { + pub fn root( + &mut self, + provider_factory: impl TrieNodeProviderFactory, + ) -> SparseStateTrieResult { // record revealed node metrics #[cfg(feature = "metrics")] self.metrics.record(); - Ok(self.revealed_trie_mut()?.root()) + Ok(self.revealed_trie_mut(provider_factory)?.root()) } /// Returns sparse trie root and trie updates if the trie has been revealed. 
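Since the provider factory is no longer stored on `SparseStateTrie`, `root` (above) and `root_with_updates` (next hunk) receive it per call. A hedged usage sketch following this diff's own tests; the external crate paths (`reth_trie_sparse::...`) and the `reth_trie::EMPTY_ROOT_HASH` import are assumptions taken from the doc examples and test imports shown elsewhere in the diff:

```rust
// Assumed imports, mirroring the doc examples and tests in this diff.
use reth_trie::EMPTY_ROOT_HASH;
use reth_trie_sparse::{provider::DefaultTrieNodeProviderFactory, SerialSparseTrie, SparseStateTrie};

fn main() {
    // The factory lives at the call site now, not inside the trie.
    let provider_factory = DefaultTrieNodeProviderFactory;
    let mut sparse = SparseStateTrie::<SerialSparseTrie, SerialSparseTrie>::default();

    // On a still-blind trie, `root` resolves the root through the account node
    // provider; the default provider yields no node, so the trie reveals
    // `TrieNode::EmptyRoot` and returns the empty-trie root hash.
    assert_eq!(sparse.root(&provider_factory).unwrap(), EMPTY_ROOT_HASH);
}
```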
- pub fn root_with_updates(&mut self) -> SparseStateTrieResult<(B256, TrieUpdates)> { + pub fn root_with_updates( + &mut self, + provider_factory: impl TrieNodeProviderFactory, + ) -> SparseStateTrieResult<(B256, TrieUpdates)> { // record revealed node metrics #[cfg(feature = "metrics")] self.metrics.record(); let storage_tries = self.storage_trie_updates(); - let revealed = self.revealed_trie_mut()?; + let revealed = self.revealed_trie_mut(provider_factory)?; let (root, updates) = (revealed.root(), revealed.take_updates()); let updates = TrieUpdates { @@ -750,12 +587,14 @@ impl SparseStateTrie { &mut self, path: Nibbles, value: Vec, + provider_factory: impl TrieNodeProviderFactory, ) -> SparseStateTrieResult<()> { if !self.revealed_account_paths.contains(&path) { self.revealed_account_paths.insert(path); } - self.state.update_leaf(path, value)?; + let provider = provider_factory.account_node_provider(); + self.state.update_leaf(path, value, provider)?; Ok(()) } @@ -765,13 +604,16 @@ impl SparseStateTrie { address: B256, slot: Nibbles, value: Vec, + provider_factory: impl TrieNodeProviderFactory, ) -> SparseStateTrieResult<()> { if !self.revealed_storage_paths.get(&address).is_some_and(|slots| slots.contains(&slot)) { self.revealed_storage_paths.entry(address).or_default().insert(slot); } let storage_trie = self.storages.get_mut(&address).ok_or(SparseTrieErrorKind::Blind)?; - storage_trie.update_leaf(slot, value)?; + + let provider = provider_factory.storage_node_provider(address); + storage_trie.update_leaf(slot, value, provider)?; Ok(()) } @@ -779,7 +621,12 @@ impl SparseStateTrie { /// the storage root based on update storage trie or look it up from existing leaf value. /// /// If the new account info and storage trie are empty, the account leaf will be removed. - pub fn update_account(&mut self, address: B256, account: Account) -> SparseStateTrieResult<()> { + pub fn update_account( + &mut self, + address: B256, + account: Account, + provider_factory: impl TrieNodeProviderFactory, + ) -> SparseStateTrieResult<()> { let nibbles = Nibbles::unpack(address); let storage_root = if let Some(storage_trie) = self.storages.get_mut(&address) { @@ -789,7 +636,7 @@ impl SparseStateTrie { trace!(target: "trie::sparse", ?address, "Retrieving storage root from account leaf to update account"); // The account was revealed, either... if let Some(value) = self.get_account_value(&address) { - // ..it exists and we should take it's current storage root or... + // ..it exists and we should take its current storage root or... TrieAccount::decode(&mut &value[..])?.storage_root } else { // ...the account is newly created and the storage trie is empty. @@ -801,12 +648,12 @@ impl SparseStateTrie { if account.is_empty() && storage_root == EMPTY_ROOT_HASH { trace!(target: "trie::sparse", ?address, "Removing account"); - self.remove_account_leaf(&nibbles) + self.remove_account_leaf(&nibbles, provider_factory) } else { trace!(target: "trie::sparse", ?address, "Updating account"); self.account_rlp_buf.clear(); account.into_trie_account(storage_root).encode(&mut self.account_rlp_buf); - self.update_account_leaf(nibbles, self.account_rlp_buf.clone()) + self.update_account_leaf(nibbles, self.account_rlp_buf.clone(), provider_factory) } } @@ -816,7 +663,11 @@ impl SparseStateTrie { /// /// If the new storage root is empty, and the account info was already empty, the account leaf /// will be removed. 
- pub fn update_account_storage_root(&mut self, address: B256) -> SparseStateTrieResult<()> { + pub fn update_account_storage_root( + &mut self, + address: B256, + provider_factory: impl TrieNodeProviderFactory, + ) -> SparseStateTrieResult<()> { if !self.is_account_revealed(address) { return Err(SparseTrieErrorKind::Blind.into()) } @@ -847,21 +698,26 @@ impl SparseStateTrie { if trie_account == TrieAccount::default() { // If the account is empty, remove it. trace!(target: "trie::sparse", ?address, "Removing account because the storage root is empty"); - self.remove_account_leaf(&nibbles)?; + self.remove_account_leaf(&nibbles, provider_factory)?; } else { // Otherwise, update the account leaf. trace!(target: "trie::sparse", ?address, "Updating account with the new storage root"); self.account_rlp_buf.clear(); trie_account.encode(&mut self.account_rlp_buf); - self.update_account_leaf(nibbles, self.account_rlp_buf.clone())?; + self.update_account_leaf(nibbles, self.account_rlp_buf.clone(), provider_factory)?; } Ok(()) } /// Remove the account leaf node. - pub fn remove_account_leaf(&mut self, path: &Nibbles) -> SparseStateTrieResult<()> { - self.state.remove_leaf(path)?; + pub fn remove_account_leaf( + &mut self, + path: &Nibbles, + provider_factory: impl TrieNodeProviderFactory, + ) -> SparseStateTrieResult<()> { + let provider = provider_factory.account_node_provider(); + self.state.remove_leaf(path, provider)?; Ok(()) } @@ -870,24 +726,23 @@ impl SparseStateTrie { &mut self, address: B256, slot: &Nibbles, + provider_factory: impl TrieNodeProviderFactory, ) -> SparseStateTrieResult<()> { let storage_trie = self.storages.get_mut(&address).ok_or(SparseTrieErrorKind::Blind)?; - storage_trie.remove_leaf(slot)?; - Ok(()) - } - /// Clears and takes the account trie. - pub fn take_cleared_account_trie_state(&mut self) -> SparseTrieState { - let trie = core::mem::take(&mut self.state); - trie.cleared() + let provider = provider_factory.storage_node_provider(address); + storage_trie.remove_leaf(slot, provider)?; + Ok(()) } } -/// Result of [`filter_revealed_nodes`]. +/// Result of [`filter_map_revealed_nodes`]. #[derive(Debug, PartialEq, Eq)] -struct FilteredProofNodes { - /// Filtered, decoded and sorted proof nodes. - nodes: Vec<(Nibbles, TrieNode)>, +struct FilterMappedProofNodes { + /// Root node which was pulled out of the original node set to be handled specially. + root_node: Option, + /// Filtered, decoded and unsorted proof nodes. Root node is removed. + nodes: Vec, /// Number of nodes in the proof. total_nodes: usize, /// Number of nodes that were skipped because they were already revealed. @@ -897,52 +752,91 @@ struct FilteredProofNodes { new_nodes: usize, } -/// Filters the decoded nodes that are already revealed and returns additional information about the -/// number of total, skipped, and new nodes. -fn filter_revealed_nodes( +/// Filters the decoded nodes that are already revealed, maps them to `RevealedSparseNodes`, +/// separates the root node if present, and returns additional information about the number of +/// total, skipped, and new nodes. 
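`filter_map_revealed_nodes` (next hunk) folds the old "check `contains`, insert later" sequence into the single `bool` returned by `HashSet::insert`, counting skipped nodes as it goes. The idiom isolated:

```rust
use std::collections::HashSet;

fn main() {
    // Paths already revealed in a previous round.
    let mut revealed: HashSet<Vec<u8>> = HashSet::from_iter([vec![0x0]]);
    let incoming = [vec![0x0], vec![0x1], vec![0x1]];

    let mut skipped = 0usize;
    let mut fresh = Vec::new();
    for path in incoming {
        // `insert` returns `false` when the value was already present, replacing
        // the previous separate `contains` check plus later insertion.
        if !revealed.insert(path.clone()) {
            skipped += 1;
            continue;
        }
        fresh.push(path);
    }

    assert_eq!(skipped, 2); // the pre-revealed `0x0` plus the duplicate `0x1`
    assert_eq!(fresh, vec![vec![0x1]]);
}
```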
+fn filter_map_revealed_nodes( proof_nodes: DecodedProofNodes, - revealed_nodes: &HashSet, -) -> alloy_rlp::Result { - let mut result = FilteredProofNodes { + revealed_nodes: &mut HashSet, + branch_node_hash_masks: &HashMap, + branch_node_tree_masks: &HashMap, +) -> SparseStateTrieResult { + let mut result = FilterMappedProofNodes { + root_node: None, nodes: Vec::with_capacity(proof_nodes.len()), total_nodes: 0, skipped_nodes: 0, new_nodes: 0, }; - for (path, node) in proof_nodes.into_inner() { + let proof_nodes_len = proof_nodes.len(); + for (path, proof_node) in proof_nodes.into_inner() { result.total_nodes += 1; - // If the node is already revealed, skip it. - if revealed_nodes.contains(&path) { + + let is_root = path.is_empty(); + + // If the node is already revealed, skip it. We don't ever skip the root node, nor do we add + // it to `revealed_nodes`. + if !is_root && !revealed_nodes.insert(path) { result.skipped_nodes += 1; continue } result.new_nodes += 1; - // If it's a branch node, increase the number of new nodes by the number of children - // according to the state mask. - if let TrieNode::Branch(branch) = &node { - result.new_nodes += branch.state_mask.count_ones() as usize; + + // Extract hash/tree masks based on the node type (only branch nodes have masks). At the + // same time increase the new_nodes counter if the node is a type which has children. + let masks = match &proof_node { + TrieNode::Branch(branch) => { + // If it's a branch node, increase the number of new nodes by the number of children + // according to the state mask. + result.new_nodes += branch.state_mask.count_ones() as usize; + TrieMasks { + hash_mask: branch_node_hash_masks.get(&path).copied(), + tree_mask: branch_node_tree_masks.get(&path).copied(), + } + } + TrieNode::Extension(_) => { + // There is always exactly one child of an extension node. + result.new_nodes += 1; + TrieMasks::none() + } + _ => TrieMasks::none(), + }; + + let node = RevealedSparseNode { path, node: proof_node, masks }; + + if is_root { + // Perform sanity check. + if matches!(node.node, TrieNode::EmptyRoot) && proof_nodes_len > 1 { + return Err(SparseStateTrieErrorKind::InvalidRootNode { + path, + node: alloy_rlp::encode(&node.node).into(), + } + .into()) + } + + result.root_node = Some(node); + + continue } - result.nodes.push((path, node)); + result.nodes.push(node); } - result.nodes.sort_unstable_by(|a, b| a.0.cmp(&b.0)); Ok(result) } #[cfg(test)] mod tests { use super::*; + use crate::provider::DefaultTrieNodeProviderFactory; use alloy_primitives::{ b256, map::{HashMap, HashSet}, - Bytes, U256, + U256, }; - use alloy_rlp::EMPTY_STRING_CODE; use arbitrary::Arbitrary; - use assert_matches::assert_matches; use rand::{rngs::StdRng, Rng, SeedableRng}; use reth_primitives_traits::Account; use reth_trie::{updates::StorageTrieUpdates, HashBuilder, MultiProof, EMPTY_ROOT_HASH}; @@ -951,67 +845,10 @@ mod tests { BranchNode, LeafNode, StorageMultiProof, TrieMask, }; - #[test] - fn validate_root_node_first_node_not_root() { - let sparse = SparseStateTrie::default(); - let proof = [(Nibbles::from_nibbles([0x1]), Bytes::from([EMPTY_STRING_CODE]))]; - assert_matches!( - sparse.validate_root_node(&mut proof.into_iter().peekable()).map_err(|e| e.into_kind()), - Err(SparseStateTrieErrorKind::InvalidRootNode { .. 
}) - ); - } - - #[test] - fn validate_root_node_invalid_proof_with_empty_root() { - let sparse = SparseStateTrie::default(); - let proof = [ - (Nibbles::default(), Bytes::from([EMPTY_STRING_CODE])), - (Nibbles::from_nibbles([0x1]), Bytes::new()), - ]; - assert_matches!( - sparse.validate_root_node(&mut proof.into_iter().peekable()).map_err(|e| e.into_kind()), - Err(SparseStateTrieErrorKind::InvalidRootNode { .. }) - ); - } - - #[test] - fn reveal_account_empty() { - let retainer = ProofRetainer::from_iter([Nibbles::default()]); - let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); - hash_builder.root(); - let proofs = hash_builder.take_proof_nodes(); - assert_eq!(proofs.len(), 1); - - let mut sparse = SparseStateTrie::default(); - assert_eq!(sparse.state, SparseTrie::Blind); - - sparse.reveal_account(Default::default(), proofs.into_inner()).unwrap(); - assert_eq!(sparse.state, SparseTrie::revealed_empty()); - } - - #[test] - fn reveal_storage_slot_empty() { - let retainer = ProofRetainer::from_iter([Nibbles::default()]); - let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); - hash_builder.root(); - let proofs = hash_builder.take_proof_nodes(); - assert_eq!(proofs.len(), 1); - - let mut sparse = SparseStateTrie::default(); - assert!(sparse.storages.is_empty()); - - sparse - .reveal_storage_slot(Default::default(), Default::default(), proofs.into_inner()) - .unwrap(); - assert_eq!( - sparse.storages, - HashMap::from_iter([(Default::default(), SparseTrie::revealed_empty())]) - ); - } - #[test] fn reveal_account_path_twice() { - let mut sparse = SparseStateTrie::default(); + let provider_factory = DefaultTrieNodeProviderFactory; + let mut sparse = SparseStateTrie::::default(); let leaf_value = alloy_rlp::encode(TrieAccount::default()); let leaf_1 = alloy_rlp::encode(TrieNode::Leaf(LeafNode::new( @@ -1053,7 +890,7 @@ mod tests { // Remove the leaf node and check that the state trie does not contain the leaf node and // value - sparse.remove_account_leaf(&Nibbles::from_nibbles([0x0])).unwrap(); + sparse.remove_account_leaf(&Nibbles::from_nibbles([0x0]), &provider_factory).unwrap(); assert!(!sparse .state_trie_ref() .unwrap() @@ -1082,7 +919,8 @@ mod tests { #[test] fn reveal_storage_path_twice() { - let mut sparse = SparseStateTrie::default(); + let provider_factory = DefaultTrieNodeProviderFactory; + let mut sparse = SparseStateTrie::::default(); let leaf_value = alloy_rlp::encode(TrieAccount::default()); let leaf_1 = alloy_rlp::encode(TrieNode::Leaf(LeafNode::new( @@ -1135,7 +973,9 @@ mod tests { // Remove the leaf node and check that the storage trie does not contain the leaf node and // value - sparse.remove_storage_leaf(B256::ZERO, &Nibbles::from_nibbles([0x0])).unwrap(); + sparse + .remove_storage_leaf(B256::ZERO, &Nibbles::from_nibbles([0x0]), &provider_factory) + .unwrap(); assert!(!sparse .storage_trie_ref(&B256::ZERO) .unwrap() @@ -1211,7 +1051,8 @@ mod tests { let root = hash_builder.root(); let proof_nodes = hash_builder.take_proof_nodes(); - let mut sparse = SparseStateTrie::default().with_updates(true); + let provider_factory = DefaultTrieNodeProviderFactory; + let mut sparse = SparseStateTrie::::default().with_updates(true); sparse .reveal_decoded_multiproof( MultiProof { @@ -1247,24 +1088,49 @@ mod tests { ) .unwrap(); - assert_eq!(sparse.root().unwrap(), root); + assert_eq!(sparse.root(&provider_factory).unwrap(), root); let address_3 = b256!("0x2000000000000000000000000000000000000000000000000000000000000000"); let 
address_path_3 = Nibbles::unpack(address_3); let account_3 = Account { nonce: account_1.nonce + 1, ..account_1 }; let trie_account_3 = account_3.into_trie_account(EMPTY_ROOT_HASH); - sparse.update_account_leaf(address_path_3, alloy_rlp::encode(trie_account_3)).unwrap(); + sparse + .update_account_leaf( + address_path_3, + alloy_rlp::encode(trie_account_3), + &provider_factory, + ) + .unwrap(); - sparse.update_storage_leaf(address_1, slot_path_3, alloy_rlp::encode(value_3)).unwrap(); + sparse + .update_storage_leaf( + address_1, + slot_path_3, + alloy_rlp::encode(value_3), + &provider_factory, + ) + .unwrap(); trie_account_1.storage_root = sparse.storage_root(address_1).unwrap(); - sparse.update_account_leaf(address_path_1, alloy_rlp::encode(trie_account_1)).unwrap(); + sparse + .update_account_leaf( + address_path_1, + alloy_rlp::encode(trie_account_1), + &provider_factory, + ) + .unwrap(); sparse.wipe_storage(address_2).unwrap(); trie_account_2.storage_root = sparse.storage_root(address_2).unwrap(); - sparse.update_account_leaf(address_path_2, alloy_rlp::encode(trie_account_2)).unwrap(); + sparse + .update_account_leaf( + address_path_2, + alloy_rlp::encode(trie_account_2), + &provider_factory, + ) + .unwrap(); - sparse.root().unwrap(); + sparse.root(&provider_factory).unwrap(); let sparse_updates = sparse.take_trie_updates().unwrap(); // TODO(alexey): assert against real state root calculation updates @@ -1286,8 +1152,8 @@ mod tests { } #[test] - fn test_filter_revealed_nodes() { - let revealed_nodes = HashSet::from_iter([Nibbles::from_nibbles([0x0])]); + fn test_filter_map_revealed_nodes() { + let mut revealed_nodes = HashSet::from_iter([Nibbles::from_nibbles([0x0])]); let leaf = TrieNode::Leaf(LeafNode::new(Nibbles::default(), alloy_rlp::encode([]))); let leaf_encoded = alloy_rlp::encode(&leaf); let branch = TrieNode::Branch(BranchNode::new( @@ -1300,12 +1166,30 @@ mod tests { (Nibbles::from_nibbles([0x1]), leaf.clone()), ]); - let decoded = filter_revealed_nodes(proof_nodes, &revealed_nodes).unwrap(); + let branch_node_hash_masks = HashMap::default(); + let branch_node_tree_masks = HashMap::default(); + + let decoded = filter_map_revealed_nodes( + proof_nodes, + &mut revealed_nodes, + &branch_node_hash_masks, + &branch_node_tree_masks, + ) + .unwrap(); assert_eq!( decoded, - FilteredProofNodes { - nodes: vec![(Nibbles::default(), branch), (Nibbles::from_nibbles([0x1]), leaf)], + FilterMappedProofNodes { + root_node: Some(RevealedSparseNode { + path: Nibbles::default(), + node: branch, + masks: TrieMasks::none(), + }), + nodes: vec![RevealedSparseNode { + path: Nibbles::from_nibbles([0x1]), + node: leaf, + masks: TrieMasks::none(), + }], // Branch, leaf, leaf total_nodes: 3, // Revealed leaf node with path 0x1 diff --git a/crates/trie/sparse/src/traits.rs b/crates/trie/sparse/src/traits.rs new file mode 100644 index 00000000000..300ac39c1b6 --- /dev/null +++ b/crates/trie/sparse/src/traits.rs @@ -0,0 +1,312 @@ +//! Traits for sparse trie implementations. + +use core::fmt::Debug; + +use alloc::{borrow::Cow, vec, vec::Vec}; +use alloy_primitives::{ + map::{HashMap, HashSet}, + B256, +}; +use alloy_trie::{BranchNodeCompact, TrieMask}; +use reth_execution_errors::SparseTrieResult; +use reth_trie_common::{Nibbles, TrieNode}; + +use crate::provider::TrieNodeProvider; + +/// Trait defining common operations for revealed sparse trie implementations. 
+/// +/// This trait abstracts over different sparse trie implementations (serial vs parallel) +/// while providing a unified interface for the core trie operations needed by the +/// [`crate::SparseTrie`] enum. +pub trait SparseTrieInterface: Sized + Debug + Send + Sync { + /// Configures the trie to have the given root node revealed. + /// + /// # Arguments + /// + /// * `root` - The root node to reveal + /// * `masks` - Trie masks for root branch node + /// * `retain_updates` - Whether to track updates + /// + /// # Returns + /// + /// Self if successful, or an error if revealing fails. + /// + /// # Panics + /// + /// May panic if the trie is not new/cleared, and has already revealed nodes. + fn with_root( + self, + root: TrieNode, + masks: TrieMasks, + retain_updates: bool, + ) -> SparseTrieResult; + + /// Configures the trie to retain information about updates. + /// + /// If `retain_updates` is true, the trie will record branch node updates + /// and deletions. This information can be used to efficiently update + /// an external database. + /// + /// # Arguments + /// + /// * `retain_updates` - Whether to track updates + /// + /// # Returns + /// + /// Self for method chaining. + fn with_updates(self, retain_updates: bool) -> Self; + + /// Reserves capacity for additional trie nodes. + /// + /// # Arguments + /// + /// * `additional` - The number of additional trie nodes to reserve capacity for. + fn reserve_nodes(&mut self, _additional: usize) {} + + /// The single-node version of `reveal_nodes`. + /// + /// # Returns + /// + /// `Ok(())` if successful, or an error if the node was not revealed. + fn reveal_node( + &mut self, + path: Nibbles, + node: TrieNode, + masks: TrieMasks, + ) -> SparseTrieResult<()> { + self.reveal_nodes(vec![RevealedSparseNode { path, node, masks }]) + } + + /// Reveals one or more trie nodes if they have not been revealed before. + /// + /// This function decodes trie nodes and inserts them into the trie structure. It handles + /// different node types (leaf, extension, branch) by appropriately adding them to the trie and + /// recursively revealing their children. + /// + /// # Arguments + /// + /// * `nodes` - The nodes to be revealed, each having a path and optional set of branch node + /// masks. The nodes will be unsorted. + /// + /// # Returns + /// + /// `Ok(())` if successful, or an error if any of the nodes was not revealed. + fn reveal_nodes(&mut self, nodes: Vec) -> SparseTrieResult<()>; + + /// Updates the value of a leaf node at the specified path. + /// + /// If the leaf doesn't exist, it will be created. + /// If it does exist, its value will be updated. + /// + /// # Arguments + /// + /// * `full_path` - The full path to the leaf + /// * `value` - The new value for the leaf + /// * `provider` - The trie provider for resolving missing nodes + /// + /// # Returns + /// + /// `Ok(())` if successful, or an error if the update failed. + fn update_leaf( + &mut self, + full_path: Nibbles, + value: Vec, + provider: P, + ) -> SparseTrieResult<()>; + + /// Removes a leaf node at the specified path. + /// + /// This will also handle collapsing the trie structure as needed + /// (e.g., removing branch nodes that become unnecessary). + /// + /// # Arguments + /// + /// * `full_path` - The full path to the leaf to remove + /// * `provider` - The trie node provider for resolving missing nodes + /// + /// # Returns + /// + /// `Ok(())` if successful, or an error if the removal failed. 
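Note the provided `reveal_node` above: the trait's single-node entry point wraps the node into a one-element `Vec` and defers to `reveal_nodes`, so implementors only ever write the batched path. The pattern in a toy trait (hypothetical names):

```rust
/// Toy trait showing the provided-method pattern used by `SparseTrieInterface`.
trait RevealBatch {
    fn reveal_nodes(&mut self, nodes: Vec<u32>);

    /// Single-node reveal is just a one-element batch.
    fn reveal_node(&mut self, node: u32) {
        self.reveal_nodes(vec![node]);
    }
}

struct Collector(Vec<u32>);

impl RevealBatch for Collector {
    fn reveal_nodes(&mut self, mut nodes: Vec<u32>) {
        self.0.append(&mut nodes);
    }
}

fn main() {
    let mut c = Collector(Vec::new());
    c.reveal_node(7); // routed through the default method
    c.reveal_nodes(vec![1, 2]);
    assert_eq!(c.0, vec![7, 1, 2]);
}
```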
+ fn remove_leaf( + &mut self, + full_path: &Nibbles, + provider: P, + ) -> SparseTrieResult<()>; + + /// Calculates and returns the root hash of the trie. + /// + /// This processes any dirty nodes by updating their RLP encodings + /// and returns the root hash. + /// + /// # Returns + /// + /// The root hash of the trie. + fn root(&mut self) -> B256; + + /// Recalculates and updates the RLP hashes of subtries deeper than a certain level. The level + /// is defined in the implementation. + /// + /// The root node is considered to be at level 0. This method is useful for optimizing + /// hash recalculations after localized changes to the trie structure. + fn update_subtrie_hashes(&mut self); + + /// Retrieves a reference to the leaf value at the specified path. + /// + /// # Arguments + /// + /// * `full_path` - The full path to the leaf value + /// + /// # Returns + /// + /// A reference to the leaf value stored at the given full path, if it is revealed. + /// + /// Note: a value can exist in the full trie and this function still returns `None` + /// because the value has not been revealed. + /// + /// Hence a `None` indicates two possibilities: + /// - The value does not exists in the trie, so it cannot be revealed + /// - The value has not yet been revealed. In order to determine which is true, one would need + /// an exclusion proof. + fn get_leaf_value(&self, full_path: &Nibbles) -> Option<&Vec>; + + /// Attempts to find a leaf node at the specified path. + /// + /// This method traverses the trie from the root down to the given path, checking + /// if a leaf exists at that path. It can be used to verify the existence of a leaf + /// or to generate an exclusion proof (proof that a leaf does not exist). + /// + /// # Parameters + /// + /// - `full_path`: The path to search for. + /// - `expected_value`: Optional expected value. If provided, will verify the leaf value + /// matches. + /// + /// # Returns + /// + /// - `Ok(LeafLookup::Exists)` if the leaf exists with the expected value. + /// - `Ok(LeafLookup::NonExistent)` if the leaf definitely does not exist (exclusion proof). + /// - `Err(LeafLookupError)` if the search encountered a blinded node or found a different + /// value. + fn find_leaf( + &self, + full_path: &Nibbles, + expected_value: Option<&Vec>, + ) -> Result; + + /// Returns a reference to the current sparse trie updates. + /// + /// If no updates have been made/recorded, returns an empty update set. + fn updates_ref(&self) -> Cow<'_, SparseTrieUpdates>; + + /// Consumes and returns the currently accumulated trie updates. + /// + /// This is useful when you want to apply the updates to an external database + /// and then start tracking a new set of updates. + /// + /// # Returns + /// + /// The accumulated updates, or an empty set if updates weren't being tracked. + fn take_updates(&mut self) -> SparseTrieUpdates; + + /// Removes all nodes and values from the trie, resetting it to a blank state + /// with only an empty root node. This is used when a storage root is deleted. + /// + /// This should not be used when intending to reuse the trie for a fresh account/storage root; + /// use `clear` for that. + /// + /// Note: All previously tracked changes to the trie are also removed. + fn wipe(&mut self); + + /// This clears all data structures in the sparse trie, keeping the backing data structures + /// allocated. A [`crate::SparseNode::Empty`] is inserted at the root. + /// + /// This is useful for reusing the trie without needing to reallocate memory. 
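`wipe` and `clear` above are deliberately distinct: `wipe` is a logical deletion (used when a storage root is destroyed, and recorded in the update set), while `clear` only resets contents so the backing collections keep their allocations for reuse. The capacity-preserving half is plain `clear` semantics on std collections:

```rust
use std::collections::HashMap;

fn main() {
    let mut nodes: HashMap<u64, u64> = (0..1024).map(|i| (i, i)).collect();
    let cap_before = nodes.capacity();

    // `clear` in the trait's sense: drop every entry but keep the table
    // allocated, so the next payload reuses the memory instead of reallocating.
    nodes.clear();
    assert!(nodes.is_empty());
    assert!(nodes.capacity() >= cap_before); // the allocation survives
}
```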
+ fn clear(&mut self); +} + +/// Struct for passing around branch node mask information. +/// +/// Branch nodes can have up to 16 children (one for each nibble). +/// The masks represent which children are stored in different ways: +/// - `hash_mask`: Indicates which children are stored as hashes in the database +/// - `tree_mask`: Indicates which children are complete subtrees stored in the database +/// +/// These masks are essential for efficient trie traversal and serialization, as they +/// determine how nodes should be encoded and stored on disk. +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub struct TrieMasks { + /// Branch node hash mask, if any. + /// + /// When a bit is set, the corresponding child node's hash is stored in the trie. + /// + /// This mask enables selective hashing of child nodes. + pub hash_mask: Option, + /// Branch node tree mask, if any. + /// + /// When a bit is set, the corresponding child subtree is stored in the database. + pub tree_mask: Option, +} + +impl TrieMasks { + /// Helper function, returns both fields `hash_mask` and `tree_mask` as [`None`] + pub const fn none() -> Self { + Self { hash_mask: None, tree_mask: None } + } +} + +/// Tracks modifications to the sparse trie structure. +/// +/// Maintains references to both modified and pruned/removed branches, enabling +/// one to make batch updates to a persistent database. +#[derive(Debug, Clone, Default, PartialEq, Eq)] +pub struct SparseTrieUpdates { + /// Collection of updated intermediate nodes indexed by full path. + pub updated_nodes: HashMap, + /// Collection of removed intermediate nodes indexed by full path. + pub removed_nodes: HashSet, + /// Flag indicating whether the trie was wiped. + pub wiped: bool, +} + +/// Error type for a leaf lookup operation +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum LeafLookupError { + /// The path leads to a blinded node, cannot determine if leaf exists. + /// This means the witness is not complete. + BlindedNode { + /// Path to the blinded node. + path: Nibbles, + /// Hash of the blinded node. + hash: B256, + }, + /// The path leads to a leaf with a different value than expected. + /// This means the witness is malformed. + ValueMismatch { + /// Path to the leaf. + path: Nibbles, + /// Expected value. + expected: Option>, + /// Actual value found. + actual: Vec, + }, +} + +/// Success value for a leaf lookup operation +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum LeafLookup { + /// Leaf exists with expected value. + Exists, + /// Leaf does not exist (exclusion proof found). + NonExistent, +} + +/// Carries all information needed by a sparse trie to reveal a particular node. +#[derive(Debug, PartialEq, Eq)] +pub struct RevealedSparseNode { + /// Path of the node. + pub path: Nibbles, + /// The node itself. + pub node: TrieNode, + /// Tree and hash masks for the node, if known. + pub masks: TrieMasks, +} diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index e2f28c2417f..3189a8c3b66 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -1,4 +1,8 @@ -use crate::blinded::{BlindedProvider, DefaultBlindedProvider, RevealedNode}; +use crate::{ + provider::{RevealedNode, TrieNodeProvider}, + LeafLookup, LeafLookupError, RevealedSparseNode, SparseTrieInterface, SparseTrieUpdates, + TrieMasks, +}; use alloc::{ borrow::Cow, boxed::Box, @@ -22,48 +26,9 @@ use reth_trie_common::{ use smallvec::SmallVec; use tracing::trace; -/// Struct for passing around branch node mask information. 
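A `TrieMask` is a 16-bit child bitmap, one bit per nibble. The reveal path relies on exactly two operations over it: a per-path lookup of `hash_mask`/`tree_mask`, and `state_mask.count_ones()` to grow the `new_nodes` estimate (as in `filter_map_revealed_nodes` above). A minimal sketch with a raw `u16` standing in for `TrieMask`:

```rust
fn main() {
    // A branch's state mask: bit i set <=> a child exists at nibble i.
    let state_mask: u16 = 0b0000_0000_0101_0010; // children at nibbles 1, 4, 6

    // `filter_map_revealed_nodes` grows its `new_nodes` estimate by one per
    // set bit: each child is a node that may later need to be revealed.
    assert_eq!(state_mask.count_ones(), 3);

    // Membership test for one nibble, as a hash/tree mask consumer would do.
    let nibble = 4;
    assert_ne!(state_mask & (1u16 << nibble), 0);
}
```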
-/// -/// Branch nodes can have up to 16 children (one for each nibble). -/// The masks represent which children are stored in different ways: -/// - `hash_mask`: Indicates which children are stored as hashes in the database -/// - `tree_mask`: Indicates which children are complete subtrees stored in the database -/// -/// These masks are essential for efficient trie traversal and serialization, as they -/// determine how nodes should be encoded and stored on disk. -#[derive(Debug)] -pub struct TrieMasks { - /// Branch node hash mask, if any. - /// - /// When a bit is set, the corresponding child node's hash is stored in the trie. - /// - /// This mask enables selective hashing of child nodes. - pub hash_mask: Option, - /// Branch node tree mask, if any. - /// - /// When a bit is set, the corresponding child subtree is stored in the database. - pub tree_mask: Option, -} - -impl TrieMasks { - /// Helper function, returns both fields `hash_mask` and `tree_mask` as [`None`] - pub const fn none() -> Self { - Self { hash_mask: None, tree_mask: None } - } -} - -/// A struct for keeping the hashmaps from `RevealedSparseTrie`. -#[derive(Debug, Clone, PartialEq, Eq, Default)] -pub struct SparseTrieState { - /// Map from a path (nibbles) to its corresponding sparse trie node. - nodes: HashMap, - /// When a branch is set, the corresponding child subtree is stored in the database. - branch_node_tree_masks: HashMap, - /// When a bit is set, the corresponding child is stored as a hash in the database. - branch_node_hash_masks: HashMap, - /// Map from leaf key paths to their values. - values: HashMap>, -} +/// The level below which the sparse trie hashes are calculated in +/// [`SerialSparseTrie::update_subtrie_hashes`]. +const SPARSE_TRIE_SUBTRIE_HASHES_LEVEL: usize = 2; /// A sparse trie that is either in a "blind" state (no nodes are revealed, root node hash is /// unknown) or in a "revealed" state (root node has been revealed and the trie can be updated). @@ -77,64 +42,39 @@ pub struct SparseTrieState { /// 2. Update tracking - changes to the trie structure can be tracked and selectively persisted /// 3. Incremental operations - nodes can be revealed as needed without loading the entire trie. /// This is what gives rise to the notion of a "sparse" trie. -#[derive(PartialEq, Eq, Default, Clone)] -pub enum SparseTrie
<P = DefaultBlindedProvider>
{ - /// This is a variant that can be used to store a previously allocated trie. In these cases, - /// the trie will still be treated as blind, but the allocated trie will be reused if the trie - /// becomes revealed. - AllocatedEmpty { - /// This is the state of the allocated trie. - allocated: SparseTrieState, - }, +#[derive(PartialEq, Eq, Debug)] +pub enum SparseTrie { /// The trie is blind -- no nodes have been revealed /// - /// This is the default state. In this state, - /// the trie cannot be directly queried or modified until nodes are revealed. - #[default] - Blind, + /// This is the default state. In this state, the trie cannot be directly queried or modified + /// until nodes are revealed. + /// + /// In this state the `SparseTrie` can optionally carry with it a cleared `SerialSparseTrie`. + /// This allows for reusing the trie's allocations between payload executions. + Blind(Option>), /// Some nodes in the Trie have been revealed. /// /// In this state, the trie can be queried and modified for the parts /// that have been revealed. Other parts remain blind and require revealing /// before they can be accessed. - Revealed(Box>), + Revealed(Box), } -impl

<P> fmt::Debug for SparseTrie<P>

{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Self::AllocatedEmpty { .. } => write!(f, "AllocatedEmpty"), - Self::Blind => write!(f, "Blind"), - Self::Revealed(revealed) => write!(f, "Revealed({revealed:?})"), - } +impl Default for SparseTrie { + fn default() -> Self { + Self::Blind(None) } } -impl SparseTrie { - /// Creates a new blind sparse trie. - /// - /// # Examples - /// - /// ``` - /// use reth_trie_sparse::{blinded::DefaultBlindedProvider, SparseTrie}; - /// - /// let trie: SparseTrie = SparseTrie::blind(); - /// assert!(trie.is_blind()); - /// let trie: SparseTrie = SparseTrie::default(); - /// assert!(trie.is_blind()); - /// ``` - pub const fn blind() -> Self { - Self::Blind - } - +impl SparseTrie { /// Creates a new revealed but empty sparse trie with `SparseNode::Empty` as root node. /// /// # Examples /// /// ``` - /// use reth_trie_sparse::{blinded::DefaultBlindedProvider, SparseTrie}; + /// use reth_trie_sparse::{provider::DefaultTrieNodeProvider, SerialSparseTrie, SparseTrie}; /// - /// let trie: SparseTrie = SparseTrie::revealed_empty(); + /// let trie = SparseTrie::::revealed_empty(); /// assert!(!trie.is_blind()); /// ``` pub fn revealed_empty() -> Self { @@ -151,27 +91,61 @@ impl SparseTrie { /// /// # Returns /// - /// A mutable reference to the underlying [`RevealedSparseTrie`]. + /// A mutable reference to the underlying [`SparseTrieInterface`]. pub fn reveal_root( &mut self, root: TrieNode, masks: TrieMasks, retain_updates: bool, - ) -> SparseTrieResult<&mut RevealedSparseTrie> { - self.reveal_root_with_provider(Default::default(), root, masks, retain_updates) + ) -> SparseTrieResult<&mut T> { + // if `Blind`, we initialize the revealed trie with the given root node, using a + // pre-allocated trie if available. + if self.is_blind() { + let mut revealed_trie = if let Self::Blind(Some(cleared_trie)) = core::mem::take(self) { + cleared_trie + } else { + Box::default() + }; + + *revealed_trie = revealed_trie.with_root(root, masks, retain_updates)?; + *self = Self::Revealed(revealed_trie); + } + + Ok(self.as_revealed_mut().unwrap()) } } -impl

<P> SparseTrie<P>

{ +impl SparseTrie { + /// Creates a new blind sparse trie. + /// + /// # Examples + /// + /// ``` + /// use reth_trie_sparse::{provider::DefaultTrieNodeProvider, SerialSparseTrie, SparseTrie}; + /// + /// let trie = SparseTrie::::blind(); + /// assert!(trie.is_blind()); + /// let trie = SparseTrie::::default(); + /// assert!(trie.is_blind()); + /// ``` + pub const fn blind() -> Self { + Self::Blind(None) + } + /// Returns `true` if the sparse trie has no revealed nodes. pub const fn is_blind(&self) -> bool { - matches!(self, Self::Blind) + matches!(self, Self::Blind(_)) + } + + /// Returns `true` if the sparse trie is revealed. + pub const fn is_revealed(&self) -> bool { + matches!(self, Self::Revealed(_)) } /// Returns an immutable reference to the underlying revealed sparse trie. /// /// Returns `None` if the trie is blinded. - pub const fn as_revealed_ref(&self) -> Option<&RevealedSparseTrie
<P>
> { + pub const fn as_revealed_ref(&self) -> Option<&T> { if let Self::Revealed(revealed) = self { Some(revealed) } else { @@ -182,7 +156,7 @@ impl

<P> SparseTrie<P>

{ /// Returns a mutable reference to the underlying revealed sparse trie. /// /// Returns `None` if the trie is blinded. - pub fn as_revealed_mut(&mut self) -> Option<&mut RevealedSparseTrie
<P>
> { + pub fn as_revealed_mut(&mut self) -> Option<&mut T> { if let Self::Revealed(revealed) = self { Some(revealed) } else { @@ -190,54 +164,6 @@ impl

<P> SparseTrie<P>

{ } } - /// Reveals the root node using a specified provider. - /// - /// This function is similar to [`Self::reveal_root`] but allows the caller to provide - /// a custom provider for fetching blinded nodes. - /// - /// # Returns - /// - /// Mutable reference to [`RevealedSparseTrie`]. - pub fn reveal_root_with_provider( - &mut self, - provider: P, - root: TrieNode, - masks: TrieMasks, - retain_updates: bool, - ) -> SparseTrieResult<&mut RevealedSparseTrie
<P>
> { - // we take the allocated state here, which will make sure we are either `Blind` or - // `Revealed`, and giving us the allocated state if we were `AllocatedEmpty`. - let allocated = self.take_allocated_state(); - - // if `Blind`, we initialize the revealed trie - if self.is_blind() { - let mut revealed = - RevealedSparseTrie::from_provider_and_root(provider, root, masks, retain_updates)?; - - // If we had an allocated state, we use its maps internally. use_allocated_state copies - // over any information we had from revealing. - if let Some(allocated) = allocated { - revealed.use_allocated_state(allocated); - } - - *self = Self::Revealed(Box::new(revealed)); - } - Ok(self.as_revealed_mut().unwrap()) - } - - /// Take the allocated state if this is `AllocatedEmpty`, otherwise returns `None`. - /// - /// Converts this `SparseTrie` into `Blind` if this was `AllocatedEmpty`. - pub fn take_allocated_state(&mut self) -> Option { - if let Self::AllocatedEmpty { allocated } = self { - let state = core::mem::take(allocated); - *self = Self::Blind; - Some(state) - } else { - None - } - } - /// Wipes the trie by removing all nodes and values, /// and resetting the trie to only contain an empty root node. /// @@ -248,16 +174,6 @@ impl

<P> SparseTrie<P>

{ Ok(()) } - /// Returns a `SparseTrieState` obtained by clearing the sparse trie state and reusing the - /// allocated state if it was `AllocatedEmpty` or `Revealed`. - pub fn cleared(self) -> SparseTrieState { - match self { - Self::Revealed(revealed) => revealed.cleared_state(), - Self::AllocatedEmpty { allocated } => allocated, - Self::Blind => Default::default(), - } - } - /// Calculates the root hash of the trie. /// /// This will update any remaining dirty nodes before computing the root hash. @@ -288,17 +204,33 @@ impl

<P> SparseTrie<P>

{ let revealed = self.as_revealed_mut()?; Some((revealed.root(), revealed.take_updates())) } -} -impl SparseTrie
<P: BlindedProvider>
{ + /// Returns a [`SparseTrie::Blind`] based on this one. If this instance was revealed, or was + /// itself a `Blind` with a pre-allocated [`SparseTrieInterface`], this will return + /// a `Blind` carrying a cleared pre-allocated [`SparseTrieInterface`]. + pub fn clear(self) -> Self { + match self { + Self::Blind(_) => self, + Self::Revealed(mut trie) => { + trie.clear(); + Self::Blind(Some(trie)) + } + } + } + /// Updates (or inserts) a leaf at the given key path with the specified RLP-encoded value. /// /// # Errors /// /// Returns an error if the trie is still blind, or if the update fails. - pub fn update_leaf(&mut self, path: Nibbles, value: Vec) -> SparseTrieResult<()> { + pub fn update_leaf( + &mut self, + path: Nibbles, + value: Vec, + provider: impl TrieNodeProvider, + ) -> SparseTrieResult<()> { let revealed = self.as_revealed_mut().ok_or(SparseTrieErrorKind::Blind)?; - revealed.update_leaf(path, value)?; + revealed.update_leaf(path, value, provider)?; Ok(()) } @@ -307,9 +239,13 @@ impl SparseTrie
<P: BlindedProvider>
{ /// # Errors /// /// Returns an error if the trie is still blind, or if the leaf cannot be removed - pub fn remove_leaf(&mut self, path: &Nibbles) -> SparseTrieResult<()> { + pub fn remove_leaf( + &mut self, + path: &Nibbles, + provider: impl TrieNodeProvider, + ) -> SparseTrieResult<()> { let revealed = self.as_revealed_mut().ok_or(SparseTrieErrorKind::Blind)?; - revealed.remove_leaf(path)?; + revealed.remove_leaf(path, provider)?; Ok(()) } } @@ -328,10 +264,7 @@ impl SparseTrie
<P: BlindedProvider>
{ /// The opposite is also true. /// - All keys in `values` collection are full leaf paths. #[derive(Clone, PartialEq, Eq)] -pub struct RevealedSparseTrie
<P = DefaultBlindedProvider>
{ - /// Provider used for retrieving blinded nodes. - /// This allows lazily loading parts of the trie from an external source. - provider: P, +pub struct SerialSparseTrie { /// Map from a path (nibbles) to its corresponding sparse trie node. /// This contains all of the revealed nodes in trie. nodes: HashMap, @@ -351,9 +284,9 @@ pub struct RevealedSparseTrie
<P = DefaultBlindedProvider>
{ rlp_buf: Vec, } -impl

<P> fmt::Debug for RevealedSparseTrie<P>

{ +impl fmt::Debug for SerialSparseTrie { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RevealedSparseTrie") + f.debug_struct("SerialSparseTrie") .field("nodes", &self.nodes) .field("branch_tree_masks", &self.branch_node_tree_masks) .field("branch_hash_masks", &self.branch_node_hash_masks) @@ -371,7 +304,7 @@ fn encode_nibbles(nibbles: &Nibbles) -> String { encoded[..nibbles.len()].to_string() } -impl fmt::Display for RevealedSparseTrie
<P>
{ +impl fmt::Display for SerialSparseTrie { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // This prints the trie in preorder traversal, using a stack let mut stack = Vec::new(); @@ -437,10 +370,9 @@ impl fmt::Display for RevealedSparseTrie
<P>
{ } } -impl Default for RevealedSparseTrie { +impl Default for SerialSparseTrie { fn default() -> Self { Self { - provider: Default::default(), nodes: HashMap::from_iter([(Nibbles::default(), SparseNode::Empty)]), branch_node_tree_masks: HashMap::default(), branch_node_hash_masks: HashMap::default(), @@ -452,181 +384,36 @@ impl Default for RevealedSparseTrie { } } -impl RevealedSparseTrie { - /// Creates a new revealed sparse trie from the given root node. - /// - /// This function initializes the internal structures and then reveals the root. - /// It is a convenient method to create a [`RevealedSparseTrie`] when you already have - /// the root node available. - /// - /// # Returns - /// - /// A [`RevealedSparseTrie`] if successful, or an error if revealing fails. - pub fn from_root( +impl SparseTrieInterface for SerialSparseTrie { + fn with_root( + mut self, root: TrieNode, masks: TrieMasks, retain_updates: bool, ) -> SparseTrieResult { - let mut this = Self { - provider: Default::default(), - nodes: HashMap::default(), - branch_node_tree_masks: HashMap::default(), - branch_node_hash_masks: HashMap::default(), - values: HashMap::default(), - prefix_set: PrefixSetMut::default(), - rlp_buf: Vec::new(), - updates: None, - } - .with_updates(retain_updates); - this.reveal_node(Nibbles::default(), root, masks)?; - Ok(this) - } -} - -impl

<P> RevealedSparseTrie<P>

{ - /// Creates a new revealed sparse trie from the given provider and root node. - /// - /// Similar to `from_root`, but allows specifying a custom provider for - /// retrieving blinded nodes. - /// - /// # Returns - /// - /// A [`RevealedSparseTrie`] if successful, or an error if revealing fails. - pub fn from_provider_and_root( - provider: P, - node: TrieNode, - masks: TrieMasks, - retain_updates: bool, - ) -> SparseTrieResult { - let mut this = Self { - provider, - nodes: HashMap::default(), - branch_node_tree_masks: HashMap::default(), - branch_node_hash_masks: HashMap::default(), - values: HashMap::default(), - prefix_set: PrefixSetMut::default(), - updates: None, - rlp_buf: Vec::new(), - } - .with_updates(retain_updates); - this.reveal_node(Nibbles::default(), node, masks)?; - Ok(this) - } - - /// Replaces the current provider with a new provider. - /// - /// This allows changing how blinded nodes are retrieved without - /// rebuilding the entire trie structure. - /// - /// # Returns - /// - /// A new [`RevealedSparseTrie`] with the updated provider. - pub fn with_provider(self, provider: BP) -> RevealedSparseTrie { - RevealedSparseTrie { - provider, - nodes: self.nodes, - branch_node_tree_masks: self.branch_node_tree_masks, - branch_node_hash_masks: self.branch_node_hash_masks, - values: self.values, - prefix_set: self.prefix_set, - updates: self.updates, - rlp_buf: self.rlp_buf, - } - } - - /// Sets the fields of this `RevealedSparseTrie` to the fields of the input - /// `SparseTrieState`. - /// - /// This is meant for reusing the allocated maps contained in the `SparseTrieState`. - /// - /// Copies over any existing nodes, branch masks, and values. - pub fn use_allocated_state(&mut self, mut other: SparseTrieState) { - for (path, node) in self.nodes.drain() { - other.nodes.insert(path, node); - } - for (path, mask) in self.branch_node_tree_masks.drain() { - other.branch_node_tree_masks.insert(path, mask); - } - for (path, mask) in self.branch_node_hash_masks.drain() { - other.branch_node_hash_masks.insert(path, mask); - } - for (path, value) in self.values.drain() { - other.values.insert(path, value); - } + self = self.with_updates(retain_updates); - self.nodes = other.nodes; - self.branch_node_tree_masks = other.branch_node_tree_masks; - self.branch_node_hash_masks = other.branch_node_hash_masks; - self.values = other.values; - } + // A fresh/cleared `SerialSparseTrie` has a `SparseNode::Empty` at its root. Delete that + // so we can reveal the new root node. + let path = Nibbles::default(); + let _removed_root = self.nodes.remove(&path).expect("root node should exist"); + debug_assert_eq!(_removed_root, SparseNode::Empty); - /// Set the provider for the trie. - pub fn set_provider(&mut self, provider: P) { - self.provider = provider; + self.reveal_node(path, root, masks)?; + Ok(self) } - /// Configures the trie to retain information about updates. - /// - /// If `retain_updates` is true, the trie will record branch node updates and deletions. - /// This information can then be used to efficiently update an external database. - pub fn with_updates(mut self, retain_updates: bool) -> Self { + fn with_updates(mut self, retain_updates: bool) -> Self { if retain_updates { self.updates = Some(SparseTrieUpdates::default()); } self } - /// Returns a reference to the current sparse trie updates. - /// - /// If no updates have been made/recorded, returns an empty update set. 
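The removed `updates_ref` (its body opens the next chunk) survives as the trait method shown earlier: it maps `Option<&SparseTrieUpdates>` into a `Cow` that borrows when updates are being tracked and hands out an owned empty set when they are not, so callers never see an `Option`. The pattern isolated with a toy updates type:

```rust
use std::borrow::Cow;

#[derive(Debug, Clone, Default, PartialEq, Eq)]
struct Updates(Vec<u64>);

/// Borrow the tracked updates if present, otherwise materialize an owned default.
fn updates_ref(updates: Option<&Updates>) -> Cow<'_, Updates> {
    updates.map_or(Cow::Owned(Updates::default()), Cow::Borrowed)
}

fn main() {
    let tracked = Updates(vec![1, 2, 3]);
    assert!(matches!(updates_ref(Some(&tracked)), Cow::Borrowed(_)));
    assert!(matches!(updates_ref(None), Cow::Owned(_)));
}
```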
- pub fn updates_ref(&self) -> Cow<'_, SparseTrieUpdates> { - self.updates.as_ref().map_or(Cow::Owned(SparseTrieUpdates::default()), Cow::Borrowed) - } - - /// Returns an immutable reference to all nodes in the sparse trie. - pub const fn nodes_ref(&self) -> &HashMap { - &self.nodes - } - - /// Retrieves a reference to the leaf value stored at the given key path, if it is revealed. - /// - /// This method efficiently retrieves values from the trie without traversing - /// the entire node structure, as values are stored in a separate map. - /// - /// Note: a value can exist in the full trie and this function still returns `None` - /// because the value has not been revealed. - /// Hence a `None` indicates two possibilities: - /// - The value does not exists in the trie, so it cannot be revealed - /// - The value has not yet been revealed. In order to determine which is true, one would need - /// an exclusion proof. - pub fn get_leaf_value(&self, path: &Nibbles) -> Option<&Vec> { - self.values.get(path) - } - - /// Consumes and returns the currently accumulated trie updates. - /// - /// This is useful when you want to apply the updates to an external database, - /// and then start tracking a new set of updates. - pub fn take_updates(&mut self) -> SparseTrieUpdates { - self.updates.take().unwrap_or_default() - } - - /// Reserves capacity in the nodes map for at least `additional` more nodes. - pub fn reserve_nodes(&mut self, additional: usize) { + fn reserve_nodes(&mut self, additional: usize) { self.nodes.reserve(additional); } - - /// Reveals a trie node if it has not been revealed before. - /// - /// This internal function decodes a trie node and inserts it into the nodes map. - /// It handles different node types (leaf, extension, branch) by appropriately - /// adding them to the trie structure and recursively revealing their children. - /// - /// - /// # Returns - /// - /// `Ok(())` if successful, or an error if node was not revealed. - pub fn reveal_node( + fn reveal_node( &mut self, path: Nibbles, node: TrieNode, @@ -736,7 +523,7 @@ impl
<P> RevealedSparseTrie<P>
{ SparseNode::Hash(hash) => { let mut full = *entry.key(); full.extend(&leaf.key); - self.values.insert(full, leaf.value); + self.values.insert(full, leaf.value.clone()); entry.insert(SparseNode::Leaf { key: leaf.key, // Memoize the hash of a previously blinded node in a new leaf @@ -761,7 +548,7 @@ impl
<P> RevealedSparseTrie<P>
{ let mut full = *entry.key(); full.extend(&leaf.key); entry.insert(SparseNode::new_leaf(leaf.key)); - self.values.insert(full, leaf.value); + self.values.insert(full, leaf.value.clone()); } }, } @@ -769,690 +556,393 @@ impl
<P> RevealedSparseTrie<P>
{ Ok(()) } - /// Reveals either a node or its hash placeholder based on the provided child data. - /// - /// When traversing the trie, we often encounter references to child nodes that - /// are either directly embedded or represented by their hash. This method - /// handles both cases: - /// - /// 1. If the child data represents a hash (32+1=33 bytes), store it as a hash node - /// 2. Otherwise, decode the data as a [`TrieNode`] and recursively reveal it using - /// `reveal_node` - /// - /// # Returns - /// - /// Returns `Ok(())` if successful, or an error if the node cannot be revealed. - /// - /// # Error Handling - /// - /// Will error if there's a conflict between a new hash node and an existing one - /// at the same path - fn reveal_node_or_hash(&mut self, path: Nibbles, child: &[u8]) -> SparseTrieResult<()> { - if child.len() == B256::len_bytes() + 1 { - let hash = B256::from_slice(&child[1..]); - match self.nodes.entry(path) { - Entry::Occupied(entry) => match entry.get() { - // Hash node with a different hash can't be handled. - SparseNode::Hash(previous_hash) if previous_hash != &hash => { - return Err(SparseTrieErrorKind::Reveal { - path: *entry.key(), - node: Box::new(SparseNode::Hash(hash)), - } - .into()) - } - _ => {} - }, - Entry::Vacant(entry) => { - entry.insert(SparseNode::Hash(hash)); - } - } - return Ok(()) + fn reveal_nodes(&mut self, mut nodes: Vec) -> SparseTrieResult<()> { + nodes.sort_unstable_by_key(|node| node.path); + for node in nodes { + self.reveal_node(node.path, node.node, node.masks)?; } - - self.reveal_node(path, TrieNode::decode(&mut &child[..])?, TrieMasks::none()) + Ok(()) } - /// Traverse the trie from the root down to the leaf at the given path, - /// removing and collecting all nodes along that path. - /// - /// This helper function is used during leaf removal to extract the nodes of the trie - /// that will be affected by the deletion. These nodes are then re-inserted and modified - /// as needed (collapsing extension nodes etc) given that the leaf has now been removed. - /// - /// # Returns - /// - /// Returns a vector of [`RemovedSparseNode`] representing the nodes removed during the - /// traversal. - /// - /// # Errors - /// - /// Returns an error if a blinded node or an empty node is encountered unexpectedly, - /// as these prevent proper removal of the leaf. - fn take_nodes_for_path(&mut self, path: &Nibbles) -> SparseTrieResult> { - let mut current = Nibbles::default(); // Start traversal from the root - let mut nodes = Vec::new(); // Collect traversed nodes + fn update_leaf( + &mut self, + full_path: Nibbles, + value: Vec, + provider: P, + ) -> SparseTrieResult<()> { + self.prefix_set.insert(full_path); + let existing = self.values.insert(full_path, value); + if existing.is_some() { + // trie structure unchanged, return immediately + return Ok(()) + } - while let Some(node) = self.nodes.remove(¤t) { - match &node { - SparseNode::Empty => return Err(SparseTrieErrorKind::Blind.into()), - &SparseNode::Hash(hash) => { + let mut current = Nibbles::default(); + while let Some(node) = self.nodes.get_mut(¤t) { + match node { + SparseNode::Empty => { + *node = SparseNode::new_leaf(full_path); + break + } + &mut SparseNode::Hash(hash) => { return Err(SparseTrieErrorKind::BlindedNode { path: current, hash }.into()) } - SparseNode::Leaf { key: _key, .. } => { - // Leaf node is always the one that we're deleting, and no other leaf nodes can - // be found during traversal. + SparseNode::Leaf { key: current_key, .. 
} => { + current.extend(current_key); - #[cfg(debug_assertions)] - { - let mut current = current; - current.extend(_key); - assert_eq!(¤t, path); + // this leaf is being updated + if current == full_path { + unreachable!("we already checked leaf presence in the beginning"); } - nodes.push(RemovedSparseNode { - path: current, - node, - unset_branch_nibble: None, - }); - break + // find the common prefix + let common = current.common_prefix_length(&full_path); + + // update existing node + let new_ext_key = current.slice(current.len() - current_key.len()..common); + *node = SparseNode::new_ext(new_ext_key); + + // create a branch node and corresponding leaves + self.nodes.reserve(3); + self.nodes.insert( + current.slice(..common), + SparseNode::new_split_branch( + current.get_unchecked(common), + full_path.get_unchecked(common), + ), + ); + self.nodes.insert( + full_path.slice(..=common), + SparseNode::new_leaf(full_path.slice(common + 1..)), + ); + self.nodes.insert( + current.slice(..=common), + SparseNode::new_leaf(current.slice(common + 1..)), + ); + + break; } SparseNode::Extension { key, .. } => { - #[cfg(debug_assertions)] - { - let mut current = current; - current.extend(key); - assert!( - path.starts_with(¤t), - "path: {path:?}, current: {current:?}, key: {key:?}", - ); - } - - let path = current; current.extend(key); - nodes.push(RemovedSparseNode { path, node, unset_branch_nibble: None }); - } - SparseNode::Branch { state_mask, .. } => { - let nibble = path.get_unchecked(current.len()); - debug_assert!( - state_mask.is_bit_set(nibble), - "current: {current:?}, path: {path:?}, nibble: {nibble:?}, state_mask: {state_mask:?}", - ); - // If the branch node has a child that is a leaf node that we're removing, - // we need to unset this nibble. - // Any other branch nodes will not require unsetting the nibble, because - // deleting one leaf node can not remove the whole path - // where the branch node is located. - let mut child_path = current; - child_path.push_unchecked(nibble); - let unset_branch_nibble = self - .nodes - .get(&child_path) - .is_some_and(move |node| match node { - SparseNode::Leaf { key, .. } => { - // Get full path of the leaf node - child_path.extend(key); - &child_path == path + if !full_path.starts_with(¤t) { + // find the common prefix + let common = current.common_prefix_length(&full_path); + *key = current.slice(current.len() - key.len()..common); + + // If branch node updates retention is enabled, we need to query the + // extension node child to later set the hash mask for a parent branch node + // correctly. + if self.updates.is_some() { + // Check if the extension node child is a hash that needs to be revealed + if self.nodes.get(¤t).unwrap().is_hash() { + if let Some(RevealedNode { node, tree_mask, hash_mask }) = + provider.trie_node(¤t)? 
+ { + let decoded = TrieNode::decode(&mut &node[..])?; + trace!( + target: "trie::sparse", + ?current, + ?decoded, + ?tree_mask, + ?hash_mask, + "Revealing extension node child", + ); + self.reveal_node( + current, + decoded, + TrieMasks { hash_mask, tree_mask }, + )?; + } } - _ => false, - }) - .then_some(nibble); + } - nodes.push(RemovedSparseNode { path: current, node, unset_branch_nibble }); + // create state mask for new branch node + // NOTE: this might overwrite the current extension node + self.nodes.reserve(3); + let branch = SparseNode::new_split_branch( + current.get_unchecked(common), + full_path.get_unchecked(common), + ); + self.nodes.insert(current.slice(..common), branch); + + // create new leaf + let new_leaf = SparseNode::new_leaf(full_path.slice(common + 1..)); + self.nodes.insert(full_path.slice(..=common), new_leaf); + + // recreate extension to previous child if needed + let key = current.slice(common + 1..); + if !key.is_empty() { + self.nodes.insert(current.slice(..=common), SparseNode::new_ext(key)); + } + break; + } + } + SparseNode::Branch { state_mask, .. } => { + let nibble = full_path.get_unchecked(current.len()); current.push_unchecked(nibble); + if !state_mask.is_bit_set(nibble) { + state_mask.set_bit(nibble); + let new_leaf = SparseNode::new_leaf(full_path.slice(current.len()..)); + self.nodes.insert(current, new_leaf); + break; + } } - } + }; } - Ok(nodes) + Ok(()) } - /// Removes all nodes and values from the trie, resetting it to a blank state - /// with only an empty root node. - /// - /// Note: All previously tracked changes to the trie are also removed. - pub fn wipe(&mut self) { - self.nodes = HashMap::from_iter([(Nibbles::default(), SparseNode::Empty)]); - self.values = HashMap::default(); - self.prefix_set = PrefixSetMut::all(); - self.updates = self.updates.is_some().then(SparseTrieUpdates::wiped); - } + fn remove_leaf( + &mut self, + full_path: &Nibbles, + provider: P, + ) -> SparseTrieResult<()> { + if self.values.remove(full_path).is_none() { + if let Some(&SparseNode::Hash(hash)) = self.nodes.get(full_path) { + // Leaf is present in the trie, but it's blinded. + return Err(SparseTrieErrorKind::BlindedNode { path: *full_path, hash }.into()) + } - /// This clears all data structures in the sparse trie, keeping the backing data structures - /// allocated. - /// - /// This is useful for reusing the trie without needing to reallocate memory. - pub fn clear(&mut self) { - self.nodes.clear(); - self.branch_node_tree_masks.clear(); - self.branch_node_hash_masks.clear(); - self.values.clear(); - self.prefix_set.clear(); - if let Some(updates) = self.updates.as_mut() { - updates.clear() + trace!(target: "trie::sparse", ?full_path, "Leaf node is not present in the trie"); + // Leaf is not present in the trie. + return Ok(()) } - self.rlp_buf.clear(); - } + self.prefix_set.insert(*full_path); - /// Returns the cleared `SparseTrieState` for this `RevealedSparseTrie`. - pub fn cleared_state(mut self) -> SparseTrieState { - self.clear(); - SparseTrieState { - nodes: self.nodes, - branch_node_tree_masks: self.branch_node_tree_masks, - branch_node_hash_masks: self.branch_node_hash_masks, - values: self.values, - } - } + // If the path wasn't present in `values`, we still need to walk the trie and ensure that + // there is no node at the path. When a leaf node is a blinded `Hash`, it will have an entry + // in `nodes`, but not in the `values`. - /// Calculates and returns the root hash of the trie. 
- /// - /// Before computing the hash, this function processes any remaining (dirty) nodes by - /// updating their RLP encodings. The root hash is either: - /// 1. The cached hash (if no dirty nodes were found) - /// 2. The keccak256 hash of the root node's RLP representation - pub fn root(&mut self) -> B256 { - // Take the current prefix set - let mut prefix_set = core::mem::take(&mut self.prefix_set).freeze(); - let rlp_node = self.rlp_node_allocate(&mut prefix_set); - if let Some(root_hash) = rlp_node.as_hash() { - root_hash - } else { - keccak256(rlp_node) + let mut removed_nodes = self.take_nodes_for_path(full_path)?; + // Pop the first node from the stack which is the leaf node we want to remove. + let mut child = removed_nodes.pop().expect("leaf exists"); + #[cfg(debug_assertions)] + { + let mut child_path = child.path; + let SparseNode::Leaf { key, .. } = &child.node else { panic!("expected leaf node") }; + child_path.extend(key); + assert_eq!(&child_path, full_path); } - } - - /// Recalculates and updates the RLP hashes of nodes deeper than or equal to the specified - /// `depth`. - /// - /// The root node is considered to be at level 0. This method is useful for optimizing - /// hash recalculations after localized changes to the trie structure: - /// - /// This function identifies all nodes that have changed (based on the prefix set) at the given - /// depth and recalculates their RLP representation. - pub fn update_rlp_node_level(&mut self, depth: usize) { - // Take the current prefix set - let mut prefix_set = core::mem::take(&mut self.prefix_set).freeze(); - let mut buffers = RlpNodeBuffers::default(); - - // Get the nodes that have changed at the given depth. - let (targets, new_prefix_set) = self.get_changed_nodes_at_depth(&mut prefix_set, depth); - // Update the prefix set to the prefix set of the nodes that still need to be updated. - self.prefix_set = new_prefix_set; - trace!(target: "trie::sparse", ?depth, ?targets, "Updating nodes at depth"); + // If we don't have any other removed nodes, insert an empty node at the root. + if removed_nodes.is_empty() { + debug_assert!(self.nodes.is_empty()); + self.nodes.insert(Nibbles::default(), SparseNode::Empty); - let mut temp_rlp_buf = core::mem::take(&mut self.rlp_buf); - for (level, path) in targets { - buffers.path_stack.push(RlpNodePathStackItem { - level, - path, - is_in_prefix_set: Some(true), - }); - self.rlp_node(&mut prefix_set, &mut buffers, &mut temp_rlp_buf); + return Ok(()) } - self.rlp_buf = temp_rlp_buf; - } - - /// Returns a list of (level, path) tuples identifying the nodes that have changed at the - /// specified depth, along with a new prefix set for the paths above the provided depth that - /// remain unchanged. - /// - /// Leaf nodes with a depth less than `depth` are returned too. - /// - /// This method helps optimize hash recalculations by identifying which specific - /// nodes need to be updated at each level of the trie. - /// - /// # Parameters - /// - /// - `prefix_set`: The current prefix set tracking which paths need updates. - /// - `depth`: The minimum depth (relative to the root) to include nodes in the targets. - /// - /// # Returns - /// - /// A tuple containing: - /// - A vector of `(level, Nibbles)` pairs for nodes that require updates at or below the - /// specified depth. - /// - A `PrefixSetMut` containing paths shallower than the specified depth that still need to be - /// tracked for future updates. 
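Worth pausing on: this depth-targeted pass is what allows subtree hashes to be computed ahead of the full root fold. A minimal sketch of the intended call pattern (the depth value and the pending-changes setup are illustrative, not taken from this diff):

    // `trie` is a SerialSparseTrie whose prefix set records pending leaf changes.
    trie.update_rlp_node_level(2); // recompute RLP/hashes for changed nodes at depth >= 2
    // Paths shallower than depth 2 are carried over into a fresh prefix set,
    // so `root()` now only has to fold the remaining upper levels.
    let root = trie.root();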
- fn get_changed_nodes_at_depth( - &self, - prefix_set: &mut PrefixSet, - depth: usize, - ) -> (Vec<(usize, Nibbles)>, PrefixSetMut) { - let mut unchanged_prefix_set = PrefixSetMut::default(); - let mut paths = Vec::from([(Nibbles::default(), 0)]); - let mut targets = Vec::new(); - while let Some((mut path, level)) = paths.pop() { - match self.nodes.get(&path).unwrap() { - SparseNode::Empty | SparseNode::Hash(_) => {} - SparseNode::Leaf { key: _, hash } => { - if hash.is_some() && !prefix_set.contains(&path) { - continue - } + // Walk the stack of removed nodes from the back and re-insert them back into the trie, + // adjusting the node type as needed. + while let Some(removed_node) = removed_nodes.pop() { + let removed_path = removed_node.path; - targets.push((level, path)); + let new_node = match &removed_node.node { + SparseNode::Empty => return Err(SparseTrieErrorKind::Blind.into()), + &SparseNode::Hash(hash) => { + return Err(SparseTrieErrorKind::BlindedNode { path: removed_path, hash }.into()) } - SparseNode::Extension { key, hash, store_in_db_trie: _ } => { - if hash.is_some() && !prefix_set.contains(&path) { - continue - } - - if level >= depth { - targets.push((level, path)); - } else { - unchanged_prefix_set.insert(path); - - path.extend(key); - paths.push((path, level + 1)); - } + SparseNode::Leaf { .. } => { + unreachable!("we already popped the leaf node") } - SparseNode::Branch { state_mask, hash, store_in_db_trie: _ } => { - if hash.is_some() && !prefix_set.contains(&path) { - continue - } + SparseNode::Extension { key, .. } => { + // If the node is an extension node, we need to look at its child to see if we + // need to merge them. + match &child.node { + SparseNode::Empty => return Err(SparseTrieErrorKind::Blind.into()), + &SparseNode::Hash(hash) => { + return Err( + SparseTrieErrorKind::BlindedNode { path: child.path, hash }.into() + ) + } + // For a leaf node, we collapse the extension node into a leaf node, + // extending the key. While it's impossible to encounter an extension node + // followed by a leaf node in a complete trie, it's possible here because we + // could have downgraded the extension node's child into a leaf node from + // another node type. + SparseNode::Leaf { key: leaf_key, .. } => { + self.nodes.remove(&child.path); - if level >= depth { - targets.push((level, path)); - } else { - unchanged_prefix_set.insert(path); + let mut new_key = *key; + new_key.extend(leaf_key); + SparseNode::new_leaf(new_key) + } + // For an extension node, we collapse them into one extension node, + // extending the key + SparseNode::Extension { key: extension_key, .. } => { + self.nodes.remove(&child.path); - for bit in CHILD_INDEX_RANGE.rev() { - if state_mask.is_bit_set(bit) { - let mut child_path = path; - child_path.push_unchecked(bit); - paths.push((child_path, level + 1)); - } + let mut new_key = *key; + new_key.extend(extension_key); + SparseNode::new_ext(new_key) } + // For a branch node, we just leave the extension node as-is. + SparseNode::Branch { .. } => removed_node.node, } } - } - } - - (targets, unchanged_prefix_set) - } - - /// Look up or calculate the RLP of the node at the root path. - /// - /// # Panics - /// - /// If the node at provided path does not exist. 
- pub fn rlp_node_allocate(&mut self, prefix_set: &mut PrefixSet) -> RlpNode { - let mut buffers = RlpNodeBuffers::new_with_root_path(); - let mut temp_rlp_buf = core::mem::take(&mut self.rlp_buf); - let result = self.rlp_node(prefix_set, &mut buffers, &mut temp_rlp_buf); - self.rlp_buf = temp_rlp_buf; - - result - } - - /// Looks up or computes the RLP encoding of the node specified by the current - /// path in the provided buffers. - /// - /// The function uses a stack (`RlpNodeBuffers::path_stack`) to track the traversal and - /// accumulate RLP encodings. - /// - /// # Parameters - /// - /// - `prefix_set`: The set of trie paths that need their nodes updated. - /// - `buffers`: The reusable buffers for stack management and temporary RLP values. - /// - /// # Panics - /// - /// If the node at provided path does not exist. - pub fn rlp_node( - &mut self, - prefix_set: &mut PrefixSet, - buffers: &mut RlpNodeBuffers, - rlp_buf: &mut Vec, - ) -> RlpNode { - let _starting_path = buffers.path_stack.last().map(|item| item.path); - - 'main: while let Some(RlpNodePathStackItem { level, path, mut is_in_prefix_set }) = - buffers.path_stack.pop() - { - let node = self.nodes.get_mut(&path).unwrap(); - trace!( - target: "trie::sparse", - ?_starting_path, - ?level, - ?path, - ?is_in_prefix_set, - ?node, - "Popped node from path stack" - ); - - // Check if the path is in the prefix set. - // First, check the cached value. If it's `None`, then check the prefix set, and update - // the cached value. - let mut prefix_set_contains = - |path: &Nibbles| *is_in_prefix_set.get_or_insert_with(|| prefix_set.contains(path)); + &SparseNode::Branch { mut state_mask, hash: _, store_in_db_trie: _ } => { + // If the node is a branch node, we need to check the number of children left + // after deleting the child at the given nibble. - let (rlp_node, node_type) = match node { - SparseNode::Empty => (RlpNode::word_rlp(&EMPTY_ROOT_HASH), SparseNodeType::Empty), - SparseNode::Hash(hash) => (RlpNode::word_rlp(hash), SparseNodeType::Hash), - SparseNode::Leaf { key, hash } => { - let mut path = path; - path.extend(key); - if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) { - (RlpNode::word_rlp(&hash), SparseNodeType::Leaf) - } else { - let value = self.values.get(&path).unwrap(); - rlp_buf.clear(); - let rlp_node = LeafNodeRef { key, value }.rlp(rlp_buf); - *hash = rlp_node.as_hash(); - (rlp_node, SparseNodeType::Leaf) + if let Some(removed_nibble) = removed_node.unset_branch_nibble { + state_mask.unset_bit(removed_nibble); } - } - SparseNode::Extension { key, hash, store_in_db_trie } => { - let mut child_path = path; - child_path.extend(key); - if let Some((hash, store_in_db_trie)) = - hash.zip(*store_in_db_trie).filter(|_| !prefix_set_contains(&path)) - { - ( - RlpNode::word_rlp(&hash), - SparseNodeType::Extension { store_in_db_trie: Some(store_in_db_trie) }, - ) - } else if buffers.rlp_node_stack.last().is_some_and(|e| e.path == child_path) { - let RlpNodeStackItem { - path: _, - rlp_node: child, - node_type: child_node_type, - } = buffers.rlp_node_stack.pop().unwrap(); - rlp_buf.clear(); - let rlp_node = ExtensionNodeRef::new(key, &child).rlp(rlp_buf); - *hash = rlp_node.as_hash(); - let store_in_db_trie_value = child_node_type.store_in_db_trie(); - - trace!( - target: "trie::sparse", - ?path, - ?child_path, - ?child_node_type, - "Extension node" - ); + // If only one child is left set in the branch node, we need to collapse it. 
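+ // Worked example (illustrative): a branch at path 0x1 with children
+ // {3, 5} loses the leaf under nibble 5, so its state mask becomes {3}. The
+ // code below then folds the branch into its sole remaining child: a child
+ // leaf or extension absorbs nibble 3 into its key, while a child branch
+ // leaves a one-nibble extension node behind.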
+ if state_mask.count_bits() == 1 { + let child_nibble = + state_mask.first_set_bit_index().expect("state mask is not empty"); - *store_in_db_trie = store_in_db_trie_value; + // Get full path of the only child node left. + let mut child_path = removed_path; + child_path.push_unchecked(child_nibble); - ( - rlp_node, - SparseNodeType::Extension { - // Inherit the `store_in_db_trie` flag from the child node, which is - // always the branch node - store_in_db_trie: store_in_db_trie_value, - }, - ) - } else { - // need to get rlp node for child first - buffers.path_stack.extend([ - RlpNodePathStackItem { level, path, is_in_prefix_set }, - RlpNodePathStackItem { - level: level + 1, - path: child_path, - is_in_prefix_set: None, - }, - ]); - continue - } - } - SparseNode::Branch { state_mask, hash, store_in_db_trie } => { - if let Some((hash, store_in_db_trie)) = - hash.zip(*store_in_db_trie).filter(|_| !prefix_set_contains(&path)) - { - buffers.rlp_node_stack.push(RlpNodeStackItem { - path, - rlp_node: RlpNode::word_rlp(&hash), - node_type: SparseNodeType::Branch { - store_in_db_trie: Some(store_in_db_trie), - }, - }); - continue - } - let retain_updates = self.updates.is_some() && prefix_set_contains(&path); + trace!(target: "trie::sparse", ?removed_path, ?child_path, "Branch node has only one child"); - buffers.branch_child_buf.clear(); - // Walk children in a reverse order from `f` to `0`, so we pop the `0` first - // from the stack and keep walking in the sorted order. - for bit in CHILD_INDEX_RANGE.rev() { - if state_mask.is_bit_set(bit) { - let mut child = path; - child.push_unchecked(bit); - buffers.branch_child_buf.push(child); + if self.nodes.get(&child_path).unwrap().is_hash() { + trace!(target: "trie::sparse", ?child_path, "Retrieving remaining blinded branch child"); + if let Some(RevealedNode { node, tree_mask, hash_mask }) = + provider.trie_node(&child_path)? + { + let decoded = TrieNode::decode(&mut &node[..])?; + trace!( + target: "trie::sparse", + ?child_path, + ?decoded, + ?tree_mask, + ?hash_mask, + "Revealing remaining blinded branch child" + ); + self.reveal_node( + child_path, + decoded, + TrieMasks { hash_mask, tree_mask }, + )?; + } } - } - - buffers - .branch_value_stack_buf - .resize(buffers.branch_child_buf.len(), Default::default()); - let mut added_children = false; - let mut tree_mask = TrieMask::default(); - let mut hash_mask = TrieMask::default(); - let mut hashes = Vec::new(); - for (i, child_path) in buffers.branch_child_buf.iter().enumerate() { - if buffers.rlp_node_stack.last().is_some_and(|e| &e.path == child_path) { - let RlpNodeStackItem { - path: _, - rlp_node: child, - node_type: child_node_type, - } = buffers.rlp_node_stack.pop().unwrap(); - - // Update the masks only if we need to retain trie updates - if retain_updates { - // SAFETY: it's a child, so it's never empty - let last_child_nibble = child_path.last().unwrap(); - - // Determine whether we need to set trie mask bit. - let should_set_tree_mask_bit = if let Some(store_in_db_trie) = - child_node_type.store_in_db_trie() - { - // A branch or an extension node explicitly set the - // `store_in_db_trie` flag - store_in_db_trie - } else { - // A blinded node has the tree mask bit set - child_node_type.is_hash() && - self.branch_node_tree_masks.get(&path).is_some_and( - |mask| mask.is_bit_set(last_child_nibble), - ) - }; - if should_set_tree_mask_bit { - tree_mask.set_bit(last_child_nibble); - } - - // Set the hash mask. 
If a child node is a revealed branch node OR - // is a blinded node that has its hash mask bit set according to the - // database, set the hash mask bit and save the hash. - let hash = child.as_hash().filter(|_| { - child_node_type.is_branch() || - (child_node_type.is_hash() && - self.branch_node_hash_masks - .get(&path) - .is_some_and(|mask| { - mask.is_bit_set(last_child_nibble) - })) - }); - if let Some(hash) = hash { - hash_mask.set_bit(last_child_nibble); - hashes.push(hash); - } - } + // Get the only child node. + let child = self.nodes.get(&child_path).unwrap(); - // Insert children in the resulting buffer in a normal order, - // because initially we iterated in reverse. - // SAFETY: i < len and len is never 0 - let original_idx = buffers.branch_child_buf.len() - i - 1; - buffers.branch_value_stack_buf[original_idx] = child; - added_children = true; - } else { - debug_assert!(!added_children); - buffers.path_stack.push(RlpNodePathStackItem { - level, - path, - is_in_prefix_set, - }); - buffers.path_stack.extend(buffers.branch_child_buf.drain(..).map( - |path| RlpNodePathStackItem { - level: level + 1, - path, - is_in_prefix_set: None, - }, - )); - continue 'main - } - } + let mut delete_child = false; + let new_node = match child { + SparseNode::Empty => return Err(SparseTrieErrorKind::Blind.into()), + &SparseNode::Hash(hash) => { + return Err(SparseTrieErrorKind::BlindedNode { + path: child_path, + hash, + } + .into()) + } + // If the only child is a leaf node, we downgrade the branch node into a + // leaf node, prepending the nibble to the key, and delete the old + // child. + SparseNode::Leaf { key, .. } => { + delete_child = true; - trace!( - target: "trie::sparse", - ?path, - ?tree_mask, - ?hash_mask, - "Branch node masks" - ); + let mut new_key = Nibbles::from_nibbles_unchecked([child_nibble]); + new_key.extend(key); + SparseNode::new_leaf(new_key) + } + // If the only child node is an extension node, we downgrade the branch + // node into an even longer extension node, prepending the nibble to the + // key, and delete the old child. + SparseNode::Extension { key, .. } => { + delete_child = true; - rlp_buf.clear(); - let branch_node_ref = - BranchNodeRef::new(&buffers.branch_value_stack_buf, *state_mask); - let rlp_node = branch_node_ref.rlp(rlp_buf); - *hash = rlp_node.as_hash(); + let mut new_key = Nibbles::from_nibbles_unchecked([child_nibble]); + new_key.extend(key); + SparseNode::new_ext(new_key) + } + // If the only child is a branch node, we downgrade the current branch + // node into a one-nibble extension node. + SparseNode::Branch { .. } => { + SparseNode::new_ext(Nibbles::from_nibbles_unchecked([child_nibble])) + } + }; - // Save a branch node update only if it's not a root node, and we need to - // persist updates. 
- let store_in_db_trie_value = if let Some(updates) = - self.updates.as_mut().filter(|_| retain_updates && !path.is_empty()) - { - let store_in_db_trie = !tree_mask.is_empty() || !hash_mask.is_empty(); - if store_in_db_trie { - // Store in DB trie if there are either any children that are stored in - // the DB trie, or any children represent hashed values - hashes.reverse(); - let branch_node = BranchNodeCompact::new( - *state_mask, - tree_mask, - hash_mask, - hashes, - hash.filter(|_| path.is_empty()), - ); - updates.updated_nodes.insert(path, branch_node); - } else if self - .branch_node_tree_masks - .get(&path) - .is_some_and(|mask| !mask.is_empty()) || - self.branch_node_hash_masks - .get(&path) - .is_some_and(|mask| !mask.is_empty()) - { - // If new tree and hash masks are empty, but previously they weren't, we - // need to remove the node update and add the node itself to the list of - // removed nodes. - updates.updated_nodes.remove(&path); - updates.removed_nodes.insert(path); - } else if self - .branch_node_hash_masks - .get(&path) - .is_none_or(|mask| mask.is_empty()) && - self.branch_node_hash_masks - .get(&path) - .is_none_or(|mask| mask.is_empty()) - { - // If new tree and hash masks are empty, and they were previously empty - // as well, we need to remove the node update. - updates.updated_nodes.remove(&path); + if delete_child { + self.nodes.remove(&child_path); } - store_in_db_trie - } else { - false - }; - *store_in_db_trie = Some(store_in_db_trie_value); + if let Some(updates) = self.updates.as_mut() { + updates.updated_nodes.remove(&removed_path); + updates.removed_nodes.insert(removed_path); + } - ( - rlp_node, - SparseNodeType::Branch { store_in_db_trie: Some(store_in_db_trie_value) }, - ) + new_node + } + // If more than one child is left set in the branch, we just re-insert it as-is. + else { + SparseNode::new_branch(state_mask) + } } }; - trace!( - target: "trie::sparse", - ?_starting_path, - ?level, - ?path, - ?node, - ?node_type, - ?is_in_prefix_set, - "Added node to rlp node stack" - ); + child = RemovedSparseNode { + path: removed_path, + node: new_node.clone(), + unset_branch_nibble: None, + }; + trace!(target: "trie::sparse", ?removed_path, ?new_node, "Re-inserting the node"); + self.nodes.insert(removed_path, new_node); + } - buffers.rlp_node_stack.push(RlpNodeStackItem { path, rlp_node, node_type }); + Ok(()) + } + + fn root(&mut self) -> B256 { + // Take the current prefix set + let mut prefix_set = core::mem::take(&mut self.prefix_set).freeze(); + let rlp_node = self.rlp_node_allocate(&mut prefix_set); + if let Some(root_hash) = rlp_node.as_hash() { + root_hash + } else { + keccak256(rlp_node) } + } - debug_assert_eq!(buffers.rlp_node_stack.len(), 1); - buffers.rlp_node_stack.pop().unwrap().rlp_node + fn update_subtrie_hashes(&mut self) { + self.update_rlp_node_level(SPARSE_TRIE_SUBTRIE_HASHES_LEVEL); } -} -/// Error type for a leaf lookup operation -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum LeafLookupError { - /// The path leads to a blinded node, cannot determine if leaf exists. - /// This means the witness is not complete. - BlindedNode { - /// Path to the blinded node. - path: Nibbles, - /// Hash of the blinded node. - hash: B256, - }, - /// The path leads to a leaf with a different value than expected. - /// This means the witness is malformed. - ValueMismatch { - /// Path to the leaf. - path: Nibbles, - /// Expected value. - expected: Option>, - /// Actual value found. 
- actual: Vec, - }, -} + fn get_leaf_value(&self, full_path: &Nibbles) -> Option<&Vec> { + self.values.get(full_path) + } -/// Success value for a leaf lookup operation -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum LeafLookup { - /// Leaf exists with expected value. - Exists, - /// Leaf does not exist (exclusion proof found). - NonExistent { - /// Path where the search diverged from the target path. - diverged_at: Nibbles, - }, -} + fn updates_ref(&self) -> Cow<'_, SparseTrieUpdates> { + self.updates.as_ref().map_or(Cow::Owned(SparseTrieUpdates::default()), Cow::Borrowed) + } -impl RevealedSparseTrie
<P>
{ - /// Attempts to find a leaf node at the specified path. - /// - /// This method traverses the trie from the root down to the given path, checking - /// if a leaf exists at that path. It can be used to verify the existence of a leaf - /// or to generate an exclusion proof (proof that a leaf does not exist). - /// - /// # Parameters - /// - /// - `path`: The path to search for. - /// - `expected_value`: Optional expected value. If provided, will verify the leaf value - /// matches. - /// - /// # Returns - /// - /// - `Ok(LeafLookup::Exists)` if the leaf exists with the expected value. - /// - `Ok(LeafLookup::NonExistent)` if the leaf definitely does not exist (exclusion proof). - /// - `Err(LeafLookupError)` if the search encountered a blinded node or found a different - /// value. - pub fn find_leaf( + fn take_updates(&mut self) -> SparseTrieUpdates { + self.updates.take().unwrap_or_default() + } + + fn wipe(&mut self) { + self.nodes = HashMap::from_iter([(Nibbles::default(), SparseNode::Empty)]); + self.values = HashMap::default(); + self.prefix_set = PrefixSetMut::all(); + self.updates = self.updates.is_some().then(SparseTrieUpdates::wiped); + } + + fn clear(&mut self) { + self.nodes.clear(); + self.nodes.insert(Nibbles::default(), SparseNode::Empty); + + self.branch_node_tree_masks.clear(); + self.branch_node_hash_masks.clear(); + self.values.clear(); + self.prefix_set.clear(); + self.updates = None; + self.rlp_buf.clear(); + } + + fn find_leaf( &self, - path: &Nibbles, + full_path: &Nibbles, expected_value: Option<&Vec>, ) -> Result { // Helper function to check if a value matches the expected value @@ -1480,9 +970,9 @@ impl RevealedSparseTrie
<P>
{ // First, do a quick check if the value exists in our values map. // We assume that if there exists a leaf node, then its value will // be in the `values` map. - if let Some(actual_value) = self.values.get(path) { + if let Some(actual_value) = self.values.get(full_path) { // We found the leaf, check if the value matches (if expected value was provided) - check_value_match(actual_value, expected_value, path)?; + check_value_match(actual_value, expected_value, full_path)?; return Ok(LeafLookup::Exists); } @@ -1492,12 +982,12 @@ impl RevealedSparseTrie
<P>
{ // We traverse the trie to find the location where this leaf would have been, showing // that it is not in the trie. Or we find a blinded node, showing that the witness is // not complete. - while current.len() < path.len() { + while current.len() < full_path.len() { match self.nodes.get(¤t) { Some(SparseNode::Empty) | None => { // None implies no node is at the current path (even in the full trie) // Empty node means there is a node at this path and it is "Empty" - return Ok(LeafLookup::NonExistent { diverged_at: current }); + return Ok(LeafLookup::NonExistent); } Some(&SparseNode::Hash(hash)) => { // We hit a blinded node - cannot determine if leaf exists @@ -1505,421 +995,673 @@ impl RevealedSparseTrie
<P>
{ } Some(SparseNode::Leaf { key, .. }) => { // We found a leaf node before reaching our target depth - - // Temporarily append the leaf key to `current` - let saved_len = current.len(); current.extend(key); - - if ¤t == path { + if ¤t == full_path { // This should have been handled by our initial values map check - if let Some(value) = self.values.get(path) { - check_value_match(value, expected_value, path)?; + if let Some(value) = self.values.get(full_path) { + check_value_match(value, expected_value, full_path)?; return Ok(LeafLookup::Exists); } } - let diverged_at = current.slice(..saved_len); - // The leaf node's path doesn't match our target path, // providing an exclusion proof - return Ok(LeafLookup::NonExistent { diverged_at }); + return Ok(LeafLookup::NonExistent); } Some(SparseNode::Extension { key, .. }) => { // Temporarily append the extension key to `current` let saved_len = current.len(); current.extend(key); - if path.len() < current.len() || !path.starts_with(¤t) { - let diverged_at = current.slice(..saved_len); + if full_path.len() < current.len() || !full_path.starts_with(¤t) { current.truncate(saved_len); // restore - return Ok(LeafLookup::NonExistent { diverged_at }); + return Ok(LeafLookup::NonExistent); } // Prefix matched, so we keep walking with the longer `current`. } Some(SparseNode::Branch { state_mask, .. }) => { // Check if branch has a child at the next nibble in our path - let nibble = path.get_unchecked(current.len()); + let nibble = full_path.get_unchecked(current.len()); if !state_mask.is_bit_set(nibble) { // No child at this nibble - exclusion proof - return Ok(LeafLookup::NonExistent { diverged_at: current }); + return Ok(LeafLookup::NonExistent); + } + + // Continue down the branch + current.push_unchecked(nibble); + } + } + } + + // We've traversed to the end of the path and didn't find a leaf + // Check if there's a node exactly at our target path + match self.nodes.get(full_path) { + Some(SparseNode::Leaf { key, .. }) if key.is_empty() => { + // We found a leaf with an empty key (exact match) + // This should be handled by the values map check above + if let Some(value) = self.values.get(full_path) { + check_value_match(value, expected_value, full_path)?; + return Ok(LeafLookup::Exists); + } + } + Some(&SparseNode::Hash(hash)) => { + return Err(LeafLookupError::BlindedNode { path: *full_path, hash }); + } + _ => { + // No leaf at exactly the target path + return Ok(LeafLookup::NonExistent); + } + } + + // If we get here, there's no leaf at the target path + Ok(LeafLookup::NonExistent) + } +} + +impl SerialSparseTrie { + /// Creates a new revealed sparse trie from the given root node. + /// + /// This function initializes the internal structures and then reveals the root. + /// It is a convenient method to create a trie when you already have the root node available. + /// + /// # Arguments + /// + /// * `root` - The root node of the trie + /// * `masks` - Trie masks for root branch node + /// * `retain_updates` - Whether to track updates + /// + /// # Returns + /// + /// Self if successful, or an error if revealing fails. + pub fn from_root( + root: TrieNode, + masks: TrieMasks, + retain_updates: bool, + ) -> SparseTrieResult { + Self::default().with_root(root, masks, retain_updates) + } + + /// Returns a reference to the current sparse trie updates. + /// + /// If no updates have been made/recorded, returns an empty update set. 
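+ ///
+ /// # Example
+ ///
+ /// A minimal sketch, assuming `trie` was created with `with_updates(true)`
+ /// (field names follow `SparseTrieUpdates` as used elsewhere in this file):
+ ///
+ /// ```ignore
+ /// let updates = trie.updates_ref();
+ /// for (path, branch) in &updates.updated_nodes {
+ ///     // persist `branch` under `path` in the database trie
+ /// }
+ /// ```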
+ pub fn updates_ref(&self) -> Cow<'_, SparseTrieUpdates> { + self.updates.as_ref().map_or(Cow::Owned(SparseTrieUpdates::default()), Cow::Borrowed) + } + + /// Returns an immutable reference to all nodes in the sparse trie. + pub const fn nodes_ref(&self) -> &HashMap { + &self.nodes + } + + /// Reveals either a node or its hash placeholder based on the provided child data. + /// + /// When traversing the trie, we often encounter references to child nodes that + /// are either directly embedded or represented by their hash. This method + /// handles both cases: + /// + /// 1. If the child data represents a hash (32+1=33 bytes), store it as a hash node + /// 2. Otherwise, decode the data as a [`TrieNode`] and recursively reveal it using + /// `reveal_node` + /// + /// # Returns + /// + /// Returns `Ok(())` if successful, or an error if the node cannot be revealed. + /// + /// # Error Handling + /// + /// Will error if there's a conflict between a new hash node and an existing one + /// at the same path + fn reveal_node_or_hash(&mut self, path: Nibbles, child: &[u8]) -> SparseTrieResult<()> { + if child.len() == B256::len_bytes() + 1 { + let hash = B256::from_slice(&child[1..]); + match self.nodes.entry(path) { + Entry::Occupied(entry) => match entry.get() { + // Hash node with a different hash can't be handled. + SparseNode::Hash(previous_hash) if previous_hash != &hash => { + return Err(SparseTrieErrorKind::Reveal { + path: *entry.key(), + node: Box::new(SparseNode::Hash(hash)), + } + .into()) } - - // Continue down the branch - current.push_unchecked(nibble); - } - } - } - - // We've traversed to the end of the path and didn't find a leaf - // Check if there's a node exactly at our target path - match self.nodes.get(path) { - Some(SparseNode::Leaf { key, .. }) if key.is_empty() => { - // We found a leaf with an empty key (exact match) - // This should be handled by the values map check above - if let Some(value) = self.values.get(path) { - check_value_match(value, expected_value, path)?; - return Ok(LeafLookup::Exists); + _ => {} + }, + Entry::Vacant(entry) => { + entry.insert(SparseNode::Hash(hash)); } } - Some(&SparseNode::Hash(hash)) => { - return Err(LeafLookupError::BlindedNode { path: *path, hash }); - } - _ => { - // No leaf at exactly the target path - let parent_path = if path.is_empty() { - Nibbles::default() - } else { - path.slice(0..path.len() - 1) - }; - return Ok(LeafLookup::NonExistent { diverged_at: parent_path }); - } + return Ok(()) } - // If we get here, there's no leaf at the target path - Ok(LeafLookup::NonExistent { diverged_at: current }) + self.reveal_node(path, TrieNode::decode(&mut &child[..])?, TrieMasks::none()) } - /// Updates or inserts a leaf node at the specified key path with the provided RLP-encoded - /// value. + /// Traverse the trie from the root down to the leaf at the given path, + /// removing and collecting all nodes along that path. /// - /// This method updates the internal prefix set and, if the leaf did not previously exist, - /// adjusts the trie structure by inserting new leaf nodes, splitting branch nodes, or - /// collapsing extension nodes as needed. + /// This helper function is used during leaf removal to extract the nodes of the trie + /// that will be affected by the deletion. These nodes are then re-inserted and modified + /// as needed (collapsing extension nodes etc) given that the leaf has now been removed. /// /// # Returns /// - /// Returns `Ok(())` if the update is successful. 
+ /// Returns a vector of [`RemovedSparseNode`] representing the nodes removed during the + /// traversal. + /// + /// # Errors /// - /// Note: If an update requires revealing a blinded node, an error is returned if the blinded - /// provider returns an error. - pub fn update_leaf(&mut self, path: Nibbles, value: Vec) -> SparseTrieResult<()> { - self.prefix_set.insert(path); - let existing = self.values.insert(path, value); - if existing.is_some() { - // trie structure unchanged, return immediately - return Ok(()) - } + /// Returns an error if a blinded node or an empty node is encountered unexpectedly, + /// as these prevent proper removal of the leaf. + fn take_nodes_for_path(&mut self, path: &Nibbles) -> SparseTrieResult> { + let mut current = Nibbles::default(); // Start traversal from the root + let mut nodes = Vec::new(); // Collect traversed nodes - let mut current = Nibbles::default(); - while let Some(node) = self.nodes.get_mut(¤t) { - match node { - SparseNode::Empty => { - *node = SparseNode::new_leaf(path); - break - } - &mut SparseNode::Hash(hash) => { + while let Some(node) = self.nodes.remove(¤t) { + match &node { + SparseNode::Empty => return Err(SparseTrieErrorKind::Blind.into()), + &SparseNode::Hash(hash) => { return Err(SparseTrieErrorKind::BlindedNode { path: current, hash }.into()) } - SparseNode::Leaf { key: current_key, .. } => { - current.extend(current_key); + SparseNode::Leaf { key: _key, .. } => { + // Leaf node is always the one that we're deleting, and no other leaf nodes can + // be found during traversal. - // this leaf is being updated - if current == path { - unreachable!("we already checked leaf presence in the beginning"); + #[cfg(debug_assertions)] + { + let mut current = current; + current.extend(_key); + assert_eq!(¤t, path); } - // find the common prefix - let common = current.common_prefix_length(&path); - - // update existing node - let new_ext_key = current.slice(current.len() - current_key.len()..common); - *node = SparseNode::new_ext(new_ext_key); + nodes.push(RemovedSparseNode { + path: current, + node, + unset_branch_nibble: None, + }); + break + } + SparseNode::Extension { key, .. } => { + #[cfg(debug_assertions)] + { + let mut current = current; + current.extend(key); + assert!( + path.starts_with(¤t), + "path: {path:?}, current: {current:?}, key: {key:?}", + ); + } - // create a branch node and corresponding leaves - self.nodes.reserve(3); - self.nodes.insert( - current.slice(..common), - SparseNode::new_split_branch( - current.get_unchecked(common), - path.get_unchecked(common), - ), - ); - self.nodes.insert( - path.slice(..=common), - SparseNode::new_leaf(path.slice(common + 1..)), - ); - self.nodes.insert( - current.slice(..=common), - SparseNode::new_leaf(current.slice(common + 1..)), + let path = current; + current.extend(key); + nodes.push(RemovedSparseNode { path, node, unset_branch_nibble: None }); + } + SparseNode::Branch { state_mask, .. } => { + let nibble = path.get_unchecked(current.len()); + debug_assert!( + state_mask.is_bit_set(nibble), + "current: {current:?}, path: {path:?}, nibble: {nibble:?}, state_mask: {state_mask:?}", ); - break; + // If the branch node has a child that is a leaf node that we're removing, + // we need to unset this nibble. + // Any other branch nodes will not require unsetting the nibble, because + // deleting one leaf node can not remove the whole path + // where the branch node is located. 
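+ // Illustration: when removing the leaf with full path 0x1a3, only the
+ // deepest branch (at 0x1a, child nibble 3) points directly at the doomed
+ // leaf, so only that nibble is marked for unsetting; branches higher up
+ // keep their bits because their subtrees still contain other leaves.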
+ let mut child_path = current; + child_path.push_unchecked(nibble); + let unset_branch_nibble = self + .nodes + .get(&child_path) + .is_some_and(move |node| match node { + SparseNode::Leaf { key, .. } => { + // Get full path of the leaf node + child_path.extend(key); + &child_path == path + } + _ => false, + }) + .then_some(nibble); + + nodes.push(RemovedSparseNode { path: current, node, unset_branch_nibble }); + + current.push_unchecked(nibble); } - SparseNode::Extension { key, .. } => { - current.extend(key); + } + } - if !path.starts_with(¤t) { - // find the common prefix - let common = current.common_prefix_length(&path); - *key = current.slice(current.len() - key.len()..common); + Ok(nodes) + } - // If branch node updates retention is enabled, we need to query the - // extension node child to later set the hash mask for a parent branch node - // correctly. - if self.updates.is_some() { - // Check if the extension node child is a hash that needs to be revealed - if self.nodes.get(¤t).unwrap().is_hash() { - if let Some(RevealedNode { node, tree_mask, hash_mask }) = - self.provider.blinded_node(¤t)? - { - let decoded = TrieNode::decode(&mut &node[..])?; - trace!( - target: "trie::sparse", - ?current, - ?decoded, - ?tree_mask, - ?hash_mask, - "Revealing extension node child", - ); - self.reveal_node( - current, - decoded, - TrieMasks { hash_mask, tree_mask }, - )?; - } - } - } + /// Recalculates and updates the RLP hashes of nodes deeper than or equal to the specified + /// `depth`. + /// + /// The root node is considered to be at level 0. This method is useful for optimizing + /// hash recalculations after localized changes to the trie structure: + /// + /// This function identifies all nodes that have changed (based on the prefix set) at the given + /// depth and recalculates their RLP representation. + pub fn update_rlp_node_level(&mut self, depth: usize) { + // Take the current prefix set + let mut prefix_set = core::mem::take(&mut self.prefix_set).freeze(); + let mut buffers = RlpNodeBuffers::default(); - // create state mask for new branch node - // NOTE: this might overwrite the current extension node - self.nodes.reserve(3); - let branch = SparseNode::new_split_branch( - current.get_unchecked(common), - path.get_unchecked(common), - ); - self.nodes.insert(current.slice(..common), branch); + // Get the nodes that have changed at the given depth. + let (targets, new_prefix_set) = self.get_changed_nodes_at_depth(&mut prefix_set, depth); + // Update the prefix set to the prefix set of the nodes that still need to be updated. + self.prefix_set = new_prefix_set; - // create new leaf - let new_leaf = SparseNode::new_leaf(path.slice(common + 1..)); - self.nodes.insert(path.slice(..=common), new_leaf); + trace!(target: "trie::sparse", ?depth, ?targets, "Updating nodes at depth"); + + let mut temp_rlp_buf = core::mem::take(&mut self.rlp_buf); + for (level, path) in targets { + buffers.path_stack.push(RlpNodePathStackItem { + level, + path, + is_in_prefix_set: Some(true), + }); + self.rlp_node(&mut prefix_set, &mut buffers, &mut temp_rlp_buf); + } + self.rlp_buf = temp_rlp_buf; + } + + /// Returns a list of (level, path) tuples identifying the nodes that have changed at the + /// specified depth, along with a new prefix set for the paths above the provided depth that + /// remain unchanged. + /// + /// Leaf nodes with a depth less than `depth` are returned too. 
+ /// + /// This method helps optimize hash recalculations by identifying which specific + /// nodes need to be updated at each level of the trie. + /// + /// # Parameters + /// + /// - `prefix_set`: The current prefix set tracking which paths need updates. + /// - `depth`: The minimum depth (relative to the root) to include nodes in the targets. + /// + /// # Returns + /// + /// A tuple containing: + /// - A vector of `(level, Nibbles)` pairs for nodes that require updates at or below the + /// specified depth. + /// - A `PrefixSetMut` containing paths shallower than the specified depth that still need to be + /// tracked for future updates. + fn get_changed_nodes_at_depth( + &self, + prefix_set: &mut PrefixSet, + depth: usize, + ) -> (Vec<(usize, Nibbles)>, PrefixSetMut) { + let mut unchanged_prefix_set = PrefixSetMut::default(); + let mut paths = Vec::from([(Nibbles::default(), 0)]); + let mut targets = Vec::new(); + + while let Some((mut path, level)) = paths.pop() { + match self.nodes.get(&path).unwrap() { + SparseNode::Empty | SparseNode::Hash(_) => {} + SparseNode::Leaf { key: _, hash } => { + if hash.is_some() && !prefix_set.contains(&path) { + continue + } + + targets.push((level, path)); + } + SparseNode::Extension { key, hash, store_in_db_trie: _ } => { + if hash.is_some() && !prefix_set.contains(&path) { + continue + } - // recreate extension to previous child if needed - let key = current.slice(common + 1..); - if !key.is_empty() { - self.nodes.insert(current.slice(..=common), SparseNode::new_ext(key)); - } + if level >= depth { + targets.push((level, path)); + } else { + unchanged_prefix_set.insert(path); - break; + path.extend(key); + paths.push((path, level + 1)); } } - SparseNode::Branch { state_mask, .. } => { - let nibble = path.get_unchecked(current.len()); - current.push_unchecked(nibble); - if !state_mask.is_bit_set(nibble) { - state_mask.set_bit(nibble); - let new_leaf = SparseNode::new_leaf(path.slice(current.len()..)); - self.nodes.insert(current, new_leaf); - break; + SparseNode::Branch { state_mask, hash, store_in_db_trie: _ } => { + if hash.is_some() && !prefix_set.contains(&path) { + continue + } + + if level >= depth { + targets.push((level, path)); + } else { + unchanged_prefix_set.insert(path); + + for bit in CHILD_INDEX_RANGE.rev() { + if state_mask.is_bit_set(bit) { + let mut child_path = path; + child_path.push_unchecked(bit); + paths.push((child_path, level + 1)); + } + } } } - }; + } } - Ok(()) + (targets, unchanged_prefix_set) } - /// Removes a leaf node from the trie at the specified key path. - /// - /// This function removes the leaf value from the internal values map and then traverses - /// the trie to remove or adjust intermediate nodes, merging or collapsing them as necessary. + /// Look up or calculate the RLP of the node at the root path. /// - /// # Returns + /// # Panics /// - /// Returns `Ok(())` if the leaf is successfully removed, otherwise returns an error - /// if the leaf is not present or if a blinded node prevents removal. - pub fn remove_leaf(&mut self, path: &Nibbles) -> SparseTrieResult<()> { - if self.values.remove(path).is_none() { - if let Some(&SparseNode::Hash(hash)) = self.nodes.get(path) { - // Leaf is present in the trie, but it's blinded. - return Err(SparseTrieErrorKind::BlindedNode { path: *path, hash }.into()) - } + /// If the node at provided path does not exist. 
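+ ///
+ /// # Example
+ ///
+ /// Condensed from the `root()` implementation earlier in this diff (not a
+ /// new API); this is how the method is driven internally:
+ ///
+ /// ```ignore
+ /// let mut prefix_set = core::mem::take(&mut self.prefix_set).freeze();
+ /// let rlp_node = self.rlp_node_allocate(&mut prefix_set);
+ /// // A 33-byte word encoding already carries the hash; shorter encodings
+ /// // are embedded nodes, so the raw RLP must be hashed.
+ /// let root = rlp_node.as_hash().unwrap_or_else(|| keccak256(rlp_node));
+ /// ```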
+ pub fn rlp_node_allocate(&mut self, prefix_set: &mut PrefixSet) -> RlpNode { + let mut buffers = RlpNodeBuffers::new_with_root_path(); + let mut temp_rlp_buf = core::mem::take(&mut self.rlp_buf); + let result = self.rlp_node(prefix_set, &mut buffers, &mut temp_rlp_buf); + self.rlp_buf = temp_rlp_buf; - trace!(target: "trie::sparse", ?path, "Leaf node is not present in the trie"); - // Leaf is not present in the trie. - return Ok(()) - } - self.prefix_set.insert(*path); + result + } - // If the path wasn't present in `values`, we still need to walk the trie and ensure that - // there is no node at the path. When a leaf node is a blinded `Hash`, it will have an entry - // in `nodes`, but not in the `values`. + /// Looks up or computes the RLP encoding of the node specified by the current + /// path in the provided buffers. + /// + /// The function uses a stack (`RlpNodeBuffers::path_stack`) to track the traversal and + /// accumulate RLP encodings. + /// + /// # Parameters + /// + /// - `prefix_set`: The set of trie paths that need their nodes updated. + /// - `buffers`: The reusable buffers for stack management and temporary RLP values. + /// + /// # Panics + /// + /// If the node at provided path does not exist. + pub fn rlp_node( + &mut self, + prefix_set: &mut PrefixSet, + buffers: &mut RlpNodeBuffers, + rlp_buf: &mut Vec, + ) -> RlpNode { + let _starting_path = buffers.path_stack.last().map(|item| item.path); - let mut removed_nodes = self.take_nodes_for_path(path)?; - // Pop the first node from the stack which is the leaf node we want to remove. - let mut child = removed_nodes.pop().expect("leaf exists"); - #[cfg(debug_assertions)] + 'main: while let Some(RlpNodePathStackItem { level, path, mut is_in_prefix_set }) = + buffers.path_stack.pop() { - let mut child_path = child.path; - let SparseNode::Leaf { key, .. } = &child.node else { panic!("expected leaf node") }; - child_path.extend(key); - assert_eq!(&child_path, path); - } + let node = self.nodes.get_mut(&path).unwrap(); + trace!( + target: "trie::sparse", + ?_starting_path, + ?level, + ?path, + ?is_in_prefix_set, + ?node, + "Popped node from path stack" + ); - // If we don't have any other removed nodes, insert an empty node at the root. - if removed_nodes.is_empty() { - debug_assert!(self.nodes.is_empty()); - self.nodes.insert(Nibbles::default(), SparseNode::Empty); + // Check if the path is in the prefix set. + // First, check the cached value. If it's `None`, then check the prefix set, and update + // the cached value. 
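+ // (The cache matters because a branch item is pushed back onto the path
+ // stack while its children are being computed, so the same item can be
+ // popped more than once; `get_or_insert_with` bounds the prefix-set
+ // lookup to at most once per stack item.)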
+ let mut prefix_set_contains = + |path: &Nibbles| *is_in_prefix_set.get_or_insert_with(|| prefix_set.contains(path)); - return Ok(()) - } + let (rlp_node, node_type) = match node { + SparseNode::Empty => (RlpNode::word_rlp(&EMPTY_ROOT_HASH), SparseNodeType::Empty), + SparseNode::Hash(hash) => (RlpNode::word_rlp(hash), SparseNodeType::Hash), + SparseNode::Leaf { key, hash } => { + let mut path = path; + path.extend(key); + if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) { + (RlpNode::word_rlp(&hash), SparseNodeType::Leaf) + } else { + let value = self.values.get(&path).unwrap(); + rlp_buf.clear(); + let rlp_node = LeafNodeRef { key, value }.rlp(rlp_buf); + *hash = rlp_node.as_hash(); + (rlp_node, SparseNodeType::Leaf) + } + } + SparseNode::Extension { key, hash, store_in_db_trie } => { + let mut child_path = path; + child_path.extend(key); + if let Some((hash, store_in_db_trie)) = + hash.zip(*store_in_db_trie).filter(|_| !prefix_set_contains(&path)) + { + ( + RlpNode::word_rlp(&hash), + SparseNodeType::Extension { store_in_db_trie: Some(store_in_db_trie) }, + ) + } else if buffers.rlp_node_stack.last().is_some_and(|e| e.path == child_path) { + let RlpNodeStackItem { + path: _, + rlp_node: child, + node_type: child_node_type, + } = buffers.rlp_node_stack.pop().unwrap(); + rlp_buf.clear(); + let rlp_node = ExtensionNodeRef::new(key, &child).rlp(rlp_buf); + *hash = rlp_node.as_hash(); - // Walk the stack of removed nodes from the back and re-insert them back into the trie, - // adjusting the node type as needed. - while let Some(removed_node) = removed_nodes.pop() { - let removed_path = removed_node.path; + let store_in_db_trie_value = child_node_type.store_in_db_trie(); - let new_node = match &removed_node.node { - SparseNode::Empty => return Err(SparseTrieErrorKind::Blind.into()), - &SparseNode::Hash(hash) => { - return Err(SparseTrieErrorKind::BlindedNode { path: removed_path, hash }.into()) - } - SparseNode::Leaf { .. } => { - unreachable!("we already popped the leaf node") - } - SparseNode::Extension { key, .. } => { - // If the node is an extension node, we need to look at its child to see if we - // need to merge them. - match &child.node { - SparseNode::Empty => return Err(SparseTrieErrorKind::Blind.into()), - &SparseNode::Hash(hash) => { - return Err( - SparseTrieErrorKind::BlindedNode { path: child.path, hash }.into() - ) - } - // For a leaf node, we collapse the extension node into a leaf node, - // extending the key. While it's impossible to encounter an extension node - // followed by a leaf node in a complete trie, it's possible here because we - // could have downgraded the extension node's child into a leaf node from - // another node type. - SparseNode::Leaf { key: leaf_key, .. } => { - self.nodes.remove(&child.path); + trace!( + target: "trie::sparse", + ?path, + ?child_path, + ?child_node_type, + "Extension node" + ); - let mut new_key = *key; - new_key.extend(leaf_key); - SparseNode::new_leaf(new_key) - } - // For an extension node, we collapse them into one extension node, - // extending the key - SparseNode::Extension { key: extension_key, .. } => { - self.nodes.remove(&child.path); + *store_in_db_trie = store_in_db_trie_value; - let mut new_key = *key; - new_key.extend(extension_key); - SparseNode::new_ext(new_key) - } - // For a branch node, we just leave the extension node as-is. - SparseNode::Branch { .. 
} => removed_node.node, + ( + rlp_node, + SparseNodeType::Extension { + // Inherit the `store_in_db_trie` flag from the child node, which is + // always the branch node + store_in_db_trie: store_in_db_trie_value, + }, + ) + } else { + // need to get rlp node for child first + buffers.path_stack.extend([ + RlpNodePathStackItem { level, path, is_in_prefix_set }, + RlpNodePathStackItem { + level: level + 1, + path: child_path, + is_in_prefix_set: None, + }, + ]); + continue } } - &SparseNode::Branch { mut state_mask, hash: _, store_in_db_trie: _ } => { - // If the node is a branch node, we need to check the number of children left - // after deleting the child at the given nibble. - - if let Some(removed_nibble) = removed_node.unset_branch_nibble { - state_mask.unset_bit(removed_nibble); + SparseNode::Branch { state_mask, hash, store_in_db_trie } => { + if let Some((hash, store_in_db_trie)) = + hash.zip(*store_in_db_trie).filter(|_| !prefix_set_contains(&path)) + { + buffers.rlp_node_stack.push(RlpNodeStackItem { + path, + rlp_node: RlpNode::word_rlp(&hash), + node_type: SparseNodeType::Branch { + store_in_db_trie: Some(store_in_db_trie), + }, + }); + continue } + let retain_updates = self.updates.is_some() && prefix_set_contains(&path); - // If only one child is left set in the branch node, we need to collapse it. - if state_mask.count_bits() == 1 { - let child_nibble = - state_mask.first_set_bit_index().expect("state mask is not empty"); - - // Get full path of the only child node left. - let mut child_path = removed_path; - child_path.push_unchecked(child_nibble); + buffers.branch_child_buf.clear(); + // Walk children in a reverse order from `f` to `0`, so we pop the `0` first + // from the stack and keep walking in the sorted order. + for bit in CHILD_INDEX_RANGE.rev() { + if state_mask.is_bit_set(bit) { + let mut child = path; + child.push_unchecked(bit); + buffers.branch_child_buf.push(child); + } + } - trace!(target: "trie::sparse", ?removed_path, ?child_path, "Branch node has only one child"); + buffers + .branch_value_stack_buf + .resize(buffers.branch_child_buf.len(), Default::default()); + let mut added_children = false; - if self.nodes.get(&child_path).unwrap().is_hash() { - trace!(target: "trie::sparse", ?child_path, "Retrieving remaining blinded branch child"); - if let Some(RevealedNode { node, tree_mask, hash_mask }) = - self.provider.blinded_node(&child_path)? - { - let decoded = TrieNode::decode(&mut &node[..])?; - trace!( - target: "trie::sparse", - ?child_path, - ?decoded, - ?tree_mask, - ?hash_mask, - "Revealing remaining blinded branch child" - ); - self.reveal_node( - child_path, - decoded, - TrieMasks { hash_mask, tree_mask }, - )?; - } - } + let mut tree_mask = TrieMask::default(); + let mut hash_mask = TrieMask::default(); + let mut hashes = Vec::new(); + for (i, child_path) in buffers.branch_child_buf.iter().enumerate() { + if buffers.rlp_node_stack.last().is_some_and(|e| &e.path == child_path) { + let RlpNodeStackItem { + path: _, + rlp_node: child, + node_type: child_node_type, + } = buffers.rlp_node_stack.pop().unwrap(); - // Get the only child node. 
- let child = self.nodes.get(&child_path).unwrap(); + // Update the masks only if we need to retain trie updates + if retain_updates { + // SAFETY: it's a child, so it's never empty + let last_child_nibble = child_path.last().unwrap(); - let mut delete_child = false; - let new_node = match child { - SparseNode::Empty => return Err(SparseTrieErrorKind::Blind.into()), - &SparseNode::Hash(hash) => { - return Err(SparseTrieErrorKind::BlindedNode { - path: child_path, - hash, + // Determine whether we need to set trie mask bit. + let should_set_tree_mask_bit = if let Some(store_in_db_trie) = + child_node_type.store_in_db_trie() + { + // A branch or an extension node explicitly set the + // `store_in_db_trie` flag + store_in_db_trie + } else { + // A blinded node has the tree mask bit set + child_node_type.is_hash() && + self.branch_node_tree_masks.get(&path).is_some_and( + |mask| mask.is_bit_set(last_child_nibble), + ) + }; + if should_set_tree_mask_bit { + tree_mask.set_bit(last_child_nibble); } - .into()) - } - // If the only child is a leaf node, we downgrade the branch node into a - // leaf node, prepending the nibble to the key, and delete the old - // child. - SparseNode::Leaf { key, .. } => { - delete_child = true; - - let mut new_key = Nibbles::from_nibbles_unchecked([child_nibble]); - new_key.extend(key); - SparseNode::new_leaf(new_key) - } - // If the only child node is an extension node, we downgrade the branch - // node into an even longer extension node, prepending the nibble to the - // key, and delete the old child. - SparseNode::Extension { key, .. } => { - delete_child = true; - let mut new_key = Nibbles::from_nibbles_unchecked([child_nibble]); - new_key.extend(key); - SparseNode::new_ext(new_key) - } - // If the only child is a branch node, we downgrade the current branch - // node into a one-nibble extension node. - SparseNode::Branch { .. } => { - SparseNode::new_ext(Nibbles::from_nibbles_unchecked([child_nibble])) + // Set the hash mask. If a child node is a revealed branch node OR + // is a blinded node that has its hash mask bit set according to the + // database, set the hash mask bit and save the hash. + let hash = child.as_hash().filter(|_| { + child_node_type.is_branch() || + (child_node_type.is_hash() && + self.branch_node_hash_masks + .get(&path) + .is_some_and(|mask| { + mask.is_bit_set(last_child_nibble) + })) + }); + if let Some(hash) = hash { + hash_mask.set_bit(last_child_nibble); + hashes.push(hash); + } } - }; - if delete_child { - self.nodes.remove(&child_path); + // Insert children in the resulting buffer in a normal order, + // because initially we iterated in reverse. 
+ // SAFETY: i < len and len is never 0 + let original_idx = buffers.branch_child_buf.len() - i - 1; + buffers.branch_value_stack_buf[original_idx] = child; + added_children = true; + } else { + debug_assert!(!added_children); + buffers.path_stack.push(RlpNodePathStackItem { + level, + path, + is_in_prefix_set, + }); + buffers.path_stack.extend(buffers.branch_child_buf.drain(..).map( + |path| RlpNodePathStackItem { + level: level + 1, + path, + is_in_prefix_set: None, + }, + )); + continue 'main } + } - if let Some(updates) = self.updates.as_mut() { - updates.updated_nodes.remove(&removed_path); - updates.removed_nodes.insert(removed_path); + trace!( + target: "trie::sparse", + ?path, + ?tree_mask, + ?hash_mask, + "Branch node masks" + ); + + rlp_buf.clear(); + let branch_node_ref = + BranchNodeRef::new(&buffers.branch_value_stack_buf, *state_mask); + let rlp_node = branch_node_ref.rlp(rlp_buf); + *hash = rlp_node.as_hash(); + + // Save a branch node update only if it's not a root node, and we need to + // persist updates. + let store_in_db_trie_value = if let Some(updates) = + self.updates.as_mut().filter(|_| retain_updates && !path.is_empty()) + { + let store_in_db_trie = !tree_mask.is_empty() || !hash_mask.is_empty(); + if store_in_db_trie { + // Store in DB trie if there are either any children that are stored in + // the DB trie, or any children represent hashed values + hashes.reverse(); + let branch_node = BranchNodeCompact::new( + *state_mask, + tree_mask, + hash_mask, + hashes, + hash.filter(|_| path.is_empty()), + ); + updates.updated_nodes.insert(path, branch_node); + } else if self + .branch_node_tree_masks + .get(&path) + .is_some_and(|mask| !mask.is_empty()) || + self.branch_node_hash_masks + .get(&path) + .is_some_and(|mask| !mask.is_empty()) + { + // If new tree and hash masks are empty, but previously they weren't, we + // need to remove the node update and add the node itself to the list of + // removed nodes. + updates.updated_nodes.remove(&path); + updates.removed_nodes.insert(path); + } else if self + .branch_node_tree_masks + .get(&path) + .is_none_or(|mask| mask.is_empty()) && + self.branch_node_hash_masks + .get(&path) + .is_none_or(|mask| mask.is_empty()) + { + // If new tree and hash masks are empty, and they were previously empty + // as well, we need to remove the node update. + updates.updated_nodes.remove(&path); } - new_node - } - // If more than one child is left set in the branch, we just re-insert it as-is. - else { - SparseNode::new_branch(state_mask) - } + store_in_db_trie + } else { + false + }; + *store_in_db_trie = Some(store_in_db_trie_value); + + ( + rlp_node, + SparseNodeType::Branch { store_in_db_trie: Some(store_in_db_trie_value) }, + ) } }; - child = RemovedSparseNode { - path: removed_path, - node: new_node.clone(), - unset_branch_nibble: None, - }; - trace!(target: "trie::sparse", ?removed_path, ?new_node, "Re-inserting the node"); - self.nodes.insert(removed_path, new_node); + trace!( + target: "trie::sparse", + ?_starting_path, + ?level, + ?path, + ?node, + ?node_type, + ?is_in_prefix_set, + "Added node to rlp node stack" + ); + + buffers.rlp_node_stack.push(RlpNodeStackItem { path, rlp_node, node_type }); } - Ok(()) + debug_assert_eq!(buffers.rlp_node_stack.len(), 1); + buffers.rlp_node_stack.pop().unwrap().rlp_node } } @@ -2099,7 +1841,7 @@ struct RemovedSparseNode { unset_branch_nibble: Option, } -/// Collection of reusable buffers for [`RevealedSparseTrie::rlp_node`] calculations. 
+/// Collection of reusable buffers for [`SerialSparseTrie::rlp_node`] calculations. /// /// These buffers reduce allocations when computing RLP representations during trie updates. #[derive(Debug, Default)] @@ -2152,20 +1894,6 @@ pub struct RlpNodeStackItem { pub node_type: SparseNodeType, } -/// Tracks modifications to the sparse trie structure. -/// -/// Maintains references to both modified and pruned/removed branches, enabling -/// one to make batch updates to a persistent database. -#[derive(Debug, Clone, Default, PartialEq, Eq)] -pub struct SparseTrieUpdates { - /// Collection of updated intermediate nodes indexed by full path. - pub updated_nodes: HashMap, - /// Collection of removed intermediate nodes indexed by full path. - pub removed_nodes: HashSet, - /// Flag indicating whether the trie was wiped. - pub wiped: bool, -} - impl SparseTrieUpdates { /// Create new wiped sparse trie updates. pub fn wiped() -> Self { @@ -2192,7 +1920,7 @@ impl SparseTrieUpdates { #[cfg(test)] mod find_leaf_tests { use super::*; - use crate::blinded::DefaultBlindedProvider; + use crate::provider::DefaultTrieNodeProvider; use alloy_primitives::map::foldhash::fast::RandomState; // Assuming this exists use alloy_rlp::Encodable; @@ -2215,11 +1943,12 @@ mod find_leaf_tests { #[test] fn find_leaf_existing_leaf() { // Create a simple trie with one leaf - let mut sparse = RevealedSparseTrie::default(); + let provider = DefaultTrieNodeProvider; + let mut sparse = SerialSparseTrie::default(); let path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); let value = b"test_value".to_vec(); - sparse.update_leaf(path, value.clone()).unwrap(); + sparse.update_leaf(path, value.clone(), &provider).unwrap(); // Check that the leaf exists let result = sparse.find_leaf(&path, None); @@ -2233,12 +1962,13 @@ mod find_leaf_tests { #[test] fn find_leaf_value_mismatch() { // Create a simple trie with one leaf - let mut sparse = RevealedSparseTrie::default(); + let provider = DefaultTrieNodeProvider; + let mut sparse = SerialSparseTrie::default(); let path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); let value = b"test_value".to_vec(); let wrong_value = b"wrong_value".to_vec(); - sparse.update_leaf(path, value).unwrap(); + sparse.update_leaf(path, value, &provider).unwrap(); // Check with wrong expected value let result = sparse.find_leaf(&path, Some(&wrong_value)); @@ -2251,33 +1981,29 @@ mod find_leaf_tests { #[test] fn find_leaf_not_found_empty_trie() { // Empty trie - let sparse = RevealedSparseTrie::default(); + let sparse = SerialSparseTrie::default(); let path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); // Leaf should not exist let result = sparse.find_leaf(&path, None); - assert_matches!( - result, - Ok(LeafLookup::NonExistent { diverged_at }) if diverged_at == Nibbles::default() - ); + assert_matches!(result, Ok(LeafLookup::NonExistent)); } #[test] fn find_leaf_empty_trie() { - let sparse = RevealedSparseTrie::::default(); + let sparse = SerialSparseTrie::default(); let path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); let result = sparse.find_leaf(&path, None); - - // In an empty trie, the search diverges immediately at the root. 
- assert_matches!(result, Ok(LeafLookup::NonExistent { diverged_at }) if diverged_at == Nibbles::default()); + assert_matches!(result, Ok(LeafLookup::NonExistent)); } #[test] fn find_leaf_exists_no_value_check() { - let mut sparse = RevealedSparseTrie::::default(); + let provider = DefaultTrieNodeProvider; + let mut sparse = SerialSparseTrie::default(); let path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); - sparse.update_leaf(path, VALUE_A()).unwrap(); + sparse.update_leaf(path, VALUE_A(), &provider).unwrap(); let result = sparse.find_leaf(&path, None); assert_matches!(result, Ok(LeafLookup::Exists)); @@ -2285,10 +2011,11 @@ mod find_leaf_tests { #[test] fn find_leaf_exists_with_value_check_ok() { - let mut sparse = RevealedSparseTrie::::default(); + let provider = DefaultTrieNodeProvider; + let mut sparse = SerialSparseTrie::default(); let path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); let value = VALUE_A(); - sparse.update_leaf(path, value.clone()).unwrap(); + sparse.update_leaf(path, value.clone(), &provider).unwrap(); let result = sparse.find_leaf(&path, Some(&value)); assert_matches!(result, Ok(LeafLookup::Exists)); @@ -2296,75 +2023,64 @@ mod find_leaf_tests { #[test] fn find_leaf_exclusion_branch_divergence() { - let mut sparse = RevealedSparseTrie::::default(); + let provider = DefaultTrieNodeProvider; + let mut sparse = SerialSparseTrie::default(); let path1 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); // Creates branch at 0x12 let path2 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x5, 0x6]); // Belongs to same branch let search_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x7, 0x8]); // Diverges at nibble 7 - sparse.update_leaf(path1, VALUE_A()).unwrap(); - sparse.update_leaf(path2, VALUE_B()).unwrap(); + sparse.update_leaf(path1, VALUE_A(), &provider).unwrap(); + sparse.update_leaf(path2, VALUE_B(), &provider).unwrap(); let result = sparse.find_leaf(&search_path, None); - - // Diverged at the branch node because nibble '7' is not present. - let expected_divergence = Nibbles::from_nibbles_unchecked([0x1, 0x2]); - assert_matches!(result, Ok(LeafLookup::NonExistent { diverged_at }) if diverged_at == expected_divergence); + assert_matches!(result, Ok(LeafLookup::NonExistent)); } #[test] fn find_leaf_exclusion_extension_divergence() { - let mut sparse = RevealedSparseTrie::::default(); + let provider = DefaultTrieNodeProvider; + let mut sparse = SerialSparseTrie::default(); // This will create an extension node at root with key 0x12 let path1 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4, 0x5, 0x6]); // This path diverges from the extension key let search_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x7, 0x8]); - sparse.update_leaf(path1, VALUE_A()).unwrap(); + sparse.update_leaf(path1, VALUE_A(), &provider).unwrap(); let result = sparse.find_leaf(&search_path, None); - - // Diverged where the extension node started because the path doesn't match its key prefix. 
- let expected_divergence = Nibbles::default(); - assert_matches!(result, Ok(LeafLookup::NonExistent { diverged_at }) if diverged_at == expected_divergence); + assert_matches!(result, Ok(LeafLookup::NonExistent)); } #[test] fn find_leaf_exclusion_leaf_divergence() { - let mut sparse = RevealedSparseTrie::::default(); + let provider = DefaultTrieNodeProvider; + let mut sparse = SerialSparseTrie::default(); let existing_leaf_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); let search_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4, 0x5, 0x6]); - sparse.update_leaf(existing_leaf_path, VALUE_A()).unwrap(); + sparse.update_leaf(existing_leaf_path, VALUE_A(), &provider).unwrap(); let result = sparse.find_leaf(&search_path, None); - - // Diverged when it hit the leaf node at the root, because the search path is longer - // than the leaf's key stored there. The code returns the path of the node (root) - // where the divergence occurred. - let expected_divergence = Nibbles::default(); - assert_matches!(result, Ok(LeafLookup::NonExistent { diverged_at }) if diverged_at == expected_divergence); + assert_matches!(result, Ok(LeafLookup::NonExistent)); } #[test] fn find_leaf_exclusion_path_ends_at_branch() { - let mut sparse = RevealedSparseTrie::::default(); + let provider = DefaultTrieNodeProvider; + let mut sparse = SerialSparseTrie::default(); let path1 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); // Creates branch at 0x12 let path2 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x5, 0x6]); let search_path = Nibbles::from_nibbles_unchecked([0x1, 0x2]); // Path of the branch itself - sparse.update_leaf(path1, VALUE_A()).unwrap(); - sparse.update_leaf(path2, VALUE_B()).unwrap(); + sparse.update_leaf(path1, VALUE_A(), &provider).unwrap(); + sparse.update_leaf(path2, VALUE_B(), &provider).unwrap(); let result = sparse.find_leaf(&search_path, None); - - // The path ends, but the node at the path is a branch, not a leaf. - // Diverged at the parent of the node found at the search path. - let expected_divergence = Nibbles::from_nibbles_unchecked([0x1]); - assert_matches!(result, Ok(LeafLookup::NonExistent { diverged_at }) if diverged_at == expected_divergence); + assert_matches!(result, Ok(LeafLookup::NonExistent)); } #[test] - fn find_leaf_error_blinded_node_at_leaf_path() { + fn find_leaf_error_trie_node_at_leaf_path() { // Scenario: The node *at* the leaf path is blinded. 
let blinded_hash = B256::repeat_byte(0xBB); let leaf_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); @@ -2385,8 +2101,7 @@ mod find_leaf_tests { ); // Branch at 0x123, child 4 nodes.insert(leaf_path, SparseNode::Hash(blinded_hash)); // Blinded node at 0x1234 - let sparse = RevealedSparseTrie { - provider: DefaultBlindedProvider, + let sparse = SerialSparseTrie { nodes, branch_node_tree_masks: Default::default(), branch_node_hash_masks: Default::default(), @@ -2406,7 +2121,7 @@ mod find_leaf_tests { } #[test] - fn find_leaf_error_blinded_node() { + fn find_leaf_error_trie_node() { let blinded_hash = B256::repeat_byte(0xAA); let path_to_blind = Nibbles::from_nibbles_unchecked([0x1]); let search_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); @@ -2429,8 +2144,7 @@ mod find_leaf_tests { let mut values = HashMap::with_hasher(RandomState::default()); values.insert(path_revealed_leaf, VALUE_A()); - let sparse = RevealedSparseTrie { - provider: DefaultBlindedProvider, + let sparse = SerialSparseTrie { nodes, branch_node_tree_masks: Default::default(), branch_node_hash_masks: Default::default(), @@ -2449,7 +2163,7 @@ mod find_leaf_tests { } #[test] - fn find_leaf_error_blinded_node_via_reveal() { + fn find_leaf_error_trie_node_via_reveal() { let blinded_hash = B256::repeat_byte(0xAA); let path_to_blind = Nibbles::from_nibbles_unchecked([0x1]); // Path of the blinded node itself let search_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); // Path we will search for @@ -2477,7 +2191,7 @@ mod find_leaf_tests { // 3. Initialize the sparse trie using from_root // This will internally create Hash nodes for paths "1" and "5" initially. - let mut sparse = RevealedSparseTrie::from_root(root_trie_node, TrieMasks::none(), false) + let mut sparse = SerialSparseTrie::from_root(root_trie_node, TrieMasks::none(), false) .expect("Failed to create trie from root"); // Assertions before we reveal child5 @@ -2510,6 +2224,7 @@ mod find_leaf_tests { #[cfg(test)] mod tests { use super::*; + use crate::provider::DefaultTrieNodeProvider; use alloy_primitives::{map::B256Set, U256}; use alloy_rlp::Encodable; use assert_matches::assert_matches; @@ -2625,10 +2340,7 @@ mod tests { } /// Assert that the sparse trie nodes and the proof nodes from the hash builder are equal. 
- fn assert_eq_sparse_trie_proof_nodes( - sparse_trie: &RevealedSparseTrie, - proof_nodes: ProofNodes, - ) { + fn assert_eq_sparse_trie_proof_nodes(sparse_trie: &SerialSparseTrie, proof_nodes: ProofNodes) { let proof_nodes = proof_nodes .into_nodes_sorted() .into_iter() @@ -2672,8 +2384,8 @@ mod tests { #[test] fn sparse_trie_is_blind() { - assert!(SparseTrie::blind().is_blind()); - assert!(!SparseTrie::revealed_empty().is_blind()); + assert!(SparseTrie::::blind().is_blind()); + assert!(!SparseTrie::::revealed_empty().is_blind()); } #[test] @@ -2694,8 +2406,9 @@ mod tests { [key], ); - let mut sparse = RevealedSparseTrie::default().with_updates(true); - sparse.update_leaf(key, value_encoded()).unwrap(); + let provider = DefaultTrieNodeProvider; + let mut sparse = SerialSparseTrie::default().with_updates(true); + sparse.update_leaf(key, value_encoded(), &provider).unwrap(); let sparse_root = sparse.root(); let sparse_updates = sparse.take_updates(); @@ -2724,9 +2437,10 @@ mod tests { paths.clone(), ); - let mut sparse = RevealedSparseTrie::default().with_updates(true); + let provider = DefaultTrieNodeProvider; + let mut sparse = SerialSparseTrie::default().with_updates(true); for path in &paths { - sparse.update_leaf(*path, value_encoded()).unwrap(); + sparse.update_leaf(*path, value_encoded(), &provider).unwrap(); } let sparse_root = sparse.root(); let sparse_updates = sparse.take_updates(); @@ -2754,9 +2468,10 @@ mod tests { paths.clone(), ); - let mut sparse = RevealedSparseTrie::default().with_updates(true); + let provider = DefaultTrieNodeProvider; + let mut sparse = SerialSparseTrie::default().with_updates(true); for path in &paths { - sparse.update_leaf(*path, value_encoded()).unwrap(); + sparse.update_leaf(*path, value_encoded(), &provider).unwrap(); } let sparse_root = sparse.root(); let sparse_updates = sparse.take_updates(); @@ -2792,9 +2507,10 @@ mod tests { paths.clone(), ); - let mut sparse = RevealedSparseTrie::default().with_updates(true); + let provider = DefaultTrieNodeProvider; + let mut sparse = SerialSparseTrie::default().with_updates(true); for path in &paths { - sparse.update_leaf(*path, value_encoded()).unwrap(); + sparse.update_leaf(*path, value_encoded(), &provider).unwrap(); } let sparse_root = sparse.root(); let sparse_updates = sparse.take_updates(); @@ -2831,9 +2547,10 @@ mod tests { paths.clone(), ); - let mut sparse = RevealedSparseTrie::default().with_updates(true); + let provider = DefaultTrieNodeProvider; + let mut sparse = SerialSparseTrie::default().with_updates(true); for path in &paths { - sparse.update_leaf(*path, old_value_encoded.clone()).unwrap(); + sparse.update_leaf(*path, old_value_encoded.clone(), &provider).unwrap(); } let sparse_root = sparse.root(); let sparse_updates = sparse.updates_ref(); @@ -2851,7 +2568,7 @@ mod tests { ); for path in &paths { - sparse.update_leaf(*path, new_value_encoded.clone()).unwrap(); + sparse.update_leaf(*path, new_value_encoded.clone(), &provider).unwrap(); } let sparse_root = sparse.root(); let sparse_updates = sparse.take_updates(); @@ -2865,26 +2582,29 @@ mod tests { fn sparse_trie_remove_leaf() { reth_tracing::init_test_tracing(); - let mut sparse = RevealedSparseTrie::default(); + let provider = DefaultTrieNodeProvider; + let mut sparse = SerialSparseTrie::default(); let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone(), &provider) + 
.unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), value, &provider) .unwrap(); - sparse.update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), value).unwrap(); // Extension (Key = 5) // └── Branch (Mask = 1011) @@ -2940,7 +2660,7 @@ mod tests { ]) ); - sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3])).unwrap(); + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), &provider).unwrap(); // Extension (Key = 5) // └── Branch (Mask = 1001) @@ -2991,7 +2711,7 @@ mod tests { ]) ); - sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1])).unwrap(); + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), &provider).unwrap(); // Extension (Key = 5) // └── Branch (Mask = 1001) @@ -3027,7 +2747,7 @@ mod tests { ]) ); - sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2])).unwrap(); + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), &provider).unwrap(); // Extension (Key = 5) // └── Branch (Mask = 1001) @@ -3060,7 +2780,7 @@ mod tests { ]) ); - sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0])).unwrap(); + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), &provider).unwrap(); // Extension (Key = 5) // └── Branch (Mask = 1001) @@ -3082,7 +2802,7 @@ mod tests { ]) ); - sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3])).unwrap(); + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), &provider).unwrap(); // Leaf (Key = 53302) pretty_assertions::assert_eq!( @@ -3093,7 +2813,7 @@ mod tests { ),]) ); - sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2])).unwrap(); + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]), &provider).unwrap(); // Empty pretty_assertions::assert_eq!( @@ -3116,7 +2836,8 @@ mod tests { TrieMask::new(0b11), )); - let mut sparse = RevealedSparseTrie::from_root( + let provider = DefaultTrieNodeProvider; + let mut sparse = SerialSparseTrie::from_root( branch.clone(), TrieMasks { hash_mask: Some(TrieMask::new(0b01)), tree_mask: None }, false, @@ -3141,7 +2862,7 @@ mod tests { // Removing a blinded leaf should result in an error assert_matches!( - sparse.remove_leaf(&Nibbles::from_nibbles([0x0])).map_err(|e| e.into_kind()), + sparse.remove_leaf(&Nibbles::from_nibbles([0x0]), &provider).map_err(|e| e.into_kind()), Err(SparseTrieErrorKind::BlindedNode { path, hash }) if path == Nibbles::from_nibbles([0x0]) && hash == B256::repeat_byte(1) ); } @@ -3160,7 +2881,8 @@ mod tests { TrieMask::new(0b11), )); - let mut sparse = RevealedSparseTrie::from_root( + let provider = DefaultTrieNodeProvider; + let mut sparse = SerialSparseTrie::from_root( branch.clone(), TrieMasks { hash_mask: 
Some(TrieMask::new(0b01)), tree_mask: None }, false, @@ -3185,7 +2907,7 @@ mod tests { // Removing a non-existent leaf should be a noop let sparse_old = sparse.clone(); - assert_matches!(sparse.remove_leaf(&Nibbles::from_nibbles([0x2])), Ok(())); + assert_matches!(sparse.remove_leaf(&Nibbles::from_nibbles([0x2]), &provider), Ok(())); assert_eq!(sparse, sparse_old); } @@ -3199,8 +2921,9 @@ mod tests { fn test(updates: Vec<(BTreeMap, BTreeSet)>) { { let mut state = BTreeMap::default(); + let default_provider = DefaultTrieNodeProvider; let provider_factory = create_test_provider_factory(); - let mut sparse = RevealedSparseTrie::default().with_updates(true); + let mut sparse = SerialSparseTrie::default().with_updates(true); for (update, keys_to_delete) in updates { // Insert state updates into the sparse trie and calculate the root @@ -3208,7 +2931,7 @@ mod tests { let account = account.into_trie_account(EMPTY_ROOT_HASH); let mut account_rlp = Vec::new(); account.encode(&mut account_rlp); - sparse.update_leaf(key, account_rlp).unwrap(); + sparse.update_leaf(key, account_rlp, &default_provider).unwrap(); } // We need to clone the sparse trie, so that all updated branch nodes are // preserved, and not only those that were changed after the last call to @@ -3248,7 +2971,7 @@ mod tests { // that the sparse trie root still matches the hash builder root for key in &keys_to_delete { state.remove(key).unwrap(); - sparse.remove_leaf(key).unwrap(); + sparse.remove_leaf(key, &default_provider).unwrap(); } // We need to clone the sparse trie, so that all updated branch nodes are @@ -3358,7 +3081,9 @@ mod tests { Default::default(), [Nibbles::default()], ); - let mut sparse = RevealedSparseTrie::from_root( + + let provider = DefaultTrieNodeProvider; + let mut sparse = SerialSparseTrie::from_root( TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), TrieMasks { hash_mask: branch_node_hash_masks.get(&Nibbles::default()).copied(), @@ -3395,7 +3120,7 @@ mod tests { ); // Insert the leaf for the second key - sparse.update_leaf(key2(), value_encoded()).unwrap(); + sparse.update_leaf(key2(), value_encoded(), &provider).unwrap(); // Check that the branch node was updated and another nibble was set assert_eq!( @@ -3466,7 +3191,9 @@ mod tests { Default::default(), [Nibbles::default()], ); - let mut sparse = RevealedSparseTrie::from_root( + + let provider = DefaultTrieNodeProvider; + let mut sparse = SerialSparseTrie::from_root( TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), TrieMasks { hash_mask: branch_node_hash_masks.get(&Nibbles::default()).copied(), @@ -3504,7 +3231,7 @@ mod tests { ); // Remove the leaf for the first key - sparse.remove_leaf(&key1()).unwrap(); + sparse.remove_leaf(&key1(), &provider).unwrap(); // Check that the branch node was turned into an extension node assert_eq!( @@ -3567,7 +3294,9 @@ mod tests { Default::default(), [Nibbles::default()], ); - let mut sparse = RevealedSparseTrie::from_root( + + let provider = DefaultTrieNodeProvider; + let mut sparse = SerialSparseTrie::from_root( TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), TrieMasks { hash_mask: branch_node_hash_masks.get(&Nibbles::default()).copied(), @@ -3584,7 +3313,7 @@ mod tests { ); // Insert the leaf with a different prefix - sparse.update_leaf(key3(), value_encoded()).unwrap(); + sparse.update_leaf(key3(), value_encoded(), &provider).unwrap(); // Check that the extension node was turned into a branch node assert_matches!( @@ 
-3621,7 +3350,8 @@ mod tests { #[test] fn sparse_trie_get_changed_nodes_at_depth() { - let mut sparse = RevealedSparseTrie::default(); + let provider = DefaultTrieNodeProvider; + let mut sparse = SerialSparseTrie::default(); let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); @@ -3638,21 +3368,23 @@ mod tests { // ├── 0 -> Leaf (Key = 3302, Path = 53302) – Level 4 // └── 2 -> Leaf (Key = 3320, Path = 53320) – Level 4 sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone(), &provider) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), value, &provider) .unwrap(); - sparse.update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), value).unwrap(); assert_eq!( sparse.get_changed_nodes_at_depth(&mut PrefixSet::default(), 0), @@ -3732,9 +3464,11 @@ mod tests { Default::default(), [Nibbles::default()], ); - let mut sparse = RevealedSparseTrie::default(); - sparse.update_leaf(key1(), value_encoded()).unwrap(); - sparse.update_leaf(key2(), value_encoded()).unwrap(); + + let provider = DefaultTrieNodeProvider; + let mut sparse = SerialSparseTrie::default(); + sparse.update_leaf(key1(), value_encoded(), &provider).unwrap(); + sparse.update_leaf(key2(), value_encoded(), &provider).unwrap(); let sparse_root = sparse.root(); let sparse_updates = sparse.take_updates(); @@ -3744,7 +3478,8 @@ mod tests { #[test] fn sparse_trie_wipe() { - let mut sparse = RevealedSparseTrie::default().with_updates(true); + let provider = DefaultTrieNodeProvider; + let mut sparse = SerialSparseTrie::default().with_updates(true); let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); @@ -3761,24 +3496,31 @@ mod tests { // ├── 0 -> Leaf (Key = 3302, Path = 53302) – Level 4 // └── 2 -> Leaf (Key = 3320, Path = 53320) – Level 4 sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone(), &provider) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]), value.clone(), &provider) .unwrap(); sparse - 
.update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), value, &provider) .unwrap(); - sparse.update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), value).unwrap(); sparse.wipe(); + assert_matches!( + &sparse.updates, + Some(SparseTrieUpdates{ updated_nodes, removed_nodes, wiped }) + if updated_nodes.is_empty() && removed_nodes.is_empty() && *wiped + ); assert_eq!(sparse.root(), EMPTY_ROOT_HASH); } @@ -3786,34 +3528,32 @@ mod tests { fn sparse_trie_clear() { // tests that if we fill a sparse trie with some nodes and then clear it, it has the same // contents as an empty sparse trie - let mut sparse = RevealedSparseTrie::default(); + let provider = DefaultTrieNodeProvider; + let mut sparse = SerialSparseTrie::default(); let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone(), &provider) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value, &provider) .unwrap(); - sparse.update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value).unwrap(); sparse.clear(); - // we have to update the root hash to be an empty one, because the `Default` impl of - // `RevealedSparseTrie` sets the root hash to `EMPTY_ROOT_HASH` in the constructor. - // - // The default impl is only used in tests. 
- sparse.nodes.insert(Nibbles::default(), SparseNode::Empty); - - let empty_trie = RevealedSparseTrie::default(); + let empty_trie = SerialSparseTrie::default(); assert_eq!(empty_trie, sparse); } #[test] fn sparse_trie_display() { - let mut sparse = RevealedSparseTrie::default(); + let provider = DefaultTrieNodeProvider; + let mut sparse = SerialSparseTrie::default(); let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); @@ -3830,21 +3570,23 @@ mod tests { // ├── 0 -> Leaf (Key = 3302, Path = 53302) – Level 4 // └── 2 -> Leaf (Key = 3320, Path = 53320) – Level 4 sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone(), &provider) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), value, &provider) .unwrap(); - sparse.update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), value).unwrap(); let normal_printed = format!("{sparse}"); let expected = "\ diff --git a/crates/trie/trie/src/proof/mod.rs b/crates/trie/trie/src/proof/mod.rs index 266aac19a39..10439b804f6 100644 --- a/crates/trie/trie/src/proof/mod.rs +++ b/crates/trie/trie/src/proof/mod.rs @@ -17,8 +17,8 @@ use reth_trie_common::{ proof::ProofRetainer, AccountProof, MultiProof, MultiProofTargets, StorageMultiProof, }; -mod blinded; -pub use blinded::*; +mod trie_node; +pub use trie_node::*; /// A struct for generating merkle proofs. /// diff --git a/crates/trie/trie/src/proof/blinded.rs b/crates/trie/trie/src/proof/trie_node.rs similarity index 90% rename from crates/trie/trie/src/proof/blinded.rs rename to crates/trie/trie/src/proof/trie_node.rs index 363add7116b..3d964cf5e8b 100644 --- a/crates/trie/trie/src/proof/blinded.rs +++ b/crates/trie/trie/src/proof/trie_node.rs @@ -3,15 +3,15 @@ use crate::{hashed_cursor::HashedCursorFactory, trie_cursor::TrieCursorFactory}; use alloy_primitives::{map::HashSet, B256}; use reth_execution_errors::{SparseTrieError, SparseTrieErrorKind}; use reth_trie_common::{prefix_set::TriePrefixSetsMut, MultiProofTargets, Nibbles}; -use reth_trie_sparse::blinded::{ - pad_path_to_key, BlindedProvider, BlindedProviderFactory, RevealedNode, +use reth_trie_sparse::provider::{ + pad_path_to_key, RevealedNode, TrieNodeProvider, TrieNodeProviderFactory, }; use std::{sync::Arc, time::Instant}; use tracing::{enabled, trace, Level}; /// Factory for instantiating providers capable of retrieving blinded trie nodes via proofs. #[derive(Debug, Clone)] -pub struct ProofBlindedProviderFactory { +pub struct ProofTrieNodeProviderFactory { /// The cursor factory for traversing trie nodes. trie_cursor_factory: T, /// The factory for hashed cursors. 
@@ -20,7 +20,7 @@ pub struct ProofBlindedProviderFactory { prefix_sets: Arc, } -impl ProofBlindedProviderFactory { +impl ProofTrieNodeProviderFactory { /// Create new proof-based blinded provider factory. pub const fn new( trie_cursor_factory: T, @@ -31,7 +31,7 @@ impl ProofBlindedProviderFactory { } } -impl BlindedProviderFactory for ProofBlindedProviderFactory +impl TrieNodeProviderFactory for ProofTrieNodeProviderFactory where T: TrieCursorFactory + Clone + Send + Sync, H: HashedCursorFactory + Clone + Send + Sync, @@ -79,12 +79,12 @@ impl ProofBlindedAccountProvider { } } -impl BlindedProvider for ProofBlindedAccountProvider +impl TrieNodeProvider for ProofBlindedAccountProvider where T: TrieCursorFactory + Clone + Send + Sync, H: HashedCursorFactory + Clone + Send + Sync, { - fn blinded_node(&self, path: &Nibbles) -> Result, SparseTrieError> { + fn trie_node(&self, path: &Nibbles) -> Result, SparseTrieError> { let start = enabled!(target: "trie::proof::blinded", Level::TRACE).then(Instant::now); let targets = MultiProofTargets::from_iter([(pad_path_to_key(path), HashSet::default())]); @@ -136,12 +136,12 @@ impl ProofBlindedStorageProvider { } } -impl BlindedProvider for ProofBlindedStorageProvider +impl TrieNodeProvider for ProofBlindedStorageProvider where T: TrieCursorFactory + Clone + Send + Sync, H: HashedCursorFactory + Clone + Send + Sync, { - fn blinded_node(&self, path: &Nibbles) -> Result, SparseTrieError> { + fn trie_node(&self, path: &Nibbles) -> Result, SparseTrieError> { let start = enabled!(target: "trie::proof::blinded", Level::TRACE).then(Instant::now); let targets = HashSet::from_iter([pad_path_to_key(path)]); diff --git a/crates/trie/trie/src/trie.rs b/crates/trie/trie/src/trie.rs index e6f5463b7df..c4e3dfcb477 100644 --- a/crates/trie/trie/src/trie.rs +++ b/crates/trie/trie/src/trie.rs @@ -134,7 +134,7 @@ where pub fn root(self) -> Result { match self.calculate(false)? { StateRootProgress::Complete(root, _, _) => Ok(root), - StateRootProgress::Progress(..) => unreachable!(), // update retenion is disabled + StateRootProgress::Progress(..) 
=> unreachable!(), // update retention is disabled } } diff --git a/crates/trie/trie/src/witness.rs b/crates/trie/trie/src/witness.rs index ce40a01e1c0..67da561f3d8 100644 --- a/crates/trie/trie/src/witness.rs +++ b/crates/trie/trie/src/witness.rs @@ -1,12 +1,13 @@ use crate::{ hashed_cursor::{HashedCursor, HashedCursorFactory}, prefix_set::TriePrefixSetsMut, - proof::{Proof, ProofBlindedProviderFactory}, + proof::{Proof, ProofTrieNodeProviderFactory}, trie_cursor::TrieCursorFactory, }; use alloy_rlp::EMPTY_STRING_CODE; use alloy_trie::EMPTY_ROOT_HASH; use reth_trie_common::HashedPostState; +use reth_trie_sparse::SparseTrieInterface; use alloy_primitives::{ keccak256, @@ -20,8 +21,8 @@ use reth_execution_errors::{ }; use reth_trie_common::{MultiProofTargets, Nibbles}; use reth_trie_sparse::{ - blinded::{BlindedProvider, BlindedProviderFactory, RevealedNode}, - SparseStateTrie, + provider::{RevealedNode, TrieNodeProvider, TrieNodeProviderFactory}, + SerialSparseTrie, SparseStateTrie, }; use std::sync::{mpsc, Arc}; @@ -145,15 +146,15 @@ where } let (tx, rx) = mpsc::channel(); - let blinded_provider_factory = WitnessBlindedProviderFactory::new( - ProofBlindedProviderFactory::new( + let blinded_provider_factory = WitnessTrieNodeProviderFactory::new( + ProofTrieNodeProviderFactory::new( self.trie_cursor_factory, self.hashed_cursor_factory, Arc::new(self.prefix_sets), ), tx, ); - let mut sparse_trie = SparseStateTrie::new(blinded_provider_factory); + let mut sparse_trie = SparseStateTrie::::new(); sparse_trie.reveal_multiproof(multiproof)?; // Attempt to update state trie to gather additional information for the witness. @@ -161,6 +162,7 @@ where proof_targets.into_iter().sorted_unstable_by_key(|(ha, _)| *ha) { // Update storage trie first. + let provider = blinded_provider_factory.storage_node_provider(hashed_address); let storage = state.storages.get(&hashed_address); let storage_trie = sparse_trie.storage_trie_mut(&hashed_address).ok_or( SparseStateTrieErrorKind::SparseStorageTrie( @@ -176,11 +178,11 @@ where .map(|v| alloy_rlp::encode_fixed_size(v).to_vec()); if let Some(value) = maybe_leaf_value { - storage_trie.update_leaf(storage_nibbles, value).map_err(|err| { + storage_trie.update_leaf(storage_nibbles, value, &provider).map_err(|err| { SparseStateTrieErrorKind::SparseStorageTrie(hashed_address, err.into_kind()) })?; } else { - storage_trie.remove_leaf(&storage_nibbles).map_err(|err| { + storage_trie.remove_leaf(&storage_nibbles, &provider).map_err(|err| { SparseStateTrieErrorKind::SparseStorageTrie(hashed_address, err.into_kind()) })?; } @@ -194,7 +196,7 @@ where .get(&hashed_address) .ok_or(TrieWitnessError::MissingAccount(hashed_address))? .unwrap_or_default(); - sparse_trie.update_account(hashed_address, account)?; + sparse_trie.update_account(hashed_address, account, &blinded_provider_factory)?; while let Ok(node) = rx.try_recv() { self.witness.insert(keccak256(&node), node); @@ -235,56 +237,56 @@ where } #[derive(Debug, Clone)] -struct WitnessBlindedProviderFactory { - /// Blinded node provider factory. +struct WitnessTrieNodeProviderFactory { + /// Trie node provider factory. provider_factory: F, - /// Sender for forwarding fetched blinded node. + /// Sender for forwarding fetched trie node. 
tx: mpsc::Sender<Bytes>, } -impl<F> WitnessBlindedProviderFactory<F> { +impl<F> WitnessTrieNodeProviderFactory<F> { const fn new(provider_factory: F, tx: mpsc::Sender<Bytes>) -> Self { Self { provider_factory, tx } } } -impl<F> BlindedProviderFactory for WitnessBlindedProviderFactory<F> +impl<F> TrieNodeProviderFactory for WitnessTrieNodeProviderFactory<F> where - F: BlindedProviderFactory, - F::AccountNodeProvider: BlindedProvider, - F::StorageNodeProvider: BlindedProvider, + F: TrieNodeProviderFactory, + F::AccountNodeProvider: TrieNodeProvider, + F::StorageNodeProvider: TrieNodeProvider, { - type AccountNodeProvider = WitnessBlindedProvider<F::AccountNodeProvider>; - type StorageNodeProvider = WitnessBlindedProvider<F::StorageNodeProvider>; + type AccountNodeProvider = WitnessTrieNodeProvider<F::AccountNodeProvider>; + type StorageNodeProvider = WitnessTrieNodeProvider<F::StorageNodeProvider>; fn account_node_provider(&self) -> Self::AccountNodeProvider { let provider = self.provider_factory.account_node_provider(); - WitnessBlindedProvider::new(provider, self.tx.clone()) + WitnessTrieNodeProvider::new(provider, self.tx.clone()) } fn storage_node_provider(&self, account: B256) -> Self::StorageNodeProvider { let provider = self.provider_factory.storage_node_provider(account); - WitnessBlindedProvider::new(provider, self.tx.clone()) + WitnessTrieNodeProvider::new(provider, self.tx.clone()) } } #[derive(Debug)] -struct WitnessBlindedProvider<P> { +struct WitnessTrieNodeProvider<P> { /// Proof-based blinded. provider: P, /// Sender for forwarding fetched blinded node. tx: mpsc::Sender<Bytes>, } -impl<P> WitnessBlindedProvider<P> { +impl<P> WitnessTrieNodeProvider<P> { const fn new(provider: P, tx: mpsc::Sender<Bytes>) -> Self { Self { provider, tx } } } -impl<P: BlindedProvider> BlindedProvider for WitnessBlindedProvider<P> { - fn blinded_node(&self, path: &Nibbles) -> Result<Option<RevealedNode>, SparseTrieError> { - let maybe_node = self.provider.blinded_node(path)?; +impl<P: TrieNodeProvider> TrieNodeProvider for WitnessTrieNodeProvider<P>
{ + fn trie_node(&self, path: &Nibbles) -> Result, SparseTrieError> { + let maybe_node = self.provider.trie_node(path)?; if let Some(node) = &maybe_node { self.tx .send(node.node.clone()) diff --git a/docs/cli/help.rs b/docs/cli/help.rs index e97d0bbfc46..e6813a483a5 100755 --- a/docs/cli/help.rs +++ b/docs/cli/help.rs @@ -5,26 +5,20 @@ edition = "2021" [dependencies] clap = { version = "4", features = ["derive"] } -pathdiff = "0.2" regex = "1" --- use clap::Parser; use regex::Regex; use std::{ borrow::Cow, - fmt, - fs::{self, File}, - io::{self, Write}, + fmt, fs, io, iter::once, path::{Path, PathBuf}, - process, process::{Command, Stdio}, str, sync::LazyLock, }; -const SECTION_START: &str = "{/* CLI_REFERENCE START */}"; -const SECTION_END: &str = "{/* CLI_REFERENCE END */"; const README: &str = r#"import Summary from './SUMMARY.mdx'; # CLI Reference @@ -124,10 +118,11 @@ fn main() -> io::Result<()> { // Generate SUMMARY.mdx. let summary: String = output .iter() - .map(|(cmd, _)| cmd_summary(None, cmd, 0)) + .map(|(cmd, _)| cmd_summary(cmd, 0)) .chain(once("\n".to_string())) .collect(); + println!("Writing SUMMARY.mdx to \"{}\"", out_dir.to_string_lossy()); write_file(&out_dir.clone().join("SUMMARY.mdx"), &summary)?; // Generate README.md. @@ -143,10 +138,7 @@ fn main() -> io::Result<()> { if args.root_summary { let root_summary: String = output .iter() - .map(|(cmd, _)| { - let root_path = pathdiff::diff_paths(&out_dir, &args.root_dir); - cmd_summary(root_path, cmd, args.root_indentation) - }) + .map(|(cmd, _)| cmd_summary(cmd, args.root_indentation)) .collect(); let path = Path::new(args.root_dir.as_str()); @@ -154,7 +146,7 @@ fn main() -> io::Result<()> { println!("Updating root summary in \"{}\"", path.to_string_lossy()); } // TODO: This is where we update the cli reference sidebar.ts - // update_root_summary(path, &root_summary)?; + update_root_summary(path, &root_summary)?; } Ok(()) @@ -244,47 +236,20 @@ fn parse_description(s: &str) -> (&str, &str) { } /// Returns the summary for a command and its subcommands. -fn cmd_summary(md_root: Option, cmd: &Cmd, indent: usize) -> String { +fn cmd_summary(cmd: &Cmd, indent: usize) -> String { let cmd_s = cmd.to_string(); let cmd_path = cmd_s.replace(" ", "/"); - let full_cmd_path = match md_root { - None => cmd_path, - Some(md_root) => format!("{}/{}", md_root.to_string_lossy(), cmd_path), - }; let indent_string = " ".repeat(indent + (cmd.subcommands.len() * 2)); - format!("{}- [`{}`](/cli/{})\n", indent_string, cmd_s, full_cmd_path) + format!("{}- [`{}`](/cli/{})\n", indent_string, cmd_s, cmd_path) } -/// Replaces the CLI_REFERENCE section in the root SUMMARY.mdx file. +/// Overwrites the root SUMMARY.mdx file with the generated content. fn update_root_summary(root_dir: &Path, root_summary: &str) -> io::Result<()> { - let summary_file = root_dir.join("SUMMARY.mdx"); - let original_summary_content = fs::read_to_string(&summary_file)?; - - let section_re = regex!(&format!(r"(?s)\s*{SECTION_START}.*?{SECTION_END}")); - if !section_re.is_match(&original_summary_content) { - eprintln!( - "Could not find CLI_REFERENCE section in {}. Please add the following section to the file:\n{}\n... 
CLI Reference goes here ...\n\n{}", - summary_file.display(), - SECTION_START, - SECTION_END - ); - process::exit(1); - } - - let section_end_re = regex!(&format!(r".*{SECTION_END}")); - let last_line = section_end_re - .find(&original_summary_content) - .map(|m| m.as_str().to_string()) - .expect("Could not extract last line of CLI_REFERENCE section"); - - let root_summary_s = root_summary.trim_end().replace("\n\n", "\n"); - let replace_with = format!(" {}\n{}\n{}", SECTION_START, root_summary_s, last_line); - - let new_root_summary = - section_re.replace(&original_summary_content, replace_with.as_str()).to_string(); + let summary_file = root_dir.join("vocs/docs/pages/cli/SUMMARY.mdx"); + println!("Overwriting {}", summary_file.display()); - let mut root_summary_file = File::create(&summary_file)?; - root_summary_file.write_all(new_root_summary.as_bytes()) + // Simply write the root summary content to the file + write_file(&summary_file, root_summary) } /// Preprocesses the help output of a command. @@ -309,6 +274,16 @@ fn preprocess_help(s: &str) -> Cow<'_, str> { r"(rpc.max-tracing-requests \n.*\n.*\n.*\n.*\n.*)\[default: \d+\]", r"$1[default: ]", ), + // Handle engine.max-proof-task-concurrency dynamic default + ( + r"(engine\.max-proof-task-concurrency.*)\[default: \d+\]", + r"$1[default: ]", + ), + // Handle engine.reserved-cpu-cores dynamic default + ( + r"(engine\.reserved-cpu-cores.*)\[default: \d+\]", + r"$1[default: ]", + ), ]; patterns .iter() diff --git a/docs/crates/eth-wire.md b/docs/crates/eth-wire.md index 1b4ba2d80e3..ece625764cb 100644 --- a/docs/crates/eth-wire.md +++ b/docs/crates/eth-wire.md @@ -10,7 +10,7 @@ This crate can be thought of as having 2 components: (Note that ECIES is implemented in a separate `reth-ecies` crate.) ## Types -The most basic Eth-wire type is an `ProtocolMessage`. It describes all messages that reth can send/receive. +The most basic Eth-wire type is a `ProtocolMessage`. It describes all messages that reth can send/receive. [File: crates/net/eth-wire/src/types/message.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/types/message.rs) ```rust, ignore @@ -78,7 +78,7 @@ In reth all [RLP](https://ethereum.org/en/developers/docs/data-structures-and-en Note that the `ProtocolMessage` itself implements these traits, so any stream of bytes can be converted into it by calling `ProtocolMessage::decode()` and vice versa with `ProtocolMessage::encode()`. The message type is determined by the first byte of the byte stream. ### Example: The Transactions message -Let's understand how an `EthMessage` is implemented by taking a look at the `Transactions` Message. The eth specification describes a Transaction message as a list of RLP encoded transactions: +Let's understand how an `EthMessage` is implemented by taking a look at the `Transactions` Message. The eth specification describes a Transaction message as a list of RLP-encoded transactions: [File: ethereum/devp2p/caps/eth.md](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#transactions-0x02) ``` @@ -138,7 +138,7 @@ Now that we know how the types work, let's take a look at how these are utilized ## P2PStream The lowest level stream to communicate with other peers is the P2P stream. It takes an underlying Tokio stream and does the following: -- Tracks and Manages Ping and pong messages and sends them when needed. +- Tracks and Manages Ping and Pong messages and sends them when needed. 
- Keeps track of the SharedCapabilities between the reth node and its peers. - Receives bytes from peers, decompresses and forwards them to its parent stream. - Receives bytes from its parent stream, compresses them and sends it to peers. @@ -161,7 +161,7 @@ pub struct P2PStream { } ``` ### Pinger -To manage pinging, an instance of the `Pinger` struct is used. This is a state machine which keeps track of how many pings +To manage pinging, an instance of the `Pinger` struct is used. This is a state machine that keeps track of how many pings we have sent/received and the timeouts associated with them. [File: crates/net/eth-wire/src/pinger.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/pinger.rs) @@ -218,7 +218,7 @@ pub(crate) fn poll_ping( ``` ### Sending and receiving data -To send and receive data, the P2PStream itself is a future which implements the `Stream` and `Sink` traits from the `futures` crate. +To send and receive data, the P2PStream itself is a future that implements the `Stream` and `Sink` traits from the `futures` crate. For the `Stream` trait, the `inner` stream is polled, decompressed and returned. Most of the code is just error handling and is omitted here for clarity. diff --git a/docs/crates/network.md b/docs/crates/network.md index 15c9c2494f5..9aa112b17ef 100644 --- a/docs/crates/network.md +++ b/docs/crates/network.md @@ -215,7 +215,7 @@ pub struct NetworkManager { /// Sender half to send events to the /// [`EthRequestHandler`](crate::eth_requests::EthRequestHandler) task, if configured. to_eth_request_handler: Option>, - /// Tracks the number of active session (connected peers). + /// Tracks the number of active sessions (connected peers). /// /// This is updated via internal events and shared via `Arc` with the [`NetworkHandle`] /// Updated by the `NetworkWorker` and loaded by the `NetworkService`. @@ -400,7 +400,7 @@ pub struct BodiesDownloader { } ``` -Here, similarly, a `FetchClient` is passed in to the `client` field, and the `get_block_bodies` method it implements is used when constructing the stream created by the `BodiesDownloader` in the `execute` method of the `BodyStage`. +Here, similarly, a `FetchClient` is passed into the `client` field, and the `get_block_bodies` method it implements is used when constructing the stream created by the `BodiesDownloader` in the `execute` method of the `BodyStage`. [File: crates/net/downloaders/src/bodies/bodies.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/downloaders/src/bodies/bodies.rs) ```rust,ignore @@ -657,9 +657,9 @@ pub struct TransactionsManager { pool: Pool, /// Network access. network: NetworkHandle, - /// Subscriptions to all network related events. + /// Subscriptions to all network-related events. /// - /// From which we get all new incoming transaction related messages. + /// From which we get all new incoming transaction-related messages. network_events: UnboundedReceiverStream, /// All currently active requests for pooled transactions. 
inflight_requests: Vec, @@ -696,7 +696,7 @@ pub struct TransactionsHandle { ### Input Streams to the Transactions Task We'll touch on most of the fields in the `TransactionsManager` as the chapter continues, but some worth noting now are the 4 streams from which inputs to the task are fed: -- `transaction_events`: A listener for `NetworkTransactionEvent`s sent from the `NetworkManager`, which consist solely of events related to transactions emitted by the network. +- `transaction_events`: A listener for `NetworkTransactionEvent`s sent from the `NetworkManager`, which consists solely of events related to transactions emitted by the network. - `network_events`: A listener for `NetworkEvent`s sent from the `NetworkManager`, which consist of other "meta" events such as sessions with peers being established or closed. - `command_rx`: A listener for `TransactionsCommand`s sent from the `TransactionsHandle` - `pending`: A listener for new pending transactions added to the `TransactionPool` @@ -1121,7 +1121,7 @@ It iterates over `TransactionsManager.pool_imports`, polling each one, and if it `on_good_import`, called when the transaction was successfully imported into the transaction pool, removes the entry for the given transaction hash from `TransactionsManager.transactions_by_peers`. -`on_bad_import` also removes the entry for the given transaction hash from `TransactionsManager.transactions_by_peers`, but also calls `report_bad_message` for each peer in the entry, decreasing all of their reputation scores as they were propagating a transaction that could not validated. +`on_bad_import` also removes the entry for the given transaction hash from `TransactionsManager.transactions_by_peers`, but also calls `report_bad_message` for each peer in the entry, decreasing all of their reputation scores as they were propagating a transaction that could not be validated. #### Checking on `pending_transactions` diff --git a/docs/crates/stages.md b/docs/crates/stages.md index cfa2d5012d5..a6f107c2c0b 100644 --- a/docs/crates/stages.md +++ b/docs/crates/stages.md @@ -1,6 +1,6 @@ # Stages -The `stages` lib plays a central role in syncing the node, maintaining state, updating the database and more. The stages involved in the Reth pipeline are the `HeaderStage`, `BodyStage`, `SenderRecoveryStage`, and `ExecutionStage` (note that this list is non-exhaustive, and more pipeline stages will be added in the near future). Each of these stages are queued up and stored within the Reth pipeline. +The `stages` lib plays a central role in syncing the node, maintaining state, updating the database and more. The stages involved in the Reth pipeline are the `HeaderStage`, `BodyStage`, `SenderRecoveryStage`, and `ExecutionStage` (note that this list is non-exhaustive, and more pipeline stages will be added in the near future). Each of these stages is queued up and stored within the Reth pipeline. When the node is first started, a new `Pipeline` is initialized and all of the stages are added into `Pipeline.stages`. Then, the `Pipeline::run` function is called, which starts the pipeline, executing all of the stages continuously in an infinite loop. This process syncs the chain, keeping everything up to date with the chain tip. 
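For intuition, here is a minimal, self-contained sketch of the stage-pipeline pattern described above. The `Stage` trait, the `execute` signature, and the checkpoint handling are simplifications for illustration, not reth's actual `Pipeline` API.

```rust
/// A minimal stand-in for a pipeline stage; reth's real trait is richer
/// (async execution, database transactions, unwind support).
trait Stage {
    fn id(&self) -> &'static str;
    /// Runs the stage from `checkpoint` toward `target`, returning the new
    /// checkpoint and whether the stage fully reached the target.
    fn execute(&mut self, checkpoint: u64, target: u64) -> (u64, bool);
}

struct Pipeline {
    stages: Vec<Box<dyn Stage>>,
}

impl Pipeline {
    /// Runs every stage in order, repeating until all stages report done,
    /// mirroring the continuous sync loop described above.
    fn run(&mut self, target: u64) {
        let mut checkpoints = vec![0u64; self.stages.len()];
        loop {
            let mut all_done = true;
            for (stage, checkpoint) in self.stages.iter_mut().zip(checkpoints.iter_mut()) {
                let (new_checkpoint, done) = stage.execute(*checkpoint, target);
                println!("stage {} progressed to {}", stage.id(), new_checkpoint);
                *checkpoint = new_checkpoint;
                all_done &= done;
            }
            if all_done {
                break;
            }
        }
    }
}
```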
@@ -36,7 +36,7 @@ The transactions root is a value that is calculated based on the transactions in
When the `BodyStage` is looking at the headers to determine which block to download, it will skip the blocks where the `header.ommers_hash` and the `header.transaction_root` are empty, denoting that the block is empty as well.
-Once the `BodyStage` determines which block bodies to fetch, a new `bodies_stream` is created which downloads all of the bodies from the `starting_block`, up until the `target_block` specified. Each time the `bodies_stream` yields a value, a `SealedBlock` is created using the block header, the ommers hash and the newly downloaded block body.
+Once the `BodyStage` determines which block bodies to fetch, a new `bodies_stream` is created which downloads all of the bodies from the `starting_block`, up until the specified `target_block`. Each time the `bodies_stream` yields a value, a `SealedBlock` is created using the block header, the ommers hash and the newly downloaded block body.
The new block is then pre-validated, checking that the ommers hash and transactions root in the block header are the same in the block body. Following a successful pre-validation, the `BodyStage` loops through each transaction in the `block.body`, adding the transaction to the database. This process is repeated for every downloaded block body, with the `BodyStage` returning `Ok(ExecOutput { stage_progress, done: true })` signaling it successfully completed.
@@ -108,7 +108,7 @@ The `IndexAccountHistoryStage` builds indices for account history, tracking how
## FinishStage
-The `FinishStage` is the final stage in the pipeline that performs cleanup and verification tasks. It ensures that all previous stages have completed successfully and that the node's state is consistent. This stage may also update various metrics and status indicators to reflect the completion of a sync cycle.
+The `FinishStage` is the final stage in the pipeline that performs cleanup and verification tasks. It ensures that all previous stages have been completed successfully and that the node's state is consistent. This stage may also update various metrics and status indicators to reflect the completion of a sync cycle.
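The empty-block check in the `BodyStage` hunk above can be shown with a small sketch. The `Header` type below is a simplified stand-in rather than reth's primitive, but the two constants are the well-known values a header carries when a block has no uncles and no transactions: the Keccak-256 hash of an RLP-encoded empty list, and the root of an empty trie.

```rust
// Keccak-256 of rlp([]): the ommers hash of a block with no uncles.
const EMPTY_OMMERS_HASH: &str =
    "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347";
// Root of an empty merkle-patricia trie: the transactions root of a block
// with no transactions.
const EMPTY_TX_ROOT: &str =
    "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421";

// Simplified stand-in for a block header; reth uses fixed-size hash types.
struct Header {
    number: u64,
    ommers_hash: &'static str,
    transactions_root: &'static str,
}

/// A body only needs to be downloaded if the header commits to at least one
/// uncle or transaction; otherwise the block is empty and can be skipped.
fn needs_body_download(header: &Header) -> bool {
    header.ommers_hash != EMPTY_OMMERS_HASH || header.transactions_root != EMPTY_TX_ROOT
}

fn main() {
    let headers = [
        Header { number: 1, ommers_hash: EMPTY_OMMERS_HASH, transactions_root: EMPTY_TX_ROOT },
        Header { number: 2, ommers_hash: EMPTY_OMMERS_HASH, transactions_root: "0xabc123" },
    ];
    for header in &headers {
        println!("block {}: fetch body? {}", header.number, needs_body_download(header));
    }
}
```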
diff --git a/docs/design/codecs.md b/docs/design/codecs.md
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/docs/design/database.md b/docs/design/database.md
index d81aced6f0c..381136d7bf0 100644
--- a/docs/design/database.md
+++ b/docs/design/database.md
@@ -2,15 +2,15 @@
## Abstractions
-- We created a [Database trait abstraction](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/interfaces/src/db/mod.rs) using Rust Stable GATs which frees us from being bound to a single database implementation. We currently use MDBX, but are exploring [redb](https://github.com/cberner/redb) as an alternative.
-- We then iterated on [`Transaction`](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/stages/src/db.rs#L14-L19) as a non-leaky abstraction with helpers for strictly-typed and unit-tested higher-level database abstractions.
+- We created a [Database trait abstraction](https://github.com/paradigmxyz/reth/blob/main/crates/cli/commands/src/db/mod.rs) using Rust Stable GATs which frees us from being bound to a single database implementation. We currently use MDBX, but are exploring [redb](https://github.com/cberner/redb) as an alternative.
+- We then iterated on [`Transaction`](https://github.com/paradigmxyz/reth/blob/main/crates/storage/errors/src/db.rs) as a non-leaky abstraction with helpers for strictly-typed and unit-tested higher-level database abstractions.
## Codecs
- We want Reth's serialized format to be able to trade off read/write speed for size, depending on who the user is.
-- To achieve that, we created the [Encode/Decode/Compress/Decompress traits](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/interfaces/src/db/table.rs#L9-L36) to make the (de)serialization of database `Table::Key` and `Table::Values` generic.
- - This allows for [out-of-the-box benchmarking](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/db/benches/encoding_iai.rs#L5) (using [Criterion](https://github.com/bheisler/criterion.rs))
- - It also enables [out-of-the-box fuzzing](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/interfaces/src/db/codecs/fuzz/mod.rs) using [trailofbits/test-fuzz](https://github.com/trailofbits/test-fuzz).
+- To achieve that, we created the [Encode/Decode/Compress/Decompress traits](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db-api/src/table.rs) to make the (de)serialization of database `Table::Key` and `Table::Values` generic.
+ - This allows for [out-of-the-box benchmarking](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db/benches/criterion.rs) (using [Criterion](https://github.com/bheisler/criterion.rs))
+ - It also enables [out-of-the-box fuzzing](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db-api/src/tables/codecs/fuzz/mod.rs) using [trailofbits/test-fuzz](https://github.com/trailofbits/test-fuzz).
- We implemented that trait for the following encoding formats:
  - [Ethereum-specific Compact Encoding](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/codecs/derive/src/compact/mod.rs): A lot of Ethereum datatypes have unnecessary zeros when serialized, or optional fields (e.g. empty hashes), which would be nice not to pay for in storage costs.
    - [Erigon](https://github.com/ledgerwatch/erigon/blob/12ee33a492f5d240458822d052820d9998653a63/docs/programmers_guide/db_walkthrough.MD) achieves that by having a `bitfield` set on Table "PlainState" which adds a bitfield to Accounts.
@@ -19,11 +19,11 @@
  - [Scale Encoding](https://github.com/paritytech/parity-scale-codec)
  - [Postcard Encoding](https://github.com/jamesmunns/postcard)
  - Passthrough (called `no_codec` in the codebase)
-- We made implementation of these traits easy via a derive macro called [`reth_codec`](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/codecs/derive/src/lib.rs#L15) that delegates to one of Compact (default), Scale, Postcard or Passthrough encoding. This is [derived on every struct we need](https://github.com/search?q=repo%3Aparadigmxyz%2Freth%20%22%23%5Breth_codec%5D%22&type=code), and lets us experiment with different encoding formats without having to modify the entire codebase each time.
+- We made implementation of these traits easy via a derive macro called [`reth_codec`](https://github.com/paradigmxyz/reth/blob/main/crates/storage/codecs/derive/src/lib.rs) that delegates to one of Compact (default), Scale, Postcard or Passthrough encoding. This is [derived on every struct we need](https://github.com/search?q=repo%3Aparadigmxyz%2Freth%20%22%23%5Breth_codec%5D%22&type=code), and lets us experiment with different encoding formats without having to modify the entire codebase each time.
### Table layout
-Historical state changes are indexed by `BlockNumber`. This means that `reth` stores the state for every account after every block that touched it, and it provides indexes for accessing that data quickly. While this may make the database size bigger (needs benchmark once `reth` is closer to prod), it provides fast access to historical state.
+Historical state changes are indexed by `BlockNumber`. This means that `reth` stores the state for every account after every block that touched it, and it provides indexes for accessing that data quickly. While this may make the database size bigger (needs benchmark once `reth` is closer to prod), it provides fast access to the historical state.
Below, you can see the table design that implements this scheme:
diff --git a/docs/design/goals.md b/docs/design/goals.md
index a29b3a824c4..6edfb1282c7 100644
--- a/docs/design/goals.md
+++ b/docs/design/goals.md
@@ -44,7 +44,7 @@ Ideally, we can achieve such fast runtime operation that we can avoid storing ce
**Control over tradeoffs**
-Almost any given design choice or optimization to the client comes with its own tradeoffs. As such, our long-term goal is not to make opinionated decisions on behalf of everyone, as some users will be negatively impacted and turned away from what could be a great client.
+Almost any given design choice or optimization for the client comes with its own tradeoffs. As such, our long-term goal is not to make opinionated decisions on behalf of everyone, as some users will be negatively impacted and turned away from what could be a great client.
**Profiles**
@@ -80,4 +80,4 @@ It goes without saying that verbose and thorough documentation is a must. The do
**Issue tracking**
-Everything that is (and is not) being worked on within the client should be tracked accordingly so that anyone in the community can stay on top of the state of development. This makes it clear what kind of help is needed, and where.
\ No newline at end of file
+Everything that is (and is not) being worked on within the client should be tracked accordingly so that anyone in the community can stay on top of the state of development. This makes it clear what kind of help is needed, and where.
diff --git a/docs/design/headers-downloader.md b/docs/design/headers-downloader.md
index 8b160265a2b..c31aeefc249 100644
--- a/docs/design/headers-downloader.md
+++ b/docs/design/headers-downloader.md
@@ -6,6 +6,6 @@
* First, we implemented the reverse linear download. It received the current chain tip and local head as arguments and requested blocks in batches starting from the tip, and retried on request failure. See [`reth#58`](https://github.com/paradigmxyz/reth/pull/58) and [`reth#119`](https://github.com/paradigmxyz/reth/pull/119).
* The first complete implementation of the headers stage was introduced in [`reth#126`](https://github.com/paradigmxyz/reth/pull/126). The stage looked up the local head & queried the consensus for the chain tip, then queried the downloader, passing them as arguments. After the download finished, the stage would proceed to insert headers in ascending order by appending the entries to the corresponding tables.
* The original downloader was refactored in [`reth#249`](https://github.com/paradigmxyz/reth/pull/249) to return a `Future` which would resolve when either the download completed or an error occurred during polling. This future kept a pointer to the current request at any time, allowing it to retry the request in case of failure. The insert logic of the headers stage remained unchanged.
- * NOTE: Up to this point the headers stage awaited full range of blocks (from local head to tip) to be downloaded before proceeding to insert.
-* [`reth#296`](https://github.com/paradigmxyz/reth/pull/296) introduced the `Stream` implementation of the download as well as the commit threshold for the headers stage. The `Stream` implementation yields headers as soon as they are received and validated. It dispatches the request for the next header batch until the head is reached. The headers stage now has a configurable commit threshold which allows configuring the insert batch size. With this change, the headers stage no longer waits for the download to be complete, but rather collects the headers from the stream up to the commit threshold parameter. After collecting, the stage proceeds to insert the batch. The process is repeated until the stream is drained. At this point, we populated all tables except for HeadersTD since it has to be computed in a linear ascending order. The stage starts walking the populated headers table and computes & inserts new total difficulty values.
-* This header implementation is unique because it is implemented as a Stream, it yields headers as soon as they become available (contrary to waiting for download to complete) and it keeps only one header in buffer (required to form the next header request) .
+ * NOTE: Up to this point the headers stage awaited the full range of blocks (from local head to tip) to be downloaded before proceeding to insert.
+* [`reth#296`](https://github.com/paradigmxyz/reth/pull/296) introduced the `Stream` implementation of the download as well as the commit threshold for the headers stage. The `Stream` implementation yields headers as soon as they are received and validated. It dispatches the request for the next header batch until the head is reached. The headers stage now has a configurable commit threshold which allows configuring the insert batch size. With this change, the headers stage no longer waits for the download to be complete, but rather collects the headers from the stream up to the commit threshold parameter. After collecting, the stage proceeds to insert the batch. The process is repeated until the stream is drained. At this point, we populated all tables except for HeadersTD since it has to be computed in a linear ascending order. The stage starts walking through the populated headers table and computes & inserts new total difficulty values.
+* This header implementation is unique because it is implemented as a Stream, it yields headers as soon as they become available (contrary to waiting for the download to complete), and it keeps only one header in the buffer (required to form the next header request).
diff --git a/docs/design/metrics.md b/docs/design/metrics.md
index a769f9d625f..1aeb2f37c1e 100644
--- a/docs/design/metrics.md
+++ b/docs/design/metrics.md
@@ -13,7 +13,7 @@ The main difference between metrics and traces is therefore that metrics are sys
**For most things, you likely want a metric**, except for two scenarios:
- For contributors, traces are a good profiling tool
-- For end-users that run complicated infrastructure, traces in the RPC component makes sense
+- For end-users who run complicated infrastructure, traces in the RPC component make sense
### How to add a metric
diff --git a/docs/design/review.md b/docs/design/review.md
index 702ab7722f8..22a32ef904f 100644
--- a/docs/design/review.md
+++ b/docs/design/review.md
@@ -1,6 +1,6 @@
# Review of other codebases
-This document contains some of our research in how other codebases designed various parts of their stack.
+This document contains some of our research on how other codebases designed various parts of their stack.
## P2P
@@ -18,7 +18,7 @@ This document contains some of our research in how other codebases designed vari
## Database
-* [Erigon's DB walkthrough](https://github.com/ledgerwatch/erigon/blob/12ee33a492f5d240458822d052820d9998653a63/docs/programmers_guide/db_walkthrough.MD) contains an overview. They made the most noticeable improvements on storage reduction.
+* [Erigon's DB walkthrough](https://github.com/ledgerwatch/erigon/blob/12ee33a492f5d240458822d052820d9998653a63/docs/programmers_guide/db_walkthrough.MD) contains an overview. They made the most noticeable improvements in storage reduction.
* [Gio's erigon-db table macros](https://github.com/gio256/erigon-db) + [Akula's macros](https://github.com/akula-bft/akula/blob/74b172ee1d2d2a4f04ce057b5a76679c1b83df9c/src/kv/tables.rs#L61).
## Header Downloaders
diff --git a/docs/repo/labels.md b/docs/repo/labels.md
index 6772b828ffc..2c830194415 100644
--- a/docs/repo/labels.md
+++ b/docs/repo/labels.md
@@ -4,7 +4,7 @@ Each label in the repository has a description attached that describes what the
There are 7 label categories in the repository:
-- **Area labels**: These labels denote the general area of the project an issue or PR affects. These start with [`A-`][area].
+- **Area labels**: These labels denote the general area of the project that an issue or PR affects. These start with [`A-`][area].
- **Category labels**: These labels denote the type of issue or change being made, for example https://github.com/paradigmxyz/reth/labels/C-bug or https://github.com/paradigmxyz/reth/labels/C-enhancement. These start with [`C-`][category].
- **Difficulty labels**: These are reserved for the very easy or very hard issues. Any issue without one of these labels can be considered to be of "average difficulty". They start with [`D-`][difficulty]. - **Meta labels**: These start with [`M-`][meta] and convey meaning to the core contributors, usually about the release process. diff --git a/docs/repo/layout.md b/docs/repo/layout.md index 8626d264432..22aae4c3512 100644 --- a/docs/repo/layout.md +++ b/docs/repo/layout.md @@ -29,7 +29,7 @@ The supporting crates are split into two categories: [primitives](#primitives) a ### Documentation -Contributor documentation is in [`docs`](../../docs) and end-user documentation is in [`book`](../../book). +Contributor documentation is in [`docs`](../../docs). ### Binaries diff --git a/docs/vocs/bun.lock b/docs/vocs/bun.lock new file mode 100644 index 00000000000..4203e94aa62 --- /dev/null +++ b/docs/vocs/bun.lock @@ -0,0 +1,1542 @@ +{ + "lockfileVersion": 1, + "workspaces": { + "": { + "name": "vocs", + "dependencies": { + "react": "^19.1.0", + "react-dom": "^19.1.0", + "vocs": "^1.0.13", + }, + "devDependencies": { + "@types/node": "^24.0.14", + "@types/react": "^19.1.8", + "glob": "^11.0.3", + "typescript": "^5.8.3", + }, + }, + }, + "packages": { + "@ampproject/remapping": ["@ampproject/remapping@2.3.0", "", { "dependencies": { "@jridgewell/gen-mapping": "^0.3.5", "@jridgewell/trace-mapping": "^0.3.24" } }, "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw=="], + + "@antfu/install-pkg": ["@antfu/install-pkg@1.1.0", "", { "dependencies": { "package-manager-detector": "^1.3.0", "tinyexec": "^1.0.1" } }, "sha512-MGQsmw10ZyI+EJo45CdSER4zEb+p31LpDAFp2Z3gkSd1yqVZGi0Ebx++YTEMonJy4oChEMLsxZ64j8FH6sSqtQ=="], + + "@antfu/utils": ["@antfu/utils@8.1.1", "", {}, "sha512-Mex9nXf9vR6AhcXmMrlz/HVgYYZpVGJ6YlPgwl7UnaFpnshXs6EK/oa5Gpf3CzENMjkvEx2tQtntGnb7UtSTOQ=="], + + "@babel/code-frame": ["@babel/code-frame@7.27.1", "", { "dependencies": { "@babel/helper-validator-identifier": "^7.27.1", "js-tokens": "^4.0.0", "picocolors": "^1.1.1" } }, "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg=="], + + "@babel/compat-data": ["@babel/compat-data@7.28.0", "", {}, "sha512-60X7qkglvrap8mn1lh2ebxXdZYtUcpd7gsmy9kLaBJ4i/WdY8PqTSdxyA8qraikqKQK5C1KRBKXqznrVapyNaw=="], + + "@babel/core": ["@babel/core@7.28.0", "", { "dependencies": { "@ampproject/remapping": "^2.2.0", "@babel/code-frame": "^7.27.1", "@babel/generator": "^7.28.0", "@babel/helper-compilation-targets": "^7.27.2", "@babel/helper-module-transforms": "^7.27.3", "@babel/helpers": "^7.27.6", "@babel/parser": "^7.28.0", "@babel/template": "^7.27.2", "@babel/traverse": "^7.28.0", "@babel/types": "^7.28.0", "convert-source-map": "^2.0.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.2", "json5": "^2.2.3", "semver": "^6.3.1" } }, "sha512-UlLAnTPrFdNGoFtbSXwcGFQBtQZJCNjaN6hQNP3UPvuNXT1i82N26KL3dZeIpNalWywr9IuQuncaAfUaS1g6sQ=="], + + "@babel/generator": ["@babel/generator@7.28.0", "", { "dependencies": { "@babel/parser": "^7.28.0", "@babel/types": "^7.28.0", "@jridgewell/gen-mapping": "^0.3.12", "@jridgewell/trace-mapping": "^0.3.28", "jsesc": "^3.0.2" } }, "sha512-lJjzvrbEeWrhB4P3QBsH7tey117PjLZnDbLiQEKjQ/fNJTjuq4HSqgFA+UNSwZT8D7dxxbnuSBMsa1lrWzKlQg=="], + + "@babel/helper-compilation-targets": ["@babel/helper-compilation-targets@7.27.2", "", { "dependencies": { "@babel/compat-data": "^7.27.2", "@babel/helper-validator-option": "^7.27.1", "browserslist": "^4.24.0", "lru-cache": "^5.1.1", 
"semver": "^6.3.1" } }, "sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ=="], + + "@babel/helper-globals": ["@babel/helper-globals@7.28.0", "", {}, "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw=="], + + "@babel/helper-module-imports": ["@babel/helper-module-imports@7.27.1", "", { "dependencies": { "@babel/traverse": "^7.27.1", "@babel/types": "^7.27.1" } }, "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w=="], + + "@babel/helper-module-transforms": ["@babel/helper-module-transforms@7.27.3", "", { "dependencies": { "@babel/helper-module-imports": "^7.27.1", "@babel/helper-validator-identifier": "^7.27.1", "@babel/traverse": "^7.27.3" }, "peerDependencies": { "@babel/core": "^7.0.0" } }, "sha512-dSOvYwvyLsWBeIRyOeHXp5vPj5l1I011r52FM1+r1jCERv+aFXYk4whgQccYEGYxK2H3ZAIA8nuPkQ0HaUo3qg=="], + + "@babel/helper-plugin-utils": ["@babel/helper-plugin-utils@7.27.1", "", {}, "sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw=="], + + "@babel/helper-string-parser": ["@babel/helper-string-parser@7.27.1", "", {}, "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA=="], + + "@babel/helper-validator-identifier": ["@babel/helper-validator-identifier@7.27.1", "", {}, "sha512-D2hP9eA+Sqx1kBZgzxZh0y1trbuU+JoDkiEwqhQ36nodYqJwyEIhPSdMNd7lOm/4io72luTPWH20Yda0xOuUow=="], + + "@babel/helper-validator-option": ["@babel/helper-validator-option@7.27.1", "", {}, "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg=="], + + "@babel/helpers": ["@babel/helpers@7.27.6", "", { "dependencies": { "@babel/template": "^7.27.2", "@babel/types": "^7.27.6" } }, "sha512-muE8Tt8M22638HU31A3CgfSUciwz1fhATfoVai05aPXGor//CdWDCbnlY1yvBPo07njuVOCNGCSp/GTt12lIug=="], + + "@babel/parser": ["@babel/parser@7.28.0", "", { "dependencies": { "@babel/types": "^7.28.0" }, "bin": "./bin/babel-parser.js" }, "sha512-jVZGvOxOuNSsuQuLRTh13nU0AogFlw32w/MT+LV6D3sP5WdbW61E77RnkbaO2dUvmPAYrBDJXGn5gGS6tH4j8g=="], + + "@babel/plugin-syntax-typescript": ["@babel/plugin-syntax-typescript@7.27.1", "", { "dependencies": { "@babel/helper-plugin-utils": "^7.27.1" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, "sha512-xfYCBMxveHrRMnAWl1ZlPXOZjzkN82THFvLhQhFXFt81Z5HnN+EtUkZhv/zcKpmT3fzmWZB0ywiBrbC3vogbwQ=="], + + "@babel/plugin-transform-react-jsx-self": ["@babel/plugin-transform-react-jsx-self@7.27.1", "", { "dependencies": { "@babel/helper-plugin-utils": "^7.27.1" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, "sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw=="], + + "@babel/plugin-transform-react-jsx-source": ["@babel/plugin-transform-react-jsx-source@7.27.1", "", { "dependencies": { "@babel/helper-plugin-utils": "^7.27.1" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, "sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw=="], + + "@babel/runtime": ["@babel/runtime@7.27.6", "", {}, "sha512-vbavdySgbTTrmFE+EsiqUTzlOr5bzlnJtUv9PynGCAKvfQqjIXbvFdumPM/GxMDfyuGMJaJAU6TO4zc1Jf1i8Q=="], + + "@babel/template": ["@babel/template@7.27.2", "", { "dependencies": { "@babel/code-frame": "^7.27.1", "@babel/parser": "^7.27.2", "@babel/types": "^7.27.1" } }, "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw=="], + + "@babel/traverse": 
["@babel/traverse@7.28.0", "", { "dependencies": { "@babel/code-frame": "^7.27.1", "@babel/generator": "^7.28.0", "@babel/helper-globals": "^7.28.0", "@babel/parser": "^7.28.0", "@babel/template": "^7.27.2", "@babel/types": "^7.28.0", "debug": "^4.3.1" } }, "sha512-mGe7UK5wWyh0bKRfupsUchrQGqvDbZDbKJw+kcRGSmdHVYrv+ltd0pnpDTVpiTqnaBru9iEvA8pz8W46v0Amwg=="], + + "@babel/types": ["@babel/types@7.28.1", "", { "dependencies": { "@babel/helper-string-parser": "^7.27.1", "@babel/helper-validator-identifier": "^7.27.1" } }, "sha512-x0LvFTekgSX+83TI28Y9wYPUfzrnl2aT5+5QLnO6v7mSJYtEEevuDRN0F0uSHRk1G1IWZC43o00Y0xDDrpBGPQ=="], + + "@braintree/sanitize-url": ["@braintree/sanitize-url@7.1.1", "", {}, "sha512-i1L7noDNxtFyL5DmZafWy1wRVhGehQmzZaz1HiN5e7iylJMSZR7ekOV7NsIqa5qBldlLrsKv4HbgFUVlQrz8Mw=="], + + "@chevrotain/cst-dts-gen": ["@chevrotain/cst-dts-gen@11.0.3", "", { "dependencies": { "@chevrotain/gast": "11.0.3", "@chevrotain/types": "11.0.3", "lodash-es": "4.17.21" } }, "sha512-BvIKpRLeS/8UbfxXxgC33xOumsacaeCKAjAeLyOn7Pcp95HiRbrpl14S+9vaZLolnbssPIUuiUd8IvgkRyt6NQ=="], + + "@chevrotain/gast": ["@chevrotain/gast@11.0.3", "", { "dependencies": { "@chevrotain/types": "11.0.3", "lodash-es": "4.17.21" } }, "sha512-+qNfcoNk70PyS/uxmj3li5NiECO+2YKZZQMbmjTqRI3Qchu8Hig/Q9vgkHpI3alNjr7M+a2St5pw5w5F6NL5/Q=="], + + "@chevrotain/regexp-to-ast": ["@chevrotain/regexp-to-ast@11.0.3", "", {}, "sha512-1fMHaBZxLFvWI067AVbGJav1eRY7N8DDvYCTwGBiE/ytKBgP8azTdgyrKyWZ9Mfh09eHWb5PgTSO8wi7U824RA=="], + + "@chevrotain/types": ["@chevrotain/types@11.0.3", "", {}, "sha512-gsiM3G8b58kZC2HaWR50gu6Y1440cHiJ+i3JUvcp/35JchYejb2+5MVeJK0iKThYpAa/P2PYFV4hoi44HD+aHQ=="], + + "@chevrotain/utils": ["@chevrotain/utils@11.0.3", "", {}, "sha512-YslZMgtJUyuMbZ+aKvfF3x1f5liK4mWNxghFRv7jqRR9C3R3fAOGTTKvxXDa2Y1s9zSbcpuO0cAxDYsc9SrXoQ=="], + + "@clack/core": ["@clack/core@0.3.5", "", { "dependencies": { "picocolors": "^1.0.0", "sisteransi": "^1.0.5" } }, "sha512-5cfhQNH+1VQ2xLQlmzXMqUoiaH0lRBq9/CLW9lTyMbuKLC3+xEK01tHVvyut++mLOn5urSHmkm6I0Lg9MaJSTQ=="], + + "@clack/prompts": ["@clack/prompts@0.7.0", "", { "dependencies": { "@clack/core": "^0.3.3", "is-unicode-supported": "*", "picocolors": "^1.0.0", "sisteransi": "^1.0.5" } }, "sha512-0MhX9/B4iL6Re04jPrttDm+BsP8y6mS7byuv0BvXgdXhbV5PdlsHt55dvNsuBCPZ7xq1oTAOOuotR9NFbQyMSA=="], + + "@emotion/hash": ["@emotion/hash@0.9.2", "", {}, "sha512-MyqliTZGuOm3+5ZRSaaBGP3USLw6+EGykkwZns2EPC5g8jJ4z9OrdZY9apkl3+UP9+sdz76YYkwCKP5gh8iY3g=="], + + "@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.25.6", "", { "os": "aix", "cpu": "ppc64" }, "sha512-ShbM/3XxwuxjFiuVBHA+d3j5dyac0aEVVq1oluIDf71hUw0aRF59dV/efUsIwFnR6m8JNM2FjZOzmaZ8yG61kw=="], + + "@esbuild/android-arm": ["@esbuild/android-arm@0.25.6", "", { "os": "android", "cpu": "arm" }, "sha512-S8ToEOVfg++AU/bHwdksHNnyLyVM+eMVAOf6yRKFitnwnbwwPNqKr3srzFRe7nzV69RQKb5DgchIX5pt3L53xg=="], + + "@esbuild/android-arm64": ["@esbuild/android-arm64@0.25.6", "", { "os": "android", "cpu": "arm64" }, "sha512-hd5zdUarsK6strW+3Wxi5qWws+rJhCCbMiC9QZyzoxfk5uHRIE8T287giQxzVpEvCwuJ9Qjg6bEjcRJcgfLqoA=="], + + "@esbuild/android-x64": ["@esbuild/android-x64@0.25.6", "", { "os": "android", "cpu": "x64" }, "sha512-0Z7KpHSr3VBIO9A/1wcT3NTy7EB4oNC4upJ5ye3R7taCc2GUdeynSLArnon5G8scPwaU866d3H4BCrE5xLW25A=="], + + "@esbuild/darwin-arm64": ["@esbuild/darwin-arm64@0.25.6", "", { "os": "darwin", "cpu": "arm64" }, "sha512-FFCssz3XBavjxcFxKsGy2DYK5VSvJqa6y5HXljKzhRZ87LvEi13brPrf/wdyl/BbpbMKJNOr1Sd0jtW4Ge1pAA=="], + + "@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.25.6", "", { "os": "darwin", "cpu": "x64" }, 
"sha512-GfXs5kry/TkGM2vKqK2oyiLFygJRqKVhawu3+DOCk7OxLy/6jYkWXhlHwOoTb0WqGnWGAS7sooxbZowy+pK9Yg=="], + + "@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.25.6", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-aoLF2c3OvDn2XDTRvn8hN6DRzVVpDlj2B/F66clWd/FHLiHaG3aVZjxQX2DYphA5y/evbdGvC6Us13tvyt4pWg=="], + + "@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.25.6", "", { "os": "freebsd", "cpu": "x64" }, "sha512-2SkqTjTSo2dYi/jzFbU9Plt1vk0+nNg8YC8rOXXea+iA3hfNJWebKYPs3xnOUf9+ZWhKAaxnQNUf2X9LOpeiMQ=="], + + "@esbuild/linux-arm": ["@esbuild/linux-arm@0.25.6", "", { "os": "linux", "cpu": "arm" }, "sha512-SZHQlzvqv4Du5PrKE2faN0qlbsaW/3QQfUUc6yO2EjFcA83xnwm91UbEEVx4ApZ9Z5oG8Bxz4qPE+HFwtVcfyw=="], + + "@esbuild/linux-arm64": ["@esbuild/linux-arm64@0.25.6", "", { "os": "linux", "cpu": "arm64" }, "sha512-b967hU0gqKd9Drsh/UuAm21Khpoh6mPBSgz8mKRq4P5mVK8bpA+hQzmm/ZwGVULSNBzKdZPQBRT3+WuVavcWsQ=="], + + "@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.25.6", "", { "os": "linux", "cpu": "ia32" }, "sha512-aHWdQ2AAltRkLPOsKdi3xv0mZ8fUGPdlKEjIEhxCPm5yKEThcUjHpWB1idN74lfXGnZ5SULQSgtr5Qos5B0bPw=="], + + "@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.25.6", "", { "os": "linux", "cpu": "none" }, "sha512-VgKCsHdXRSQ7E1+QXGdRPlQ/e08bN6WMQb27/TMfV+vPjjTImuT9PmLXupRlC90S1JeNNW5lzkAEO/McKeJ2yg=="], + + "@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.25.6", "", { "os": "linux", "cpu": "none" }, "sha512-WViNlpivRKT9/py3kCmkHnn44GkGXVdXfdc4drNmRl15zVQ2+D2uFwdlGh6IuK5AAnGTo2qPB1Djppj+t78rzw=="], + + "@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.25.6", "", { "os": "linux", "cpu": "ppc64" }, "sha512-wyYKZ9NTdmAMb5730I38lBqVu6cKl4ZfYXIs31Baf8aoOtB4xSGi3THmDYt4BTFHk7/EcVixkOV2uZfwU3Q2Jw=="], + + "@esbuild/linux-riscv64": ["@esbuild/linux-riscv64@0.25.6", "", { "os": "linux", "cpu": "none" }, "sha512-KZh7bAGGcrinEj4qzilJ4hqTY3Dg2U82c8bv+e1xqNqZCrCyc+TL9AUEn5WGKDzm3CfC5RODE/qc96OcbIe33w=="], + + "@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.25.6", "", { "os": "linux", "cpu": "s390x" }, "sha512-9N1LsTwAuE9oj6lHMyyAM+ucxGiVnEqUdp4v7IaMmrwb06ZTEVCIs3oPPplVsnjPfyjmxwHxHMF8b6vzUVAUGw=="], + + "@esbuild/linux-x64": ["@esbuild/linux-x64@0.25.6", "", { "os": "linux", "cpu": "x64" }, "sha512-A6bJB41b4lKFWRKNrWoP2LHsjVzNiaurf7wyj/XtFNTsnPuxwEBWHLty+ZE0dWBKuSK1fvKgrKaNjBS7qbFKig=="], + + "@esbuild/netbsd-arm64": ["@esbuild/netbsd-arm64@0.25.6", "", { "os": "none", "cpu": "arm64" }, "sha512-IjA+DcwoVpjEvyxZddDqBY+uJ2Snc6duLpjmkXm/v4xuS3H+3FkLZlDm9ZsAbF9rsfP3zeA0/ArNDORZgrxR/Q=="], + + "@esbuild/netbsd-x64": ["@esbuild/netbsd-x64@0.25.6", "", { "os": "none", "cpu": "x64" }, "sha512-dUXuZr5WenIDlMHdMkvDc1FAu4xdWixTCRgP7RQLBOkkGgwuuzaGSYcOpW4jFxzpzL1ejb8yF620UxAqnBrR9g=="], + + "@esbuild/openbsd-arm64": ["@esbuild/openbsd-arm64@0.25.6", "", { "os": "openbsd", "cpu": "arm64" }, "sha512-l8ZCvXP0tbTJ3iaqdNf3pjaOSd5ex/e6/omLIQCVBLmHTlfXW3zAxQ4fnDmPLOB1x9xrcSi/xtCWFwCZRIaEwg=="], + + "@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.25.6", "", { "os": "openbsd", "cpu": "x64" }, "sha512-hKrmDa0aOFOr71KQ/19JC7az1P0GWtCN1t2ahYAf4O007DHZt/dW8ym5+CUdJhQ/qkZmI1HAF8KkJbEFtCL7gw=="], + + "@esbuild/openharmony-arm64": ["@esbuild/openharmony-arm64@0.25.6", "", { "os": "none", "cpu": "arm64" }, "sha512-+SqBcAWoB1fYKmpWoQP4pGtx+pUUC//RNYhFdbcSA16617cchuryuhOCRpPsjCblKukAckWsV+aQ3UKT/RMPcA=="], + + "@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.25.6", "", { "os": "sunos", "cpu": "x64" }, "sha512-dyCGxv1/Br7MiSC42qinGL8KkG4kX0pEsdb0+TKhmJZgCUDBGmyo1/ArCjNGiOLiIAgdbWgmWgib4HoCi5t7kA=="], + + "@esbuild/win32-arm64": ["@esbuild/win32-arm64@0.25.6", 
"", { "os": "win32", "cpu": "arm64" }, "sha512-42QOgcZeZOvXfsCBJF5Afw73t4veOId//XD3i+/9gSkhSV6Gk3VPlWncctI+JcOyERv85FUo7RxuxGy+z8A43Q=="], + + "@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.25.6", "", { "os": "win32", "cpu": "ia32" }, "sha512-4AWhgXmDuYN7rJI6ORB+uU9DHLq/erBbuMoAuB4VWJTu5KtCgcKYPynF0YI1VkBNuEfjNlLrFr9KZPJzrtLkrQ=="], + + "@esbuild/win32-x64": ["@esbuild/win32-x64@0.25.6", "", { "os": "win32", "cpu": "x64" }, "sha512-NgJPHHbEpLQgDH2MjQu90pzW/5vvXIZ7KOnPyNBm92A6WgZ/7b6fJyUBjoumLqeOQQGqY2QjQxRo97ah4Sj0cA=="], + + "@floating-ui/core": ["@floating-ui/core@1.7.2", "", { "dependencies": { "@floating-ui/utils": "^0.2.10" } }, "sha512-wNB5ooIKHQc+Kui96jE/n69rHFWAVoxn5CAzL1Xdd8FG03cgY3MLO+GF9U3W737fYDSgPWA6MReKhBQBop6Pcw=="], + + "@floating-ui/dom": ["@floating-ui/dom@1.7.2", "", { "dependencies": { "@floating-ui/core": "^1.7.2", "@floating-ui/utils": "^0.2.10" } }, "sha512-7cfaOQuCS27HD7DX+6ib2OrnW+b4ZBwDNnCcT0uTyidcmyWb03FnQqJybDBoCnpdxwBSfA94UAYlRCt7mV+TbA=="], + + "@floating-ui/react": ["@floating-ui/react@0.27.13", "", { "dependencies": { "@floating-ui/react-dom": "^2.1.4", "@floating-ui/utils": "^0.2.10", "tabbable": "^6.0.0" }, "peerDependencies": { "react": ">=17.0.0", "react-dom": ">=17.0.0" } }, "sha512-Qmj6t9TjgWAvbygNEu1hj4dbHI9CY0ziCMIJrmYoDIn9TUAH5lRmiIeZmRd4c6QEZkzdoH7jNnoNyoY1AIESiA=="], + + "@floating-ui/react-dom": ["@floating-ui/react-dom@2.1.4", "", { "dependencies": { "@floating-ui/dom": "^1.7.2" }, "peerDependencies": { "react": ">=16.8.0", "react-dom": ">=16.8.0" } }, "sha512-JbbpPhp38UmXDDAu60RJmbeme37Jbgsm7NrHGgzYYFKmblzRUh6Pa641dII6LsjwF4XlScDrde2UAzDo/b9KPw=="], + + "@floating-ui/utils": ["@floating-ui/utils@0.2.10", "", {}, "sha512-aGTxbpbg8/b5JfU1HXSrbH3wXZuLPJcNEcZQFMxLs3oSzgtVu6nFPkbbGGUvBcUjKV2YyB9Wxxabo+HEH9tcRQ=="], + + "@fortawesome/fontawesome-free": ["@fortawesome/fontawesome-free@6.7.2", "", {}, "sha512-JUOtgFW6k9u4Y+xeIaEiLr3+cjoUPiAuLXoyKOJSia6Duzb7pq+A76P9ZdPDoAoxHdHzq6gE9/jKBGXlZT8FbA=="], + + "@hono/node-server": ["@hono/node-server@1.16.0", "", { "peerDependencies": { "hono": "^4" } }, "sha512-9LwRb5XOrTFapOABiQjGC50wRVlzUvWZsDHINCnkBniP+Q+LQf4waN0nzk9t+2kqcTsnGnieSmqpHsr6kH2bdw=="], + + "@iconify/types": ["@iconify/types@2.0.0", "", {}, "sha512-+wluvCrRhXrhyOmRDJ3q8mux9JkKy5SJ/v8ol2tu4FVjyYvtEzkc/3pK15ET6RKg4b4w4BmTk1+gsCUhf21Ykg=="], + + "@iconify/utils": ["@iconify/utils@2.3.0", "", { "dependencies": { "@antfu/install-pkg": "^1.0.0", "@antfu/utils": "^8.1.0", "@iconify/types": "^2.0.0", "debug": "^4.4.0", "globals": "^15.14.0", "kolorist": "^1.8.0", "local-pkg": "^1.0.0", "mlly": "^1.7.4" } }, "sha512-GmQ78prtwYW6EtzXRU1rY+KwOKfz32PD7iJh6Iyqw68GiKuoZ2A6pRtzWONz5VQJbp50mEjXh/7NkumtrAgRKA=="], + + "@isaacs/balanced-match": ["@isaacs/balanced-match@4.0.1", "", {}, "sha512-yzMTt9lEb8Gv7zRioUilSglI0c0smZ9k5D65677DLWLtWJaXIS3CqcGyUFByYKlnUj6TkjLVs54fBl6+TiGQDQ=="], + + "@isaacs/brace-expansion": ["@isaacs/brace-expansion@5.0.0", "", { "dependencies": { "@isaacs/balanced-match": "^4.0.1" } }, "sha512-ZT55BDLV0yv0RBm2czMiZ+SqCGO7AvmOM3G/w2xhVPH+te0aKgFjmBvGlL1dH+ql2tgGO3MVrbb3jCKyvpgnxA=="], + + "@isaacs/cliui": ["@isaacs/cliui@8.0.2", "", { "dependencies": { "string-width": "^5.1.2", "string-width-cjs": "npm:string-width@^4.2.0", "strip-ansi": "^7.0.1", "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", "wrap-ansi": "^8.1.0", "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" } }, "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA=="], + + "@jridgewell/gen-mapping": ["@jridgewell/gen-mapping@0.3.12", "", { 
"dependencies": { "@jridgewell/sourcemap-codec": "^1.5.0", "@jridgewell/trace-mapping": "^0.3.24" } }, "sha512-OuLGC46TjB5BbN1dH8JULVVZY4WTdkF7tV9Ys6wLL1rubZnCMstOhNHueU5bLCrnRuDhKPDM4g6sw4Bel5Gzqg=="], + + "@jridgewell/resolve-uri": ["@jridgewell/resolve-uri@3.1.2", "", {}, "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw=="], + + "@jridgewell/sourcemap-codec": ["@jridgewell/sourcemap-codec@1.5.4", "", {}, "sha512-VT2+G1VQs/9oz078bLrYbecdZKs912zQlkelYpuf+SXF+QvZDYJlbx/LSx+meSAwdDFnF8FVXW92AVjjkVmgFw=="], + + "@jridgewell/trace-mapping": ["@jridgewell/trace-mapping@0.3.29", "", { "dependencies": { "@jridgewell/resolve-uri": "^3.1.0", "@jridgewell/sourcemap-codec": "^1.4.14" } }, "sha512-uw6guiW/gcAGPDhLmd77/6lW8QLeiV5RUTsAX46Db6oLhGaVj4lhnPwb184s1bkc8kdVg/+h988dro8GRDpmYQ=="], + + "@mdx-js/mdx": ["@mdx-js/mdx@3.1.0", "", { "dependencies": { "@types/estree": "^1.0.0", "@types/estree-jsx": "^1.0.0", "@types/hast": "^3.0.0", "@types/mdx": "^2.0.0", "collapse-white-space": "^2.0.0", "devlop": "^1.0.0", "estree-util-is-identifier-name": "^3.0.0", "estree-util-scope": "^1.0.0", "estree-walker": "^3.0.0", "hast-util-to-jsx-runtime": "^2.0.0", "markdown-extensions": "^2.0.0", "recma-build-jsx": "^1.0.0", "recma-jsx": "^1.0.0", "recma-stringify": "^1.0.0", "rehype-recma": "^1.0.0", "remark-mdx": "^3.0.0", "remark-parse": "^11.0.0", "remark-rehype": "^11.0.0", "source-map": "^0.7.0", "unified": "^11.0.0", "unist-util-position-from-estree": "^2.0.0", "unist-util-stringify-position": "^4.0.0", "unist-util-visit": "^5.0.0", "vfile": "^6.0.0" } }, "sha512-/QxEhPAvGwbQmy1Px8F899L5Uc2KZ6JtXwlCgJmjSTBedwOZkByYcBG4GceIGPXRDsmfxhHazuS+hlOShRLeDw=="], + + "@mdx-js/react": ["@mdx-js/react@3.1.0", "", { "dependencies": { "@types/mdx": "^2.0.0" }, "peerDependencies": { "@types/react": ">=16", "react": ">=16" } }, "sha512-QjHtSaoameoalGnKDT3FoIl4+9RwyTmo9ZJGBdLOks/YOiWHoRDI3PUwEzOE7kEmGcV3AFcp9K6dYu9rEuKLAQ=="], + + "@mdx-js/rollup": ["@mdx-js/rollup@3.1.0", "", { "dependencies": { "@mdx-js/mdx": "^3.0.0", "@rollup/pluginutils": "^5.0.0", "source-map": "^0.7.0", "vfile": "^6.0.0" }, "peerDependencies": { "rollup": ">=2" } }, "sha512-q4xOtUXpCzeouE8GaJ8StT4rDxm/U5j6lkMHL2srb2Q3Y7cobE0aXyPzXVVlbeIMBi+5R5MpbiaVE5/vJUdnHg=="], + + "@mermaid-js/parser": ["@mermaid-js/parser@0.6.2", "", { "dependencies": { "langium": "3.3.1" } }, "sha512-+PO02uGF6L6Cs0Bw8RpGhikVvMWEysfAyl27qTlroUB8jSWr1lL0Sf6zi78ZxlSnmgSY2AMMKVgghnN9jTtwkQ=="], + + "@noble/hashes": ["@noble/hashes@1.8.0", "", {}, "sha512-jCs9ldd7NwzpgXDIf6P3+NrHh9/sD6CQdxHyjQI+h/6rDNo88ypBxxz45UDuZHz9r3tNz7N/VInSVoVdtXEI4A=="], + + "@nodelib/fs.scandir": ["@nodelib/fs.scandir@2.1.5", "", { "dependencies": { "@nodelib/fs.stat": "2.0.5", "run-parallel": "^1.1.9" } }, "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g=="], + + "@nodelib/fs.stat": ["@nodelib/fs.stat@2.0.5", "", {}, "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A=="], + + "@nodelib/fs.walk": ["@nodelib/fs.walk@1.2.8", "", { "dependencies": { "@nodelib/fs.scandir": "2.1.5", "fastq": "^1.6.0" } }, "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg=="], + + "@radix-ui/colors": ["@radix-ui/colors@3.0.0", "", {}, "sha512-FUOsGBkHrYJwCSEtWRCIfQbZG7q1e6DgxCIOe1SUQzDe/7rXXeA47s8yCn6fuTNQAj1Zq4oTFi9Yjp3wzElcxg=="], + + "@radix-ui/number": ["@radix-ui/number@1.1.1", "", {}, 
"sha512-MkKCwxlXTgz6CFoJx3pCwn07GKp36+aZyu/u2Ln2VrA5DcdyCZkASEDBTd8x5whTQQL5CiYf4prXKLcgQdv29g=="], + + "@radix-ui/primitive": ["@radix-ui/primitive@1.1.2", "", {}, "sha512-XnbHrrprsNqZKQhStrSwgRUQzoCI1glLzdw79xiZPoofhGICeZRSQ3dIxAKH1gb3OHfNf4d6f+vAv3kil2eggA=="], + + "@radix-ui/react-accessible-icon": ["@radix-ui/react-accessible-icon@1.1.7", "", { "dependencies": { "@radix-ui/react-visually-hidden": "1.2.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-XM+E4WXl0OqUJFovy6GjmxxFyx9opfCAIUku4dlKRd5YEPqt4kALOkQOp0Of6reHuUkJuiPBEc5k0o4z4lTC8A=="], + + "@radix-ui/react-accordion": ["@radix-ui/react-accordion@1.2.11", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-collapsible": "1.1.11", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-l3W5D54emV2ues7jjeG1xcyN7S3jnK3zE2zHqgn0CmMsy9lNJwmgcrmaxS+7ipw15FAivzKNzH3d5EcGoFKw0A=="], + + "@radix-ui/react-alert-dialog": ["@radix-ui/react-alert-dialog@1.1.14", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-dialog": "1.1.14", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-slot": "1.2.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-IOZfZ3nPvN6lXpJTBCunFQPRSvK8MDgSc1FB85xnIpUKOw9en0dJj8JmCAxV7BiZdtYlUpmrQjoTFkVYtdoWzQ=="], + + "@radix-ui/react-arrow": ["@radix-ui/react-arrow@1.1.7", "", { "dependencies": { "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-F+M1tLhO+mlQaOWspE8Wstg+z6PwxwRd8oQ8IXceWz92kfAmalTRf0EjrouQeo7QssEPfCn05B4Ihs1K9WQ/7w=="], + + "@radix-ui/react-aspect-ratio": ["@radix-ui/react-aspect-ratio@1.1.7", "", { "dependencies": { "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-Yq6lvO9HQyPwev1onK1daHCHqXVLzPhSVjmsNjCa2Zcxy2f7uJD2itDtxknv6FzAKCwD1qQkeVDmX/cev13n/g=="], + + "@radix-ui/react-avatar": ["@radix-ui/react-avatar@1.1.10", "", { "dependencies": { "@radix-ui/react-context": "1.1.2", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-is-hydrated": "0.1.0", "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", 
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-V8piFfWapM5OmNCXTzVQY+E1rDa53zY+MQ4Y7356v4fFz6vqCyUtIz2rUD44ZEdwg78/jKmMJHj07+C/Z/rcog=="], + + "@radix-ui/react-checkbox": ["@radix-ui/react-checkbox@1.3.2", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-presence": "1.1.4", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-previous": "1.1.1", "@radix-ui/react-use-size": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-yd+dI56KZqawxKZrJ31eENUwqc1QSqg4OZ15rybGjF2ZNwMO+wCyHzAVLRp9qoYJf7kYy0YpZ2b0JCzJ42HZpA=="], + + "@radix-ui/react-collapsible": ["@radix-ui/react-collapsible@1.1.11", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-presence": "1.1.4", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-2qrRsVGSCYasSz1RFOorXwl0H7g7J1frQtgpQgYrt+MOidtPAINHn9CPovQXb83r8ahapdx3Tu0fa/pdFFSdPg=="], + + "@radix-ui/react-collection": ["@radix-ui/react-collection@1.1.7", "", { "dependencies": { "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-slot": "1.2.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-Fh9rGN0MoI4ZFUNyfFVNU4y9LUz93u9/0K+yLgA2bwRojxM8JU1DyvvMBabnZPBgMWREAJvU2jjVzq+LrFUglw=="], + + "@radix-ui/react-compose-refs": ["@radix-ui/react-compose-refs@1.1.2", "", { "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg=="], + + "@radix-ui/react-context": ["@radix-ui/react-context@1.1.2", "", { "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA=="], + + "@radix-ui/react-context-menu": ["@radix-ui/react-context-menu@2.2.15", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-menu": "2.1.15", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, 
"optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-UsQUMjcYTsBjTSXw0P3GO0werEQvUY2plgRQuKoCTtkNr45q1DiL51j4m7gxhABzZ0BadoXNsIbg7F3KwiUBbw=="], + + "@radix-ui/react-dialog": ["@radix-ui/react-dialog@1.1.14", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-dismissable-layer": "1.1.10", "@radix-ui/react-focus-guards": "1.1.2", "@radix-ui/react-focus-scope": "1.1.7", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.4", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-slot": "1.2.3", "@radix-ui/react-use-controllable-state": "1.2.2", "aria-hidden": "^1.2.4", "react-remove-scroll": "^2.6.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-+CpweKjqpzTmwRwcYECQcNYbI8V9VSQt0SNFKeEBLgfucbsLssU6Ppq7wUdNXEGb573bMjFhVjKVll8rmV6zMw=="], + + "@radix-ui/react-direction": ["@radix-ui/react-direction@1.1.1", "", { "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-1UEWRX6jnOA2y4H5WczZ44gOOjTEmlqv1uNW4GAJEO5+bauCBhv8snY65Iw5/VOS/ghKN9gr2KjnLKxrsvoMVw=="], + + "@radix-ui/react-dismissable-layer": ["@radix-ui/react-dismissable-layer@1.1.10", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-escape-keydown": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-IM1zzRV4W3HtVgftdQiiOmA0AdJlCtMLe00FXaHwgt3rAnNsIyDqshvkIW3hj/iu5hu8ERP7KIYki6NkqDxAwQ=="], + + "@radix-ui/react-dropdown-menu": ["@radix-ui/react-dropdown-menu@2.1.15", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-menu": "2.1.15", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-mIBnOjgwo9AH3FyKaSWoSu/dYj6VdhJ7frEPiGTeXCdUFHjl9h3mFh2wwhEtINOmYXWhdpf1rY2minFsmaNgVQ=="], + + "@radix-ui/react-focus-guards": ["@radix-ui/react-focus-guards@1.1.2", "", { "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-fyjAACV62oPV925xFCrH8DR5xWhg9KYtJT4s3u54jxp+L/hbpTY2kIeEFFbFe+a/HCE94zGQMZLIpVTPVZDhaA=="], + + "@radix-ui/react-focus-scope": ["@radix-ui/react-focus-scope@1.1.7", "", { "dependencies": { "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, 
"optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-t2ODlkXBQyn7jkl6TNaw/MtVEVvIGelJDCG41Okq/KwUsJBwQ4XVZsHAVUkK4mBv3ewiAS3PGuUWuY2BoK4ZUw=="], + + "@radix-ui/react-form": ["@radix-ui/react-form@0.1.7", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-label": "2.1.7", "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-IXLKFnaYvFg/KkeV5QfOX7tRnwHXp127koOFUjLWMTrRv5Rny3DQcAtIFFeA/Cli4HHM8DuJCXAUsgnFVJndlw=="], + + "@radix-ui/react-hover-card": ["@radix-ui/react-hover-card@1.1.14", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-dismissable-layer": "1.1.10", "@radix-ui/react-popper": "1.2.7", "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.4", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-CPYZ24Mhirm+g6D8jArmLzjYu4Eyg3TTUHswR26QgzXBHBe64BO/RHOJKzmF/Dxb4y4f9PKyJdwm/O/AhNkb+Q=="], + + "@radix-ui/react-icons": ["@radix-ui/react-icons@1.3.2", "", { "peerDependencies": { "react": "^16.x || ^17.x || ^18.x || ^19.0.0 || ^19.0.0-rc" } }, "sha512-fyQIhGDhzfc9pK2kH6Pl9c4BDJGfMkPqkyIgYDthyNYoNg3wVhoJMMh19WS4Up/1KMPFVpNsT2q3WmXn2N1m6g=="], + + "@radix-ui/react-id": ["@radix-ui/react-id@1.1.1", "", { "dependencies": { "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-kGkGegYIdQsOb4XjsfM97rXsiHaBwco+hFI66oO4s9LU+PLAC5oJ7khdOVFxkhsmlbpUqDAvXw11CluXP+jkHg=="], + + "@radix-ui/react-label": ["@radix-ui/react-label@2.1.7", "", { "dependencies": { "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-YT1GqPSL8kJn20djelMX7/cTRp/Y9w5IZHvfxQTVHrOqa2yMl7i/UfMqKRU5V7mEyKTrUVgJXhNQPVCG8PBLoQ=="], + + "@radix-ui/react-menu": ["@radix-ui/react-menu@2.1.15", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-dismissable-layer": "1.1.10", "@radix-ui/react-focus-guards": "1.1.2", "@radix-ui/react-focus-scope": "1.1.7", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-popper": "1.2.7", "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.4", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-roving-focus": "1.1.10", "@radix-ui/react-slot": "1.2.3", "@radix-ui/react-use-callback-ref": "1.1.1", "aria-hidden": "^1.2.4", "react-remove-scroll": "^2.6.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 
|| ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-tVlmA3Vb9n8SZSd+YSbuFR66l87Wiy4du+YE+0hzKQEANA+7cWKH1WgqcEX4pXqxUFQKrWQGHdvEfw00TjFiew=="], + + "@radix-ui/react-menubar": ["@radix-ui/react-menubar@1.1.15", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-menu": "2.1.15", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-roving-focus": "1.1.10", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-Z71C7LGD+YDYo3TV81paUs8f3Zbmkvg6VLRQpKYfzioOE6n7fOhA3ApK/V/2Odolxjoc4ENk8AYCjohCNayd5A=="], + + "@radix-ui/react-navigation-menu": ["@radix-ui/react-navigation-menu@1.2.13", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-dismissable-layer": "1.1.10", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-presence": "1.1.4", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-layout-effect": "1.1.1", "@radix-ui/react-use-previous": "1.1.1", "@radix-ui/react-visually-hidden": "1.2.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-WG8wWfDiJlSF5hELjwfjSGOXcBR/ZMhBFCGYe8vERpC39CQYZeq1PQ2kaYHdye3V95d06H89KGMsVCIE4LWo3g=="], + + "@radix-ui/react-one-time-password-field": ["@radix-ui/react-one-time-password-field@0.1.7", "", { "dependencies": { "@radix-ui/number": "1.1.1", "@radix-ui/primitive": "1.1.2", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-roving-focus": "1.1.10", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-effect-event": "0.0.2", "@radix-ui/react-use-is-hydrated": "0.1.0", "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-w1vm7AGI8tNXVovOK7TYQHrAGpRF7qQL+ENpT1a743De5Zmay2RbWGKAiYDKIyIuqptns+znCKwNztE2xl1n0Q=="], + + "@radix-ui/react-password-toggle-field": ["@radix-ui/react-password-toggle-field@0.1.2", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-effect-event": "0.0.2", "@radix-ui/react-use-is-hydrated": "0.1.0" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", 
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-F90uYnlBsLPU1UbSLciLsWQmk8+hdWa6SFw4GXaIdNWxFxI5ITKVdAG64f+Twaa9ic6xE7pqxPyUmodrGjT4pQ=="], + + "@radix-ui/react-popover": ["@radix-ui/react-popover@1.1.14", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-dismissable-layer": "1.1.10", "@radix-ui/react-focus-guards": "1.1.2", "@radix-ui/react-focus-scope": "1.1.7", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-popper": "1.2.7", "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.4", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-slot": "1.2.3", "@radix-ui/react-use-controllable-state": "1.2.2", "aria-hidden": "^1.2.4", "react-remove-scroll": "^2.6.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-ODz16+1iIbGUfFEfKx2HTPKizg2MN39uIOV8MXeHnmdd3i/N9Wt7vU46wbHsqA0xoaQyXVcs0KIlBdOA2Y95bw=="], + + "@radix-ui/react-popper": ["@radix-ui/react-popper@1.2.7", "", { "dependencies": { "@floating-ui/react-dom": "^2.0.0", "@radix-ui/react-arrow": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-layout-effect": "1.1.1", "@radix-ui/react-use-rect": "1.1.1", "@radix-ui/react-use-size": "1.1.1", "@radix-ui/rect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-IUFAccz1JyKcf/RjB552PlWwxjeCJB8/4KxT7EhBHOJM+mN7LdW+B3kacJXILm32xawcMMjb2i0cIZpo+f9kiQ=="], + + "@radix-ui/react-portal": ["@radix-ui/react-portal@1.1.9", "", { "dependencies": { "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ=="], + + "@radix-ui/react-presence": ["@radix-ui/react-presence@1.1.4", "", { "dependencies": { "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-ueDqRbdc4/bkaQT3GIpLQssRlFgWaL/U2z/S31qRwwLWoxHLgry3SIfCwhxeQNbirEUXFa+lq3RL3oBYXtcmIA=="], + + "@radix-ui/react-primitive": ["@radix-ui/react-primitive@2.1.3", "", { "dependencies": { "@radix-ui/react-slot": "1.2.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, 
"sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ=="], + + "@radix-ui/react-progress": ["@radix-ui/react-progress@1.1.7", "", { "dependencies": { "@radix-ui/react-context": "1.1.2", "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-vPdg/tF6YC/ynuBIJlk1mm7Le0VgW6ub6J2UWnTQ7/D23KXcPI1qy+0vBkgKgd38RCMJavBXpB83HPNFMTb0Fg=="], + + "@radix-ui/react-radio-group": ["@radix-ui/react-radio-group@1.3.7", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-presence": "1.1.4", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-roving-focus": "1.1.10", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-previous": "1.1.1", "@radix-ui/react-use-size": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-9w5XhD0KPOrm92OTTE0SysH3sYzHsSTHNvZgUBo/VZ80VdYyB5RneDbc0dKpURS24IxkoFRu/hI0i4XyfFwY6g=="], + + "@radix-ui/react-roving-focus": ["@radix-ui/react-roving-focus@1.1.10", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-dT9aOXUen9JSsxnMPv/0VqySQf5eDQ6LCk5Sw28kamz8wSOW2bJdlX2Bg5VUIIcV+6XlHpWTIuTPCf/UNIyq8Q=="], + + "@radix-ui/react-scroll-area": ["@radix-ui/react-scroll-area@1.2.9", "", { "dependencies": { "@radix-ui/number": "1.1.1", "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-presence": "1.1.4", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-YSjEfBXnhUELsO2VzjdtYYD4CfQjvao+lhhrX5XsHD7/cyUNzljF1FHEbgTPN7LH2MClfwRMIsYlqTYpKTTe2A=="], + + "@radix-ui/react-select": ["@radix-ui/react-select@2.2.5", "", { "dependencies": { "@radix-ui/number": "1.1.1", "@radix-ui/primitive": "1.1.2", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-dismissable-layer": "1.1.10", "@radix-ui/react-focus-guards": "1.1.2", "@radix-ui/react-focus-scope": "1.1.7", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-popper": "1.2.7", 
"@radix-ui/react-portal": "1.1.9", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-slot": "1.2.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-layout-effect": "1.1.1", "@radix-ui/react-use-previous": "1.1.1", "@radix-ui/react-visually-hidden": "1.2.3", "aria-hidden": "^1.2.4", "react-remove-scroll": "^2.6.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-HnMTdXEVuuyzx63ME0ut4+sEMYW6oouHWNGUZc7ddvUWIcfCva/AMoqEW/3wnEllriMWBa0RHspCYnfCWJQYmA=="], + + "@radix-ui/react-separator": ["@radix-ui/react-separator@1.1.7", "", { "dependencies": { "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-0HEb8R9E8A+jZjvmFCy/J4xhbXy3TV+9XSnGJ3KvTtjlIUy/YQ/p6UYZvi7YbeoeXdyU9+Y3scizK6hkY37baA=="], + + "@radix-ui/react-slider": ["@radix-ui/react-slider@1.3.5", "", { "dependencies": { "@radix-ui/number": "1.1.1", "@radix-ui/primitive": "1.1.2", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-layout-effect": "1.1.1", "@radix-ui/react-use-previous": "1.1.1", "@radix-ui/react-use-size": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-rkfe2pU2NBAYfGaxa3Mqosi7VZEWX5CxKaanRv0vZd4Zhl9fvQrg0VM93dv3xGLGfrHuoTRF3JXH8nb9g+B3fw=="], + + "@radix-ui/react-slot": ["@radix-ui/react-slot@1.2.3", "", { "dependencies": { "@radix-ui/react-compose-refs": "1.1.2" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A=="], + + "@radix-ui/react-switch": ["@radix-ui/react-switch@1.2.5", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-previous": "1.1.1", "@radix-ui/react-use-size": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-5ijLkak6ZMylXsaImpZ8u4Rlf5grRmoc0p0QeX9VJtlrM4f5m3nCTX8tWga/zOA8PZYIR/t0p2Mnvd7InrJ6yQ=="], + + "@radix-ui/react-tabs": ["@radix-ui/react-tabs@1.1.12", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-presence": "1.1.4", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-roving-focus": "1.1.10", "@radix-ui/react-use-controllable-state": 
"1.2.2" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-GTVAlRVrQrSw3cEARM0nAx73ixrWDPNZAruETn3oHCNP6SbZ/hNxdxp+u7VkIEv3/sFoLq1PfcHrl7Pnp0CDpw=="], + + "@radix-ui/react-toast": ["@radix-ui/react-toast@1.2.14", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-dismissable-layer": "1.1.10", "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.4", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-layout-effect": "1.1.1", "@radix-ui/react-visually-hidden": "1.2.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-nAP5FBxBJGQ/YfUB+r+O6USFVkWq3gAInkxyEnmvEV5jtSbfDhfa4hwX8CraCnbjMLsE7XSf/K75l9xXY7joWg=="], + + "@radix-ui/react-toggle": ["@radix-ui/react-toggle@1.1.9", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-ZoFkBBz9zv9GWer7wIjvdRxmh2wyc2oKWw6C6CseWd6/yq1DK/l5lJ+wnsmFwJZbBYqr02mrf8A2q/CVCuM3ZA=="], + + "@radix-ui/react-toggle-group": ["@radix-ui/react-toggle-group@1.1.10", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-roving-focus": "1.1.10", "@radix-ui/react-toggle": "1.1.9", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-kiU694Km3WFLTC75DdqgM/3Jauf3rD9wxeS9XtyWFKsBUeZA337lC+6uUazT7I1DhanZ5gyD5Stf8uf2dbQxOQ=="], + + "@radix-ui/react-toolbar": ["@radix-ui/react-toolbar@1.1.10", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-roving-focus": "1.1.10", "@radix-ui/react-separator": "1.1.7", "@radix-ui/react-toggle-group": "1.1.10" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-jiwQsduEL++M4YBIurjSa+voD86OIytCod0/dbIxFZDLD8NfO1//keXYMfsW8BPcfqwoNjt+y06XcJqAb4KR7A=="], + + "@radix-ui/react-tooltip": ["@radix-ui/react-tooltip@1.2.7", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-dismissable-layer": "1.1.10", "@radix-ui/react-id": "1.1.1", 
"@radix-ui/react-popper": "1.2.7", "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.4", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-slot": "1.2.3", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-visually-hidden": "1.2.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-Ap+fNYwKTYJ9pzqW+Xe2HtMRbQ/EeWkj2qykZ6SuEV4iS/o1bZI5ssJbk4D2r8XuDuOBVz/tIx2JObtuqU+5Zw=="], + + "@radix-ui/react-use-callback-ref": ["@radix-ui/react-use-callback-ref@1.1.1", "", { "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-FkBMwD+qbGQeMu1cOHnuGB6x4yzPjho8ap5WtbEJ26umhgqVXbhekKUQO+hZEL1vU92a3wHwdp0HAcqAUF5iDg=="], + + "@radix-ui/react-use-controllable-state": ["@radix-ui/react-use-controllable-state@1.2.2", "", { "dependencies": { "@radix-ui/react-use-effect-event": "0.0.2", "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-BjasUjixPFdS+NKkypcyyN5Pmg83Olst0+c6vGov0diwTEo6mgdqVR6hxcEgFuh4QrAs7Rc+9KuGJ9TVCj0Zzg=="], + + "@radix-ui/react-use-effect-event": ["@radix-ui/react-use-effect-event@0.0.2", "", { "dependencies": { "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-Qp8WbZOBe+blgpuUT+lw2xheLP8q0oatc9UpmiemEICxGvFLYmHm9QowVZGHtJlGbS6A6yJ3iViad/2cVjnOiA=="], + + "@radix-ui/react-use-escape-keydown": ["@radix-ui/react-use-escape-keydown@1.1.1", "", { "dependencies": { "@radix-ui/react-use-callback-ref": "1.1.1" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-Il0+boE7w/XebUHyBjroE+DbByORGR9KKmITzbR7MyQ4akpORYP/ZmbhAr0DG7RmmBqoOnZdy2QlvajJ2QA59g=="], + + "@radix-ui/react-use-is-hydrated": ["@radix-ui/react-use-is-hydrated@0.1.0", "", { "dependencies": { "use-sync-external-store": "^1.5.0" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-U+UORVEq+cTnRIaostJv9AGdV3G6Y+zbVd+12e18jQ5A3c0xL03IhnHuiU4UV69wolOQp5GfR58NW/EgdQhwOA=="], + + "@radix-ui/react-use-layout-effect": ["@radix-ui/react-use-layout-effect@1.1.1", "", { "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ=="], + + "@radix-ui/react-use-previous": ["@radix-ui/react-use-previous@1.1.1", "", { "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-2dHfToCj/pzca2Ck724OZ5L0EVrr3eHRNsG/b3xQJLA2hZpVCS99bLAX+hm1IHXDEnzU6by5z/5MIY794/a8NQ=="], + + "@radix-ui/react-use-rect": ["@radix-ui/react-use-rect@1.1.1", "", { "dependencies": { "@radix-ui/rect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, 
"sha512-QTYuDesS0VtuHNNvMh+CjlKJ4LJickCMUAqjlE3+j8w+RlRpwyX3apEQKGFzbZGdo7XNG1tXa+bQqIE7HIXT2w=="], + + "@radix-ui/react-use-size": ["@radix-ui/react-use-size@1.1.1", "", { "dependencies": { "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-ewrXRDTAqAXlkl6t/fkXWNAhFX9I+CkKlw6zjEwk86RSPKwZr3xpBRso655aqYafwtnbpHLj6toFzmd6xdVptQ=="], + + "@radix-ui/react-visually-hidden": ["@radix-ui/react-visually-hidden@1.2.3", "", { "dependencies": { "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-pzJq12tEaaIhqjbzpCuv/OypJY/BPavOofm+dbab+MHLajy277+1lLm6JFcGgF5eskJ6mquGirhXY2GD/8u8Ug=="], + + "@radix-ui/rect": ["@radix-ui/rect@1.1.1", "", {}, "sha512-HPwpGIzkl28mWyZqG52jiqDJ12waP11Pa1lGoiyUkIEuMLBP0oeK/C89esbXrxsky5we7dfd8U58nm0SgAWpVw=="], + + "@rolldown/pluginutils": ["@rolldown/pluginutils@1.0.0-beta.19", "", {}, "sha512-3FL3mnMbPu0muGOCaKAhhFEYmqv9eTfPSJRJmANrCwtgK8VuxpsZDGK+m0LYAGoyO8+0j5uRe4PeyPDK1yA/hA=="], + + "@rollup/pluginutils": ["@rollup/pluginutils@5.2.0", "", { "dependencies": { "@types/estree": "^1.0.0", "estree-walker": "^2.0.2", "picomatch": "^4.0.2" }, "peerDependencies": { "rollup": "^1.20.0||^2.0.0||^3.0.0||^4.0.0" }, "optionalPeers": ["rollup"] }, "sha512-qWJ2ZTbmumwiLFomfzTyt5Kng4hwPi9rwCYN4SHb6eaRU1KNO4ccxINHr/VhH4GgPlt1XfSTLX2LBTme8ne4Zw=="], + + "@rollup/rollup-android-arm-eabi": ["@rollup/rollup-android-arm-eabi@4.45.1", "", { "os": "android", "cpu": "arm" }, "sha512-NEySIFvMY0ZQO+utJkgoMiCAjMrGvnbDLHvcmlA33UXJpYBCvlBEbMMtV837uCkS+plG2umfhn0T5mMAxGrlRA=="], + + "@rollup/rollup-android-arm64": ["@rollup/rollup-android-arm64@4.45.1", "", { "os": "android", "cpu": "arm64" }, "sha512-ujQ+sMXJkg4LRJaYreaVx7Z/VMgBBd89wGS4qMrdtfUFZ+TSY5Rs9asgjitLwzeIbhwdEhyj29zhst3L1lKsRQ=="], + + "@rollup/rollup-darwin-arm64": ["@rollup/rollup-darwin-arm64@4.45.1", "", { "os": "darwin", "cpu": "arm64" }, "sha512-FSncqHvqTm3lC6Y13xncsdOYfxGSLnP+73k815EfNmpewPs+EyM49haPS105Rh4aF5mJKywk9X0ogzLXZzN9lA=="], + + "@rollup/rollup-darwin-x64": ["@rollup/rollup-darwin-x64@4.45.1", "", { "os": "darwin", "cpu": "x64" }, "sha512-2/vVn/husP5XI7Fsf/RlhDaQJ7x9zjvC81anIVbr4b/f0xtSmXQTFcGIQ/B1cXIYM6h2nAhJkdMHTnD7OtQ9Og=="], + + "@rollup/rollup-freebsd-arm64": ["@rollup/rollup-freebsd-arm64@4.45.1", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-4g1kaDxQItZsrkVTdYQ0bxu4ZIQ32cotoQbmsAnW1jAE4XCMbcBPDirX5fyUzdhVCKgPcrwWuucI8yrVRBw2+g=="], + + "@rollup/rollup-freebsd-x64": ["@rollup/rollup-freebsd-x64@4.45.1", "", { "os": "freebsd", "cpu": "x64" }, "sha512-L/6JsfiL74i3uK1Ti2ZFSNsp5NMiM4/kbbGEcOCps99aZx3g8SJMO1/9Y0n/qKlWZfn6sScf98lEOUe2mBvW9A=="], + + "@rollup/rollup-linux-arm-gnueabihf": ["@rollup/rollup-linux-arm-gnueabihf@4.45.1", "", { "os": "linux", "cpu": "arm" }, "sha512-RkdOTu2jK7brlu+ZwjMIZfdV2sSYHK2qR08FUWcIoqJC2eywHbXr0L8T/pONFwkGukQqERDheaGTeedG+rra6Q=="], + + "@rollup/rollup-linux-arm-musleabihf": ["@rollup/rollup-linux-arm-musleabihf@4.45.1", "", { "os": "linux", "cpu": "arm" }, "sha512-3kJ8pgfBt6CIIr1o+HQA7OZ9mp/zDk3ctekGl9qn/pRBgrRgfwiffaUmqioUGN9hv0OHv2gxmvdKOkARCtRb8Q=="], + + "@rollup/rollup-linux-arm64-gnu": ["@rollup/rollup-linux-arm64-gnu@4.45.1", "", { "os": "linux", "cpu": "arm64" }, 
"sha512-k3dOKCfIVixWjG7OXTCOmDfJj3vbdhN0QYEqB+OuGArOChek22hn7Uy5A/gTDNAcCy5v2YcXRJ/Qcnm4/ma1xw=="], + + "@rollup/rollup-linux-arm64-musl": ["@rollup/rollup-linux-arm64-musl@4.45.1", "", { "os": "linux", "cpu": "arm64" }, "sha512-PmI1vxQetnM58ZmDFl9/Uk2lpBBby6B6rF4muJc65uZbxCs0EA7hhKCk2PKlmZKuyVSHAyIw3+/SiuMLxKxWog=="], + + "@rollup/rollup-linux-loongarch64-gnu": ["@rollup/rollup-linux-loongarch64-gnu@4.45.1", "", { "os": "linux", "cpu": "none" }, "sha512-9UmI0VzGmNJ28ibHW2GpE2nF0PBQqsyiS4kcJ5vK+wuwGnV5RlqdczVocDSUfGX/Na7/XINRVoUgJyFIgipoRg=="], + + "@rollup/rollup-linux-powerpc64le-gnu": ["@rollup/rollup-linux-powerpc64le-gnu@4.45.1", "", { "os": "linux", "cpu": "ppc64" }, "sha512-7nR2KY8oEOUTD3pBAxIBBbZr0U7U+R9HDTPNy+5nVVHDXI4ikYniH1oxQz9VoB5PbBU1CZuDGHkLJkd3zLMWsg=="], + + "@rollup/rollup-linux-riscv64-gnu": ["@rollup/rollup-linux-riscv64-gnu@4.45.1", "", { "os": "linux", "cpu": "none" }, "sha512-nlcl3jgUultKROfZijKjRQLUu9Ma0PeNv/VFHkZiKbXTBQXhpytS8CIj5/NfBeECZtY2FJQubm6ltIxm/ftxpw=="], + + "@rollup/rollup-linux-riscv64-musl": ["@rollup/rollup-linux-riscv64-musl@4.45.1", "", { "os": "linux", "cpu": "none" }, "sha512-HJV65KLS51rW0VY6rvZkiieiBnurSzpzore1bMKAhunQiECPuxsROvyeaot/tcK3A3aGnI+qTHqisrpSgQrpgA=="], + + "@rollup/rollup-linux-s390x-gnu": ["@rollup/rollup-linux-s390x-gnu@4.45.1", "", { "os": "linux", "cpu": "s390x" }, "sha512-NITBOCv3Qqc6hhwFt7jLV78VEO/il4YcBzoMGGNxznLgRQf43VQDae0aAzKiBeEPIxnDrACiMgbqjuihx08OOw=="], + + "@rollup/rollup-linux-x64-gnu": ["@rollup/rollup-linux-x64-gnu@4.45.1", "", { "os": "linux", "cpu": "x64" }, "sha512-+E/lYl6qu1zqgPEnTrs4WysQtvc/Sh4fC2nByfFExqgYrqkKWp1tWIbe+ELhixnenSpBbLXNi6vbEEJ8M7fiHw=="], + + "@rollup/rollup-linux-x64-musl": ["@rollup/rollup-linux-x64-musl@4.45.1", "", { "os": "linux", "cpu": "x64" }, "sha512-a6WIAp89p3kpNoYStITT9RbTbTnqarU7D8N8F2CV+4Cl9fwCOZraLVuVFvlpsW0SbIiYtEnhCZBPLoNdRkjQFw=="], + + "@rollup/rollup-win32-arm64-msvc": ["@rollup/rollup-win32-arm64-msvc@4.45.1", "", { "os": "win32", "cpu": "arm64" }, "sha512-T5Bi/NS3fQiJeYdGvRpTAP5P02kqSOpqiopwhj0uaXB6nzs5JVi2XMJb18JUSKhCOX8+UE1UKQufyD6Or48dJg=="], + + "@rollup/rollup-win32-ia32-msvc": ["@rollup/rollup-win32-ia32-msvc@4.45.1", "", { "os": "win32", "cpu": "ia32" }, "sha512-lxV2Pako3ujjuUe9jiU3/s7KSrDfH6IgTSQOnDWr9aJ92YsFd7EurmClK0ly/t8dzMkDtd04g60WX6yl0sGfdw=="], + + "@rollup/rollup-win32-x64-msvc": ["@rollup/rollup-win32-x64-msvc@4.45.1", "", { "os": "win32", "cpu": "x64" }, "sha512-M/fKi4sasCdM8i0aWJjCSFm2qEnYRR8AMLG2kxp6wD13+tMGA4Z1tVAuHkNRjud5SW2EM3naLuK35w9twvf6aA=="], + + "@shikijs/core": ["@shikijs/core@1.29.2", "", { "dependencies": { "@shikijs/engine-javascript": "1.29.2", "@shikijs/engine-oniguruma": "1.29.2", "@shikijs/types": "1.29.2", "@shikijs/vscode-textmate": "^10.0.1", "@types/hast": "^3.0.4", "hast-util-to-html": "^9.0.4" } }, "sha512-vju0lY9r27jJfOY4Z7+Rt/nIOjzJpZ3y+nYpqtUZInVoXQ/TJZcfGnNOGnKjFdVZb8qexiCuSlZRKcGfhhTTZQ=="], + + "@shikijs/engine-javascript": ["@shikijs/engine-javascript@1.29.2", "", { "dependencies": { "@shikijs/types": "1.29.2", "@shikijs/vscode-textmate": "^10.0.1", "oniguruma-to-es": "^2.2.0" } }, "sha512-iNEZv4IrLYPv64Q6k7EPpOCE/nuvGiKl7zxdq0WFuRPF5PAE9PRo2JGq/d8crLusM59BRemJ4eOqrFrC4wiQ+A=="], + + "@shikijs/engine-oniguruma": ["@shikijs/engine-oniguruma@1.29.2", "", { "dependencies": { "@shikijs/types": "1.29.2", "@shikijs/vscode-textmate": "^10.0.1" } }, "sha512-7iiOx3SG8+g1MnlzZVDYiaeHe7Ez2Kf2HrJzdmGwkRisT7r4rak0e655AcM/tF9JG/kg5fMNYlLLKglbN7gBqA=="], + + "@shikijs/langs": ["@shikijs/langs@1.29.2", "", { "dependencies": { 
"@shikijs/types": "1.29.2" } }, "sha512-FIBA7N3LZ+223U7cJDUYd5shmciFQlYkFXlkKVaHsCPgfVLiO+e12FmQE6Tf9vuyEsFe3dIl8qGWKXgEHL9wmQ=="], + + "@shikijs/rehype": ["@shikijs/rehype@1.29.2", "", { "dependencies": { "@shikijs/types": "1.29.2", "@types/hast": "^3.0.4", "hast-util-to-string": "^3.0.1", "shiki": "1.29.2", "unified": "^11.0.5", "unist-util-visit": "^5.0.0" } }, "sha512-sxi53HZe5XDz0s2UqF+BVN/kgHPMS9l6dcacM4Ra3ZDzCJa5rDGJ+Ukpk4LxdD1+MITBM6hoLbPfGv9StV8a5Q=="], + + "@shikijs/themes": ["@shikijs/themes@1.29.2", "", { "dependencies": { "@shikijs/types": "1.29.2" } }, "sha512-i9TNZlsq4uoyqSbluIcZkmPL9Bfi3djVxRnofUHwvx/h6SRW3cwgBC5SML7vsDcWyukY0eCzVN980rqP6qNl9g=="], + + "@shikijs/transformers": ["@shikijs/transformers@1.29.2", "", { "dependencies": { "@shikijs/core": "1.29.2", "@shikijs/types": "1.29.2" } }, "sha512-NHQuA+gM7zGuxGWP9/Ub4vpbwrYCrho9nQCLcCPfOe3Yc7LOYwmSuhElI688oiqIXk9dlZwDiyAG9vPBTuPJMA=="], + + "@shikijs/twoslash": ["@shikijs/twoslash@1.29.2", "", { "dependencies": { "@shikijs/core": "1.29.2", "@shikijs/types": "1.29.2", "twoslash": "^0.2.12" } }, "sha512-2S04ppAEa477tiaLfGEn1QJWbZUmbk8UoPbAEw4PifsrxkBXtAtOflIZJNtuCwz8ptc/TPxy7CO7gW4Uoi6o/g=="], + + "@shikijs/types": ["@shikijs/types@1.29.2", "", { "dependencies": { "@shikijs/vscode-textmate": "^10.0.1", "@types/hast": "^3.0.4" } }, "sha512-VJjK0eIijTZf0QSTODEXCqinjBn0joAHQ+aPSBzrv4O2d/QSbsMw+ZeSRx03kV34Hy7NzUvV/7NqfYGRLrASmw=="], + + "@shikijs/vscode-textmate": ["@shikijs/vscode-textmate@10.0.2", "", {}, "sha512-83yeghZ2xxin3Nj8z1NMd/NCuca+gsYXswywDy5bHvwlWL8tpTQmzGeUuHd9FC3E/SBEMvzJRwWEOz5gGes9Qg=="], + + "@sindresorhus/merge-streams": ["@sindresorhus/merge-streams@2.3.0", "", {}, "sha512-LtoMMhxAlorcGhmFYI+LhPgbPZCkgP6ra1YL604EeF6U98pLlQ3iWIGMdWSC+vWmPBWBNgmDBAhnAobLROJmwg=="], + + "@tailwindcss/node": ["@tailwindcss/node@4.0.7", "", { "dependencies": { "enhanced-resolve": "^5.18.1", "jiti": "^2.4.2", "tailwindcss": "4.0.7" } }, "sha512-dkFXufkbRB2mu3FPsW5xLAUWJyexpJA+/VtQj18k3SUiJVLdpgzBd1v1gRRcIpEJj7K5KpxBKfOXlZxT3ZZRuA=="], + + "@tailwindcss/oxide": ["@tailwindcss/oxide@4.0.7", "", { "optionalDependencies": { "@tailwindcss/oxide-android-arm64": "4.0.7", "@tailwindcss/oxide-darwin-arm64": "4.0.7", "@tailwindcss/oxide-darwin-x64": "4.0.7", "@tailwindcss/oxide-freebsd-x64": "4.0.7", "@tailwindcss/oxide-linux-arm-gnueabihf": "4.0.7", "@tailwindcss/oxide-linux-arm64-gnu": "4.0.7", "@tailwindcss/oxide-linux-arm64-musl": "4.0.7", "@tailwindcss/oxide-linux-x64-gnu": "4.0.7", "@tailwindcss/oxide-linux-x64-musl": "4.0.7", "@tailwindcss/oxide-win32-arm64-msvc": "4.0.7", "@tailwindcss/oxide-win32-x64-msvc": "4.0.7" } }, "sha512-yr6w5YMgjy+B+zkJiJtIYGXW+HNYOPfRPtSs+aqLnKwdEzNrGv4ZuJh9hYJ3mcA+HMq/K1rtFV+KsEr65S558g=="], + + "@tailwindcss/oxide-android-arm64": ["@tailwindcss/oxide-android-arm64@4.0.7", "", { "os": "android", "cpu": "arm64" }, "sha512-5iQXXcAeOHBZy8ASfHFm1k0O/9wR2E3tKh6+P+ilZZbQiMgu+qrnfpBWYPc3FPuQdWiWb73069WT5D+CAfx/tg=="], + + "@tailwindcss/oxide-darwin-arm64": ["@tailwindcss/oxide-darwin-arm64@4.0.7", "", { "os": "darwin", "cpu": "arm64" }, "sha512-7yGZtEc5IgVYylqK/2B0yVqoofk4UAbkn1ygNpIJZyrOhbymsfr8uUFCueTu2fUxmAYIfMZ8waWo2dLg/NgLgg=="], + + "@tailwindcss/oxide-darwin-x64": ["@tailwindcss/oxide-darwin-x64@4.0.7", "", { "os": "darwin", "cpu": "x64" }, "sha512-tPQDV20fBjb26yWbPqT1ZSoDChomMCiXTKn4jupMSoMCFyU7+OJvIY1ryjqBuY622dEBJ8LnCDDWsnj1lX9nNQ=="], + + "@tailwindcss/oxide-freebsd-x64": ["@tailwindcss/oxide-freebsd-x64@4.0.7", "", { "os": "freebsd", "cpu": "x64" }, 
"sha512-sZqJpTyTZiknU9LLHuByg5GKTW+u3FqM7q7myequAXxKOpAFiOfXpY710FuMY+gjzSapyRbDXJlsTQtCyiTo5w=="], + + "@tailwindcss/oxide-linux-arm-gnueabihf": ["@tailwindcss/oxide-linux-arm-gnueabihf@4.0.7", "", { "os": "linux", "cpu": "arm" }, "sha512-PBgvULgeSswjd8cbZ91gdIcIDMdc3TUHV5XemEpxlqt9M8KoydJzkuB/Dt910jYdofOIaTWRL6adG9nJICvU4A=="], + + "@tailwindcss/oxide-linux-arm64-gnu": ["@tailwindcss/oxide-linux-arm64-gnu@4.0.7", "", { "os": "linux", "cpu": "arm64" }, "sha512-By/a2yeh+e9b+C67F88ndSwVJl2A3tcUDb29FbedDi+DZ4Mr07Oqw9Y1DrDrtHIDhIZ3bmmiL1dkH2YxrtV+zw=="], + + "@tailwindcss/oxide-linux-arm64-musl": ["@tailwindcss/oxide-linux-arm64-musl@4.0.7", "", { "os": "linux", "cpu": "arm64" }, "sha512-WHYs3cpPEJb/ccyT20NOzopYQkl7JKncNBUbb77YFlwlXMVJLLV3nrXQKhr7DmZxz2ZXqjyUwsj2rdzd9stYdw=="], + + "@tailwindcss/oxide-linux-x64-gnu": ["@tailwindcss/oxide-linux-x64-gnu@4.0.7", "", { "os": "linux", "cpu": "x64" }, "sha512-7bP1UyuX9kFxbOwkeIJhBZNevKYPXB6xZI37v09fqi6rqRJR8elybwjMUHm54GVP+UTtJ14ueB1K54Dy1tIO6w=="], + + "@tailwindcss/oxide-linux-x64-musl": ["@tailwindcss/oxide-linux-x64-musl@4.0.7", "", { "os": "linux", "cpu": "x64" }, "sha512-gBQIV8nL/LuhARNGeroqzXymMzzW5wQzqlteVqOVoqwEfpHOP3GMird5pGFbnpY+NP0fOlsZGrxxOPQ4W/84bQ=="], + + "@tailwindcss/oxide-win32-arm64-msvc": ["@tailwindcss/oxide-win32-arm64-msvc@4.0.7", "", { "os": "win32", "cpu": "arm64" }, "sha512-aH530NFfx0kpQpvYMfWoeG03zGnRCMVlQG8do/5XeahYydz+6SIBxA1tl/cyITSJyWZHyVt6GVNkXeAD30v0Xg=="], + + "@tailwindcss/oxide-win32-x64-msvc": ["@tailwindcss/oxide-win32-x64-msvc@4.0.7", "", { "os": "win32", "cpu": "x64" }, "sha512-8Cva6bbJN7ZJx320k7vxGGdU0ewmpfS5A4PudyzUuofdi8MgeINuiiWiPQ0VZCda/GX88K6qp+6UpDZNVr8HMQ=="], + + "@tailwindcss/vite": ["@tailwindcss/vite@4.0.7", "", { "dependencies": { "@tailwindcss/node": "4.0.7", "@tailwindcss/oxide": "4.0.7", "lightningcss": "^1.29.1", "tailwindcss": "4.0.7" }, "peerDependencies": { "vite": "^5.2.0 || ^6" } }, "sha512-GYx5sxArfIMtdZCsxfya3S/efMmf4RvfqdiLUozkhmSFBNUFnYVodatpoO/en4/BsOIGvq/RB6HwcTLn9prFnQ=="], + + "@types/babel__core": ["@types/babel__core@7.20.5", "", { "dependencies": { "@babel/parser": "^7.20.7", "@babel/types": "^7.20.7", "@types/babel__generator": "*", "@types/babel__template": "*", "@types/babel__traverse": "*" } }, "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA=="], + + "@types/babel__generator": ["@types/babel__generator@7.27.0", "", { "dependencies": { "@babel/types": "^7.0.0" } }, "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg=="], + + "@types/babel__template": ["@types/babel__template@7.4.4", "", { "dependencies": { "@babel/parser": "^7.1.0", "@babel/types": "^7.0.0" } }, "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A=="], + + "@types/babel__traverse": ["@types/babel__traverse@7.20.7", "", { "dependencies": { "@babel/types": "^7.20.7" } }, "sha512-dkO5fhS7+/oos4ciWxyEyjWe48zmG6wbCheo/G2ZnHx4fs3EU6YC6UM8rk56gAjNJ9P3MTH2jo5jb92/K6wbng=="], + + "@types/d3": ["@types/d3@7.4.3", "", { "dependencies": { "@types/d3-array": "*", "@types/d3-axis": "*", "@types/d3-brush": "*", "@types/d3-chord": "*", "@types/d3-color": "*", "@types/d3-contour": "*", "@types/d3-delaunay": "*", "@types/d3-dispatch": "*", "@types/d3-drag": "*", "@types/d3-dsv": "*", "@types/d3-ease": "*", "@types/d3-fetch": "*", "@types/d3-force": "*", "@types/d3-format": "*", "@types/d3-geo": "*", "@types/d3-hierarchy": "*", "@types/d3-interpolate": "*", "@types/d3-path": "*", 
"@types/d3-polygon": "*", "@types/d3-quadtree": "*", "@types/d3-random": "*", "@types/d3-scale": "*", "@types/d3-scale-chromatic": "*", "@types/d3-selection": "*", "@types/d3-shape": "*", "@types/d3-time": "*", "@types/d3-time-format": "*", "@types/d3-timer": "*", "@types/d3-transition": "*", "@types/d3-zoom": "*" } }, "sha512-lZXZ9ckh5R8uiFVt8ogUNf+pIrK4EsWrx2Np75WvF/eTpJ0FMHNhjXk8CKEx/+gpHbNQyJWehbFaTvqmHWB3ww=="], + + "@types/d3-array": ["@types/d3-array@3.2.1", "", {}, "sha512-Y2Jn2idRrLzUfAKV2LyRImR+y4oa2AntrgID95SHJxuMUrkNXmanDSed71sRNZysveJVt1hLLemQZIady0FpEg=="], + + "@types/d3-axis": ["@types/d3-axis@3.0.6", "", { "dependencies": { "@types/d3-selection": "*" } }, "sha512-pYeijfZuBd87T0hGn0FO1vQ/cgLk6E1ALJjfkC0oJ8cbwkZl3TpgS8bVBLZN+2jjGgg38epgxb2zmoGtSfvgMw=="], + + "@types/d3-brush": ["@types/d3-brush@3.0.6", "", { "dependencies": { "@types/d3-selection": "*" } }, "sha512-nH60IZNNxEcrh6L1ZSMNA28rj27ut/2ZmI3r96Zd+1jrZD++zD3LsMIjWlvg4AYrHn/Pqz4CF3veCxGjtbqt7A=="], + + "@types/d3-chord": ["@types/d3-chord@3.0.6", "", {}, "sha512-LFYWWd8nwfwEmTZG9PfQxd17HbNPksHBiJHaKuY1XeqscXacsS2tyoo6OdRsjf+NQYeB6XrNL3a25E3gH69lcg=="], + + "@types/d3-color": ["@types/d3-color@3.1.3", "", {}, "sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A=="], + + "@types/d3-contour": ["@types/d3-contour@3.0.6", "", { "dependencies": { "@types/d3-array": "*", "@types/geojson": "*" } }, "sha512-BjzLgXGnCWjUSYGfH1cpdo41/hgdWETu4YxpezoztawmqsvCeep+8QGfiY6YbDvfgHz/DkjeIkkZVJavB4a3rg=="], + + "@types/d3-delaunay": ["@types/d3-delaunay@6.0.4", "", {}, "sha512-ZMaSKu4THYCU6sV64Lhg6qjf1orxBthaC161plr5KuPHo3CNm8DTHiLw/5Eq2b6TsNP0W0iJrUOFscY6Q450Hw=="], + + "@types/d3-dispatch": ["@types/d3-dispatch@3.0.6", "", {}, "sha512-4fvZhzMeeuBJYZXRXrRIQnvUYfyXwYmLsdiN7XXmVNQKKw1cM8a5WdID0g1hVFZDqT9ZqZEY5pD44p24VS7iZQ=="], + + "@types/d3-drag": ["@types/d3-drag@3.0.7", "", { "dependencies": { "@types/d3-selection": "*" } }, "sha512-HE3jVKlzU9AaMazNufooRJ5ZpWmLIoc90A37WU2JMmeq28w1FQqCZswHZ3xR+SuxYftzHq6WU6KJHvqxKzTxxQ=="], + + "@types/d3-dsv": ["@types/d3-dsv@3.0.7", "", {}, "sha512-n6QBF9/+XASqcKK6waudgL0pf/S5XHPPI8APyMLLUHd8NqouBGLsU8MgtO7NINGtPBtk9Kko/W4ea0oAspwh9g=="], + + "@types/d3-ease": ["@types/d3-ease@3.0.2", "", {}, "sha512-NcV1JjO5oDzoK26oMzbILE6HW7uVXOHLQvHshBUW4UMdZGfiY6v5BeQwh9a9tCzv+CeefZQHJt5SRgK154RtiA=="], + + "@types/d3-fetch": ["@types/d3-fetch@3.0.7", "", { "dependencies": { "@types/d3-dsv": "*" } }, "sha512-fTAfNmxSb9SOWNB9IoG5c8Hg6R+AzUHDRlsXsDZsNp6sxAEOP0tkP3gKkNSO/qmHPoBFTxNrjDprVHDQDvo5aA=="], + + "@types/d3-force": ["@types/d3-force@3.0.10", "", {}, "sha512-ZYeSaCF3p73RdOKcjj+swRlZfnYpK1EbaDiYICEEp5Q6sUiqFaFQ9qgoshp5CzIyyb/yD09kD9o2zEltCexlgw=="], + + "@types/d3-format": ["@types/d3-format@3.0.4", "", {}, "sha512-fALi2aI6shfg7vM5KiR1wNJnZ7r6UuggVqtDA+xiEdPZQwy/trcQaHnwShLuLdta2rTymCNpxYTiMZX/e09F4g=="], + + "@types/d3-geo": ["@types/d3-geo@3.1.0", "", { "dependencies": { "@types/geojson": "*" } }, "sha512-856sckF0oP/diXtS4jNsiQw/UuK5fQG8l/a9VVLeSouf1/PPbBE1i1W852zVwKwYCBkFJJB7nCFTbk6UMEXBOQ=="], + + "@types/d3-hierarchy": ["@types/d3-hierarchy@3.1.7", "", {}, "sha512-tJFtNoYBtRtkNysX1Xq4sxtjK8YgoWUNpIiUee0/jHGRwqvzYxkq0hGVbbOGSz+JgFxxRu4K8nb3YpG3CMARtg=="], + + "@types/d3-interpolate": ["@types/d3-interpolate@3.0.4", "", { "dependencies": { "@types/d3-color": "*" } }, "sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA=="], + + "@types/d3-path": ["@types/d3-path@3.1.1", "", {}, 
"sha512-VMZBYyQvbGmWyWVea0EHs/BwLgxc+MKi1zLDCONksozI4YJMcTt8ZEuIR4Sb1MMTE8MMW49v0IwI5+b7RmfWlg=="], + + "@types/d3-polygon": ["@types/d3-polygon@3.0.2", "", {}, "sha512-ZuWOtMaHCkN9xoeEMr1ubW2nGWsp4nIql+OPQRstu4ypeZ+zk3YKqQT0CXVe/PYqrKpZAi+J9mTs05TKwjXSRA=="], + + "@types/d3-quadtree": ["@types/d3-quadtree@3.0.6", "", {}, "sha512-oUzyO1/Zm6rsxKRHA1vH0NEDG58HrT5icx/azi9MF1TWdtttWl0UIUsjEQBBh+SIkrpd21ZjEv7ptxWys1ncsg=="], + + "@types/d3-random": ["@types/d3-random@3.0.3", "", {}, "sha512-Imagg1vJ3y76Y2ea0871wpabqp613+8/r0mCLEBfdtqC7xMSfj9idOnmBYyMoULfHePJyxMAw3nWhJxzc+LFwQ=="], + + "@types/d3-scale": ["@types/d3-scale@4.0.9", "", { "dependencies": { "@types/d3-time": "*" } }, "sha512-dLmtwB8zkAeO/juAMfnV+sItKjlsw2lKdZVVy6LRr0cBmegxSABiLEpGVmSJJ8O08i4+sGR6qQtb6WtuwJdvVw=="], + + "@types/d3-scale-chromatic": ["@types/d3-scale-chromatic@3.1.0", "", {}, "sha512-iWMJgwkK7yTRmWqRB5plb1kadXyQ5Sj8V/zYlFGMUBbIPKQScw+Dku9cAAMgJG+z5GYDoMjWGLVOvjghDEFnKQ=="], + + "@types/d3-selection": ["@types/d3-selection@3.0.11", "", {}, "sha512-bhAXu23DJWsrI45xafYpkQ4NtcKMwWnAC/vKrd2l+nxMFuvOT3XMYTIj2opv8vq8AO5Yh7Qac/nSeP/3zjTK0w=="], + + "@types/d3-shape": ["@types/d3-shape@3.1.7", "", { "dependencies": { "@types/d3-path": "*" } }, "sha512-VLvUQ33C+3J+8p+Daf+nYSOsjB4GXp19/S/aGo60m9h1v6XaxjiT82lKVWJCfzhtuZ3yD7i/TPeC/fuKLLOSmg=="], + + "@types/d3-time": ["@types/d3-time@3.0.4", "", {}, "sha512-yuzZug1nkAAaBlBBikKZTgzCeA+k1uy4ZFwWANOfKw5z5LRhV0gNA7gNkKm7HoK+HRN0wX3EkxGk0fpbWhmB7g=="], + + "@types/d3-time-format": ["@types/d3-time-format@4.0.3", "", {}, "sha512-5xg9rC+wWL8kdDj153qZcsJ0FWiFt0J5RB6LYUNZjwSnesfblqrI/bJ1wBdJ8OQfncgbJG5+2F+qfqnqyzYxyg=="], + + "@types/d3-timer": ["@types/d3-timer@3.0.2", "", {}, "sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw=="], + + "@types/d3-transition": ["@types/d3-transition@3.0.9", "", { "dependencies": { "@types/d3-selection": "*" } }, "sha512-uZS5shfxzO3rGlu0cC3bjmMFKsXv+SmZZcgp0KD22ts4uGXp5EVYGzu/0YdwZeKmddhcAccYtREJKkPfXkZuCg=="], + + "@types/d3-zoom": ["@types/d3-zoom@3.0.8", "", { "dependencies": { "@types/d3-interpolate": "*", "@types/d3-selection": "*" } }, "sha512-iqMC4/YlFCSlO8+2Ii1GGGliCAY4XdeG748w5vQUbevlbDu0zSjH/+jojorQVBK/se0j6DUFNPBGSqD3YWYnDw=="], + + "@types/debug": ["@types/debug@4.1.12", "", { "dependencies": { "@types/ms": "*" } }, "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ=="], + + "@types/estree": ["@types/estree@1.0.8", "", {}, "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w=="], + + "@types/estree-jsx": ["@types/estree-jsx@1.0.5", "", { "dependencies": { "@types/estree": "*" } }, "sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg=="], + + "@types/geojson": ["@types/geojson@7946.0.16", "", {}, "sha512-6C8nqWur3j98U6+lXDfTUWIfgvZU+EumvpHKcYjujKH7woYyLj2sUmff0tRhrqM7BohUw7Pz3ZB1jj2gW9Fvmg=="], + + "@types/hast": ["@types/hast@3.0.4", "", { "dependencies": { "@types/unist": "*" } }, "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ=="], + + "@types/mdast": ["@types/mdast@4.0.4", "", { "dependencies": { "@types/unist": "*" } }, "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA=="], + + "@types/mdx": ["@types/mdx@2.0.13", "", {}, "sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw=="], + + "@types/ms": ["@types/ms@2.1.0", "", {}, 
"sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA=="], + + "@types/node": ["@types/node@24.0.14", "", { "dependencies": { "undici-types": "~7.8.0" } }, "sha512-4zXMWD91vBLGRtHK3YbIoFMia+1nqEz72coM42C5ETjnNCa/heoj7NT1G67iAfOqMmcfhuCZ4uNpyz8EjlAejw=="], + + "@types/react": ["@types/react@19.1.8", "", { "dependencies": { "csstype": "^3.0.2" } }, "sha512-AwAfQ2Wa5bCx9WP8nZL2uMZWod7J7/JSplxbTmBQ5ms6QpqNYm672H0Vu9ZVKVngQ+ii4R/byguVEUZQyeg44g=="], + + "@types/trusted-types": ["@types/trusted-types@2.0.7", "", {}, "sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw=="], + + "@types/unist": ["@types/unist@3.0.3", "", {}, "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q=="], + + "@typescript/vfs": ["@typescript/vfs@1.6.1", "", { "dependencies": { "debug": "^4.1.1" }, "peerDependencies": { "typescript": "*" } }, "sha512-JwoxboBh7Oz1v38tPbkrZ62ZXNHAk9bJ7c9x0eI5zBfBnBYGhURdbnh7Z4smN/MV48Y5OCcZb58n972UtbazsA=="], + + "@ungap/structured-clone": ["@ungap/structured-clone@1.3.0", "", {}, "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g=="], + + "@vanilla-extract/babel-plugin-debug-ids": ["@vanilla-extract/babel-plugin-debug-ids@1.2.2", "", { "dependencies": { "@babel/core": "^7.23.9" } }, "sha512-MeDWGICAF9zA/OZLOKwhoRlsUW+fiMwnfuOAqFVohL31Agj7Q/RBWAYweqjHLgFBCsdnr6XIfwjJnmb2znEWxw=="], + + "@vanilla-extract/compiler": ["@vanilla-extract/compiler@0.3.0", "", { "dependencies": { "@vanilla-extract/css": "^1.17.4", "@vanilla-extract/integration": "^8.0.4", "vite": "^5.0.0 || ^6.0.0", "vite-node": "^3.2.2" } }, "sha512-8EbPmDMXhY9NrN38Kh8xYDENgBk4i6s6ce4p7E9F3kHtCqxtEgfaKSNS08z/SVCTmaX3IB3N/kGSO0gr+APffg=="], + + "@vanilla-extract/css": ["@vanilla-extract/css@1.17.4", "", { "dependencies": { "@emotion/hash": "^0.9.0", "@vanilla-extract/private": "^1.0.9", "css-what": "^6.1.0", "cssesc": "^3.0.0", "csstype": "^3.0.7", "dedent": "^1.5.3", "deep-object-diff": "^1.1.9", "deepmerge": "^4.2.2", "lru-cache": "^10.4.3", "media-query-parser": "^2.0.2", "modern-ahocorasick": "^1.0.0", "picocolors": "^1.0.0" } }, "sha512-m3g9nQDWPtL+sTFdtCGRMI1Vrp86Ay4PBYq1Bo7Bnchj5ElNtAJpOqD+zg+apthVA4fB7oVpMWNjwpa6ElDWFQ=="], + + "@vanilla-extract/dynamic": ["@vanilla-extract/dynamic@2.1.5", "", { "dependencies": { "@vanilla-extract/private": "^1.0.9" } }, "sha512-QGIFGb1qyXQkbzx6X6i3+3LMc/iv/ZMBttMBL+Wm/DetQd36KsKsFg5CtH3qy+1hCA/5w93mEIIAiL4fkM8ycw=="], + + "@vanilla-extract/integration": ["@vanilla-extract/integration@8.0.4", "", { "dependencies": { "@babel/core": "^7.23.9", "@babel/plugin-syntax-typescript": "^7.23.3", "@vanilla-extract/babel-plugin-debug-ids": "^1.2.2", "@vanilla-extract/css": "^1.17.4", "dedent": "^1.5.3", "esbuild": "npm:esbuild@>=0.17.6 <0.26.0", "eval": "0.1.8", "find-up": "^5.0.0", "javascript-stringify": "^2.0.1", "mlly": "^1.4.2" } }, "sha512-cmOb7tR+g3ulKvFtSbmdw3YUyIS1d7MQqN+FcbwNhdieyno5xzUyfDCMjeWJhmCSMvZ6WlinkrOkgs6SHB+FRg=="], + + "@vanilla-extract/private": ["@vanilla-extract/private@1.0.9", "", {}, "sha512-gT2jbfZuaaCLrAxwXbRgIhGhcXbRZCG3v4TTUnjw0EJ7ArdBRxkq4msNJkbuRkCgfIK5ATmprB5t9ljvLeFDEA=="], + + "@vanilla-extract/vite-plugin": ["@vanilla-extract/vite-plugin@5.1.0", "", { "dependencies": { "@vanilla-extract/compiler": "^0.3.0", "@vanilla-extract/integration": "^8.0.4" }, "peerDependencies": { "vite": "^5.0.0 || ^6.0.0" } }, 
"sha512-BzVdmBD+FUyJnY6I29ZezwtDBc1B78l+VvHvIgoJYbgfPj0hvY0RmrGL8B4oNNGY/lOt7KgQflXY5kBMd3MGZg=="], + + "@vitejs/plugin-react": ["@vitejs/plugin-react@4.6.0", "", { "dependencies": { "@babel/core": "^7.27.4", "@babel/plugin-transform-react-jsx-self": "^7.27.1", "@babel/plugin-transform-react-jsx-source": "^7.27.1", "@rolldown/pluginutils": "1.0.0-beta.19", "@types/babel__core": "^7.20.5", "react-refresh": "^0.17.0" }, "peerDependencies": { "vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0-beta.0" } }, "sha512-5Kgff+m8e2PB+9j51eGHEpn5kUzRKH2Ry0qGoe8ItJg7pqnkPrYPkDQZGgGmTa0EGarHrkjLvOdU3b1fzI8otQ=="], + + "acorn": ["acorn@8.15.0", "", { "bin": { "acorn": "bin/acorn" } }, "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg=="], + + "acorn-jsx": ["acorn-jsx@5.3.2", "", { "peerDependencies": { "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" } }, "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ=="], + + "ansi-regex": ["ansi-regex@6.1.0", "", {}, "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA=="], + + "ansi-styles": ["ansi-styles@6.2.1", "", {}, "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug=="], + + "aria-hidden": ["aria-hidden@1.2.6", "", { "dependencies": { "tslib": "^2.0.0" } }, "sha512-ik3ZgC9dY/lYVVM++OISsaYDeg1tb0VtP5uL3ouh1koGOaUMDPpbFIei4JkFimWUFPn90sbMNMXQAIVOlnYKJA=="], + + "astring": ["astring@1.9.0", "", { "bin": { "astring": "bin/astring" } }, "sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg=="], + + "autoprefixer": ["autoprefixer@10.4.21", "", { "dependencies": { "browserslist": "^4.24.4", "caniuse-lite": "^1.0.30001702", "fraction.js": "^4.3.7", "normalize-range": "^0.1.2", "picocolors": "^1.1.1", "postcss-value-parser": "^4.2.0" }, "peerDependencies": { "postcss": "^8.1.0" }, "bin": { "autoprefixer": "bin/autoprefixer" } }, "sha512-O+A6LWV5LDHSJD3LjHYoNi4VLsj/Whi7k6zG12xTYaU4cQ8oxQGckXNX8cRHK5yOZ/ppVHe0ZBXGzSV9jXdVbQ=="], + + "bail": ["bail@2.0.2", "", {}, "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw=="], + + "balanced-match": ["balanced-match@1.0.2", "", {}, "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="], + + "base64-js": ["base64-js@1.5.1", "", {}, "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA=="], + + "bcp-47-match": ["bcp-47-match@2.0.3", "", {}, "sha512-JtTezzbAibu8G0R9op9zb3vcWZd9JF6M0xOYGPn0fNCd7wOpRB1mU2mH9T8gaBGbAAyIIVgB2G7xG0GP98zMAQ=="], + + "bl": ["bl@5.1.0", "", { "dependencies": { "buffer": "^6.0.3", "inherits": "^2.0.4", "readable-stream": "^3.4.0" } }, "sha512-tv1ZJHLfTDnXE6tMHv73YgSJaWR2AFuPwMntBe7XL/GBFHnT0CLnsHMogfk5+GzCDC5ZWarSCYaIGATZt9dNsQ=="], + + "boolbase": ["boolbase@1.0.0", "", {}, "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww=="], + + "brace-expansion": ["brace-expansion@2.0.2", "", { "dependencies": { "balanced-match": "^1.0.0" } }, "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ=="], + + "braces": ["braces@3.0.3", "", { "dependencies": { "fill-range": "^7.1.1" } }, "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA=="], + + "browserslist": ["browserslist@4.25.1", "", { "dependencies": { "caniuse-lite": "^1.0.30001726", "electron-to-chromium": 
"^1.5.173", "node-releases": "^2.0.19", "update-browserslist-db": "^1.1.3" }, "bin": { "browserslist": "cli.js" } }, "sha512-KGj0KoOMXLpSNkkEI6Z6mShmQy0bc1I+T7K9N81k4WWMrfz+6fQ6es80B/YLAeRoKvjYE1YSHHOW1qe9xIVzHw=="], + + "buffer": ["buffer@6.0.3", "", { "dependencies": { "base64-js": "^1.3.1", "ieee754": "^1.2.1" } }, "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA=="], + + "bytes": ["bytes@3.1.2", "", {}, "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg=="], + + "cac": ["cac@6.7.14", "", {}, "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ=="], + + "caniuse-lite": ["caniuse-lite@1.0.30001727", "", {}, "sha512-pB68nIHmbN6L/4C6MH1DokyR3bYqFwjaSs/sWDHGj4CTcFtQUQMuJftVwWkXq7mNWOybD3KhUv3oWHoGxgP14Q=="], + + "ccount": ["ccount@2.0.1", "", {}, "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg=="], + + "chalk": ["chalk@5.4.1", "", {}, "sha512-zgVZuo2WcZgfUEmsn6eO3kINexW8RAE4maiQ8QNs8CtpPCSyMiYsULR3HQYkm3w8FIA3SberyMJMSldGsW+U3w=="], + + "character-entities": ["character-entities@2.0.2", "", {}, "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ=="], + + "character-entities-html4": ["character-entities-html4@2.1.0", "", {}, "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA=="], + + "character-entities-legacy": ["character-entities-legacy@3.0.0", "", {}, "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ=="], + + "character-reference-invalid": ["character-reference-invalid@2.0.1", "", {}, "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw=="], + + "chevrotain": ["chevrotain@11.0.3", "", { "dependencies": { "@chevrotain/cst-dts-gen": "11.0.3", "@chevrotain/gast": "11.0.3", "@chevrotain/regexp-to-ast": "11.0.3", "@chevrotain/types": "11.0.3", "@chevrotain/utils": "11.0.3", "lodash-es": "4.17.21" } }, "sha512-ci2iJH6LeIkvP9eJW6gpueU8cnZhv85ELY8w8WiFtNjMHA5ad6pQLaJo9mEly/9qUyCpvqX8/POVUTf18/HFdw=="], + + "chevrotain-allstar": ["chevrotain-allstar@0.3.1", "", { "dependencies": { "lodash-es": "^4.17.21" }, "peerDependencies": { "chevrotain": "^11.0.0" } }, "sha512-b7g+y9A0v4mxCW1qUhf3BSVPg+/NvGErk/dOkrDaHA0nQIQGAtrOjlX//9OQtRlSCy+x9rfB5N8yC71lH1nvMw=="], + + "chroma-js": ["chroma-js@3.1.2", "", {}, "sha512-IJnETTalXbsLx1eKEgx19d5L6SRM7cH4vINw/99p/M11HCuXGRWL+6YmCm7FWFGIo6dtWuQoQi1dc5yQ7ESIHg=="], + + "cli-cursor": ["cli-cursor@4.0.0", "", { "dependencies": { "restore-cursor": "^4.0.0" } }, "sha512-VGtlMu3x/4DOtIUwEkRezxUZ2lBacNJCHash0N0WeZDBS+7Ux1dm3XWAgWYxLJFMMdOeXMHXorshEFhbMSGelg=="], + + "cli-spinners": ["cli-spinners@2.9.2", "", {}, "sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg=="], + + "clsx": ["clsx@2.1.1", "", {}, "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA=="], + + "collapse-white-space": ["collapse-white-space@2.1.0", "", {}, "sha512-loKTxY1zCOuG4j9f6EPnuyyYkf58RnhhWTvRoZEokgB+WbdXehfjFviyOVYkqzEWz1Q5kRiZdBYS5SwxbQYwzw=="], + + "color-convert": ["color-convert@2.0.1", "", { "dependencies": { "color-name": "~1.1.4" } }, "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ=="], + + "color-name": ["color-name@1.1.4", "", {}, 
"sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="], + + "comma-separated-tokens": ["comma-separated-tokens@2.0.3", "", {}, "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg=="], + + "commander": ["commander@8.3.0", "", {}, "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww=="], + + "compressible": ["compressible@2.0.18", "", { "dependencies": { "mime-db": ">= 1.43.0 < 2" } }, "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg=="], + + "compression": ["compression@1.8.0", "", { "dependencies": { "bytes": "3.1.2", "compressible": "~2.0.18", "debug": "2.6.9", "negotiator": "~0.6.4", "on-headers": "~1.0.2", "safe-buffer": "5.2.1", "vary": "~1.1.2" } }, "sha512-k6WLKfunuqCYD3t6AsuPGvQWaKwuLLh2/xHNcX4qE+vIfDNXpSqnrhwA7O53R7WVQUnt8dVAIW+YHr7xTgOgGA=="], + + "confbox": ["confbox@0.1.8", "", {}, "sha512-RMtmw0iFkeR4YV+fUOSucriAQNb9g8zFR52MWCtl+cCZOFRNL6zeB395vPzFhEjjn4fMxXudmELnl/KF/WrK6w=="], + + "convert-source-map": ["convert-source-map@2.0.0", "", {}, "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg=="], + + "cookie": ["cookie@1.0.2", "", {}, "sha512-9Kr/j4O16ISv8zBBhJoi4bXOYNTkFLOqSL3UDB0njXxCXNezjeyVrJyGOWtgfs/q2km1gwBcfH8q1yEGoMYunA=="], + + "cose-base": ["cose-base@1.0.3", "", { "dependencies": { "layout-base": "^1.0.0" } }, "sha512-s9whTXInMSgAp/NVXVNuVxVKzGH2qck3aQlVHxDCdAEPgtMKwc4Wq6/QKhgdEdgbLSi9rBTAcPoRa6JpiG4ksg=="], + + "create-vocs": ["create-vocs@1.0.0", "", { "dependencies": { "@clack/prompts": "^0.7.0", "cac": "^6.7.14", "detect-package-manager": "^3.0.2", "fs-extra": "^11.3.0", "picocolors": "^1.1.1" }, "bin": { "create-vocs": "_lib/bin.js" } }, "sha512-Lv1Bd3WZEgwG4nrogkM54m8viW+TWPlGivLyEi7aNb3cuKPsEfMDZ/kTbo87fzOGtsZ2yh7scO54ZmVhhgBgTw=="], + + "cross-spawn": ["cross-spawn@7.0.6", "", { "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", "which": "^2.0.1" } }, "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA=="], + + "css-selector-parser": ["css-selector-parser@3.1.3", "", {}, "sha512-gJMigczVZqYAk0hPVzx/M4Hm1D9QOtqkdQk9005TNzDIUGzo5cnHEDiKUT7jGPximL/oYb+LIitcHFQ4aKupxg=="], + + "css-what": ["css-what@6.2.2", "", {}, "sha512-u/O3vwbptzhMs3L1fQE82ZSLHQQfto5gyZzwteVIEyeaY5Fc7R4dapF/BvRoSYFeqfBk4m0V1Vafq5Pjv25wvA=="], + + "cssesc": ["cssesc@3.0.0", "", { "bin": { "cssesc": "bin/cssesc" } }, "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg=="], + + "csstype": ["csstype@3.1.3", "", {}, "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw=="], + + "cytoscape": ["cytoscape@3.32.1", "", {}, "sha512-dbeqFTLYEwlFg7UGtcZhCCG/2WayX72zK3Sq323CEX29CY81tYfVhw1MIdduCtpstB0cTOhJswWlM/OEB3Xp+Q=="], + + "cytoscape-cose-bilkent": ["cytoscape-cose-bilkent@4.1.0", "", { "dependencies": { "cose-base": "^1.0.0" }, "peerDependencies": { "cytoscape": "^3.2.0" } }, "sha512-wgQlVIUJF13Quxiv5e1gstZ08rnZj2XaLHGoFMYXz7SkNfCDOOteKBE6SYRfA9WxxI/iBc3ajfDoc6hb/MRAHQ=="], + + "cytoscape-fcose": ["cytoscape-fcose@2.2.0", "", { "dependencies": { "cose-base": "^2.2.0" }, "peerDependencies": { "cytoscape": "^3.2.0" } }, "sha512-ki1/VuRIHFCzxWNrsshHYPs6L7TvLu3DL+TyIGEsRcvVERmxokbf5Gdk7mFxZnTdiGtnA4cfSmjZJMviqSuZrQ=="], + + "d3": ["d3@7.9.0", "", { "dependencies": { "d3-array": "3", "d3-axis": "3", "d3-brush": "3", "d3-chord": "3", "d3-color": 
"3", "d3-contour": "4", "d3-delaunay": "6", "d3-dispatch": "3", "d3-drag": "3", "d3-dsv": "3", "d3-ease": "3", "d3-fetch": "3", "d3-force": "3", "d3-format": "3", "d3-geo": "3", "d3-hierarchy": "3", "d3-interpolate": "3", "d3-path": "3", "d3-polygon": "3", "d3-quadtree": "3", "d3-random": "3", "d3-scale": "4", "d3-scale-chromatic": "3", "d3-selection": "3", "d3-shape": "3", "d3-time": "3", "d3-time-format": "4", "d3-timer": "3", "d3-transition": "3", "d3-zoom": "3" } }, "sha512-e1U46jVP+w7Iut8Jt8ri1YsPOvFpg46k+K8TpCb0P+zjCkjkPnV7WzfDJzMHy1LnA+wj5pLT1wjO901gLXeEhA=="], + + "d3-array": ["d3-array@3.2.4", "", { "dependencies": { "internmap": "1 - 2" } }, "sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg=="], + + "d3-axis": ["d3-axis@3.0.0", "", {}, "sha512-IH5tgjV4jE/GhHkRV0HiVYPDtvfjHQlQfJHs0usq7M30XcSBvOotpmH1IgkcXsO/5gEQZD43B//fc7SRT5S+xw=="], + + "d3-brush": ["d3-brush@3.0.0", "", { "dependencies": { "d3-dispatch": "1 - 3", "d3-drag": "2 - 3", "d3-interpolate": "1 - 3", "d3-selection": "3", "d3-transition": "3" } }, "sha512-ALnjWlVYkXsVIGlOsuWH1+3udkYFI48Ljihfnh8FZPF2QS9o+PzGLBslO0PjzVoHLZ2KCVgAM8NVkXPJB2aNnQ=="], + + "d3-chord": ["d3-chord@3.0.1", "", { "dependencies": { "d3-path": "1 - 3" } }, "sha512-VE5S6TNa+j8msksl7HwjxMHDM2yNK3XCkusIlpX5kwauBfXuyLAtNg9jCp/iHH61tgI4sb6R/EIMWCqEIdjT/g=="], + + "d3-color": ["d3-color@3.1.0", "", {}, "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA=="], + + "d3-contour": ["d3-contour@4.0.2", "", { "dependencies": { "d3-array": "^3.2.0" } }, "sha512-4EzFTRIikzs47RGmdxbeUvLWtGedDUNkTcmzoeyg4sP/dvCexO47AaQL7VKy/gul85TOxw+IBgA8US2xwbToNA=="], + + "d3-delaunay": ["d3-delaunay@6.0.4", "", { "dependencies": { "delaunator": "5" } }, "sha512-mdjtIZ1XLAM8bm/hx3WwjfHt6Sggek7qH043O8KEjDXN40xi3vx/6pYSVTwLjEgiXQTbvaouWKynLBiUZ6SK6A=="], + + "d3-dispatch": ["d3-dispatch@3.0.1", "", {}, "sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg=="], + + "d3-drag": ["d3-drag@3.0.0", "", { "dependencies": { "d3-dispatch": "1 - 3", "d3-selection": "3" } }, "sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg=="], + + "d3-dsv": ["d3-dsv@3.0.1", "", { "dependencies": { "commander": "7", "iconv-lite": "0.6", "rw": "1" }, "bin": { "csv2json": "bin/dsv2json.js", "csv2tsv": "bin/dsv2dsv.js", "dsv2dsv": "bin/dsv2dsv.js", "dsv2json": "bin/dsv2json.js", "json2csv": "bin/json2dsv.js", "json2dsv": "bin/json2dsv.js", "json2tsv": "bin/json2dsv.js", "tsv2csv": "bin/dsv2dsv.js", "tsv2json": "bin/dsv2json.js" } }, "sha512-UG6OvdI5afDIFP9w4G0mNq50dSOsXHJaRE8arAS5o9ApWnIElp8GZw1Dun8vP8OyHOZ/QJUKUJwxiiCCnUwm+Q=="], + + "d3-ease": ["d3-ease@3.0.1", "", {}, "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w=="], + + "d3-fetch": ["d3-fetch@3.0.1", "", { "dependencies": { "d3-dsv": "1 - 3" } }, "sha512-kpkQIM20n3oLVBKGg6oHrUchHM3xODkTzjMoj7aWQFq5QEM+R6E4WkzT5+tojDY7yjez8KgCBRoj4aEr99Fdqw=="], + + "d3-force": ["d3-force@3.0.0", "", { "dependencies": { "d3-dispatch": "1 - 3", "d3-quadtree": "1 - 3", "d3-timer": "1 - 3" } }, "sha512-zxV/SsA+U4yte8051P4ECydjD/S+qeYtnaIyAs9tgHCqfguma/aAQDjo85A9Z6EKhBirHRJHXIgJUlffT4wdLg=="], + + "d3-format": ["d3-format@3.1.0", "", {}, "sha512-YyUI6AEuY/Wpt8KWLgZHsIU86atmikuoOmCfommt0LYHiQSPjvX2AcFc38PX0CBpr2RCyZhjex+NS/LPOv6YqA=="], + + "d3-geo": ["d3-geo@3.1.1", "", { "dependencies": { "d3-array": "2.5.0 - 3" } }, 
"sha512-637ln3gXKXOwhalDzinUgY83KzNWZRKbYubaG+fGVuc/dxO64RRljtCTnf5ecMyE1RIdtqpkVcq0IbtU2S8j2Q=="], + + "d3-hierarchy": ["d3-hierarchy@3.1.2", "", {}, "sha512-FX/9frcub54beBdugHjDCdikxThEqjnR93Qt7PvQTOHxyiNCAlvMrHhclk3cD5VeAaq9fxmfRp+CnWw9rEMBuA=="], + + "d3-interpolate": ["d3-interpolate@3.0.1", "", { "dependencies": { "d3-color": "1 - 3" } }, "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g=="], + + "d3-path": ["d3-path@3.1.0", "", {}, "sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ=="], + + "d3-polygon": ["d3-polygon@3.0.1", "", {}, "sha512-3vbA7vXYwfe1SYhED++fPUQlWSYTTGmFmQiany/gdbiWgU/iEyQzyymwL9SkJjFFuCS4902BSzewVGsHHmHtXg=="], + + "d3-quadtree": ["d3-quadtree@3.0.1", "", {}, "sha512-04xDrxQTDTCFwP5H6hRhsRcb9xxv2RzkcsygFzmkSIOJy3PeRJP7sNk3VRIbKXcog561P9oU0/rVH6vDROAgUw=="], + + "d3-random": ["d3-random@3.0.1", "", {}, "sha512-FXMe9GfxTxqd5D6jFsQ+DJ8BJS4E/fT5mqqdjovykEB2oFbTMDVdg1MGFxfQW+FBOGoB++k8swBrgwSHT1cUXQ=="], + + "d3-sankey": ["d3-sankey@0.12.3", "", { "dependencies": { "d3-array": "1 - 2", "d3-shape": "^1.2.0" } }, "sha512-nQhsBRmM19Ax5xEIPLMY9ZmJ/cDvd1BG3UVvt5h3WRxKg5zGRbvnteTyWAbzeSvlh3tW7ZEmq4VwR5mB3tutmQ=="], + + "d3-scale": ["d3-scale@4.0.2", "", { "dependencies": { "d3-array": "2.10.0 - 3", "d3-format": "1 - 3", "d3-interpolate": "1.2.0 - 3", "d3-time": "2.1.1 - 3", "d3-time-format": "2 - 4" } }, "sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ=="], + + "d3-scale-chromatic": ["d3-scale-chromatic@3.1.0", "", { "dependencies": { "d3-color": "1 - 3", "d3-interpolate": "1 - 3" } }, "sha512-A3s5PWiZ9YCXFye1o246KoscMWqf8BsD9eRiJ3He7C9OBaxKhAd5TFCdEx/7VbKtxxTsu//1mMJFrEt572cEyQ=="], + + "d3-selection": ["d3-selection@3.0.0", "", {}, "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ=="], + + "d3-shape": ["d3-shape@3.2.0", "", { "dependencies": { "d3-path": "^3.1.0" } }, "sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA=="], + + "d3-time": ["d3-time@3.1.0", "", { "dependencies": { "d3-array": "2 - 3" } }, "sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q=="], + + "d3-time-format": ["d3-time-format@4.1.0", "", { "dependencies": { "d3-time": "1 - 3" } }, "sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg=="], + + "d3-timer": ["d3-timer@3.0.1", "", {}, "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA=="], + + "d3-transition": ["d3-transition@3.0.1", "", { "dependencies": { "d3-color": "1 - 3", "d3-dispatch": "1 - 3", "d3-ease": "1 - 3", "d3-interpolate": "1 - 3", "d3-timer": "1 - 3" }, "peerDependencies": { "d3-selection": "2 - 3" } }, "sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w=="], + + "d3-zoom": ["d3-zoom@3.0.0", "", { "dependencies": { "d3-dispatch": "1 - 3", "d3-drag": "2 - 3", "d3-interpolate": "1 - 3", "d3-selection": "2 - 3", "d3-transition": "2 - 3" } }, "sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw=="], + + "dagre-d3-es": ["dagre-d3-es@7.0.11", "", { "dependencies": { "d3": "^7.9.0", "lodash-es": "^4.17.21" } }, "sha512-tvlJLyQf834SylNKax8Wkzco/1ias1OPw8DcUMDE7oUIoSEW25riQVuiu/0OWEFqT0cxHT3Pa9/D82Jr47IONw=="], + + "dayjs": ["dayjs@1.11.13", "", {}, 
"sha512-oaMBel6gjolK862uaPQOVTA7q3TZhuSvuMQAAglQDOWYO9A91IrAOUJEyKVlqJlHE0vq5p5UXxzdPfMH/x6xNg=="], + + "debug": ["debug@2.6.9", "", { "dependencies": { "ms": "2.0.0" } }, "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA=="], + + "decode-named-character-reference": ["decode-named-character-reference@1.2.0", "", { "dependencies": { "character-entities": "^2.0.0" } }, "sha512-c6fcElNV6ShtZXmsgNgFFV5tVX2PaV4g+MOAkb8eXHvn6sryJBrZa9r0zV6+dtTyoCKxtDy5tyQ5ZwQuidtd+Q=="], + + "dedent": ["dedent@1.6.0", "", { "peerDependencies": { "babel-plugin-macros": "^3.1.0" }, "optionalPeers": ["babel-plugin-macros"] }, "sha512-F1Z+5UCFpmQUzJa11agbyPVMbpgT/qA3/SKyJ1jyBgm7dUcUEa8v9JwDkerSQXfakBwFljIxhOJqGkjUwZ9FSA=="], + + "deep-object-diff": ["deep-object-diff@1.1.9", "", {}, "sha512-Rn+RuwkmkDwCi2/oXOFS9Gsr5lJZu/yTGpK7wAaAIE75CC+LCGEZHpY6VQJa/RoJcrmaA/docWJZvYohlNkWPA=="], + + "deepmerge": ["deepmerge@4.3.1", "", {}, "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A=="], + + "delaunator": ["delaunator@5.0.1", "", { "dependencies": { "robust-predicates": "^3.0.2" } }, "sha512-8nvh+XBe96aCESrGOqMp/84b13H9cdKbG5P2ejQCh4d4sK9RL4371qou9drQjMhvnPmhWl5hnmqbEE0fXr9Xnw=="], + + "depd": ["depd@2.0.0", "", {}, "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw=="], + + "dequal": ["dequal@2.0.3", "", {}, "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA=="], + + "destroy": ["destroy@1.2.0", "", {}, "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg=="], + + "detect-libc": ["detect-libc@2.0.4", "", {}, "sha512-3UDv+G9CsCKO1WKMGw9fwq/SWJYbI0c5Y7LU1AXYoDdbhE2AHQ6N6Nb34sG8Fj7T5APy8qXDCKuuIHd1BR0tVA=="], + + "detect-node-es": ["detect-node-es@1.1.0", "", {}, "sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ=="], + + "detect-package-manager": ["detect-package-manager@3.0.2", "", { "dependencies": { "execa": "^5.1.1" } }, "sha512-8JFjJHutStYrfWwzfretQoyNGoZVW1Fsrp4JO9spa7h/fBfwgTMEIy4/LBzRDGsxwVPHU0q+T9YvwLDJoOApLQ=="], + + "devlop": ["devlop@1.1.0", "", { "dependencies": { "dequal": "^2.0.0" } }, "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA=="], + + "direction": ["direction@2.0.1", "", { "bin": { "direction": "cli.js" } }, "sha512-9S6m9Sukh1cZNknO1CWAr2QAWsbKLafQiyM5gZ7VgXHeuaoUwffKN4q6NC4A/Mf9iiPlOXQEKW/Mv/mh9/3YFA=="], + + "dompurify": ["dompurify@3.2.6", "", { "optionalDependencies": { "@types/trusted-types": "^2.0.7" } }, "sha512-/2GogDQlohXPZe6D6NOgQvXLPSYBqIWMnZ8zzOhn09REE4eyAzb+Hed3jhoM9OkuaJ8P6ZGTTVWQKAi8ieIzfQ=="], + + "eastasianwidth": ["eastasianwidth@0.2.0", "", {}, "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA=="], + + "ee-first": ["ee-first@1.1.1", "", {}, "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow=="], + + "electron-to-chromium": ["electron-to-chromium@1.5.186", "", {}, "sha512-lur7L4BFklgepaJxj4DqPk7vKbTEl0pajNlg2QjE5shefmlmBLm2HvQ7PMf1R/GvlevT/581cop33/quQcfX3A=="], + + "emoji-regex": ["emoji-regex@10.4.0", "", {}, "sha512-EC+0oUMY1Rqm4O6LLrgjtYDvcVYTy7chDnM4Q7030tP4Kwj3u/pR6gP9ygnp2CJMK5Gq+9Q2oqmrFJAz01DXjw=="], + + "emoji-regex-xs": ["emoji-regex-xs@1.0.0", "", {}, "sha512-LRlerrMYoIDrT6jgpeZ2YYl/L8EulRTt5hQcYjy5AInh7HWXKimpqx68aknBFpGL2+/IcogTcaydJEgaTmOpDg=="], + + "encodeurl": 
["encodeurl@2.0.0", "", {}, "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg=="], + + "enhanced-resolve": ["enhanced-resolve@5.18.2", "", { "dependencies": { "graceful-fs": "^4.2.4", "tapable": "^2.2.0" } }, "sha512-6Jw4sE1maoRJo3q8MsSIn2onJFbLTOjY9hlx4DZXmOKvLRd1Ok2kXmAGXaafL2+ijsJZ1ClYbl/pmqr9+k4iUQ=="], + + "entities": ["entities@6.0.1", "", {}, "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g=="], + + "es-module-lexer": ["es-module-lexer@1.7.0", "", {}, "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA=="], + + "esast-util-from-estree": ["esast-util-from-estree@2.0.0", "", { "dependencies": { "@types/estree-jsx": "^1.0.0", "devlop": "^1.0.0", "estree-util-visit": "^2.0.0", "unist-util-position-from-estree": "^2.0.0" } }, "sha512-4CyanoAudUSBAn5K13H4JhsMH6L9ZP7XbLVe/dKybkxMO7eDyLsT8UHl9TRNrU2Gr9nz+FovfSIjuXWJ81uVwQ=="], + + "esast-util-from-js": ["esast-util-from-js@2.0.1", "", { "dependencies": { "@types/estree-jsx": "^1.0.0", "acorn": "^8.0.0", "esast-util-from-estree": "^2.0.0", "vfile-message": "^4.0.0" } }, "sha512-8Ja+rNJ0Lt56Pcf3TAmpBZjmx8ZcK5Ts4cAzIOjsjevg9oSXJnl6SUQ2EevU8tv3h6ZLWmoKL5H4fgWvdvfETw=="], + + "esbuild": ["esbuild@0.25.6", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.25.6", "@esbuild/android-arm": "0.25.6", "@esbuild/android-arm64": "0.25.6", "@esbuild/android-x64": "0.25.6", "@esbuild/darwin-arm64": "0.25.6", "@esbuild/darwin-x64": "0.25.6", "@esbuild/freebsd-arm64": "0.25.6", "@esbuild/freebsd-x64": "0.25.6", "@esbuild/linux-arm": "0.25.6", "@esbuild/linux-arm64": "0.25.6", "@esbuild/linux-ia32": "0.25.6", "@esbuild/linux-loong64": "0.25.6", "@esbuild/linux-mips64el": "0.25.6", "@esbuild/linux-ppc64": "0.25.6", "@esbuild/linux-riscv64": "0.25.6", "@esbuild/linux-s390x": "0.25.6", "@esbuild/linux-x64": "0.25.6", "@esbuild/netbsd-arm64": "0.25.6", "@esbuild/netbsd-x64": "0.25.6", "@esbuild/openbsd-arm64": "0.25.6", "@esbuild/openbsd-x64": "0.25.6", "@esbuild/openharmony-arm64": "0.25.6", "@esbuild/sunos-x64": "0.25.6", "@esbuild/win32-arm64": "0.25.6", "@esbuild/win32-ia32": "0.25.6", "@esbuild/win32-x64": "0.25.6" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-GVuzuUwtdsghE3ocJ9Bs8PNoF13HNQ5TXbEi2AhvVb8xU1Iwt9Fos9FEamfoee+u/TOsn7GUWc04lz46n2bbTg=="], + + "escalade": ["escalade@3.2.0", "", {}, "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA=="], + + "escape-html": ["escape-html@1.0.3", "", {}, "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow=="], + + "escape-string-regexp": ["escape-string-regexp@5.0.0", "", {}, "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw=="], + + "estree-util-attach-comments": ["estree-util-attach-comments@3.0.0", "", { "dependencies": { "@types/estree": "^1.0.0" } }, "sha512-cKUwm/HUcTDsYh/9FgnuFqpfquUbwIqwKM26BVCGDPVgvaCl/nDCCjUfiLlx6lsEZ3Z4RFxNbOQ60pkaEwFxGw=="], + + "estree-util-build-jsx": ["estree-util-build-jsx@3.0.1", "", { "dependencies": { "@types/estree-jsx": "^1.0.0", "devlop": "^1.0.0", "estree-util-is-identifier-name": "^3.0.0", "estree-walker": "^3.0.0" } }, "sha512-8U5eiL6BTrPxp/CHbs2yMgP8ftMhR5ww1eIKoWRMlqvltHF8fZn5LRDvTKuxD3DUn+shRbLGqXemcP51oFCsGQ=="], + + "estree-util-is-identifier-name": ["estree-util-is-identifier-name@3.0.0", "", {}, "sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg=="], + + 
"estree-util-scope": ["estree-util-scope@1.0.0", "", { "dependencies": { "@types/estree": "^1.0.0", "devlop": "^1.0.0" } }, "sha512-2CAASclonf+JFWBNJPndcOpA8EMJwa0Q8LUFJEKqXLW6+qBvbFZuF5gItbQOs/umBUkjviCSDCbBwU2cXbmrhQ=="], + + "estree-util-to-js": ["estree-util-to-js@2.0.0", "", { "dependencies": { "@types/estree-jsx": "^1.0.0", "astring": "^1.8.0", "source-map": "^0.7.0" } }, "sha512-WDF+xj5rRWmD5tj6bIqRi6CkLIXbbNQUcxQHzGysQzvHmdYG2G7p/Tf0J0gpxGgkeMZNTIjT/AoSvC9Xehcgdg=="], + + "estree-util-value-to-estree": ["estree-util-value-to-estree@3.4.0", "", { "dependencies": { "@types/estree": "^1.0.0" } }, "sha512-Zlp+gxis+gCfK12d3Srl2PdX2ybsEA8ZYy6vQGVQTNNYLEGRQQ56XB64bjemN8kxIKXP1nC9ip4Z+ILy9LGzvQ=="], + + "estree-util-visit": ["estree-util-visit@2.0.0", "", { "dependencies": { "@types/estree-jsx": "^1.0.0", "@types/unist": "^3.0.0" } }, "sha512-m5KgiH85xAhhW8Wta0vShLcUvOsh3LLPI2YVwcbio1l7E09NTLL1EyMZFM1OyWowoH0skScNbhOPl4kcBgzTww=="], + + "estree-walker": ["estree-walker@3.0.3", "", { "dependencies": { "@types/estree": "^1.0.0" } }, "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g=="], + + "etag": ["etag@1.8.1", "", {}, "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg=="], + + "eval": ["eval@0.1.8", "", { "dependencies": { "@types/node": "*", "require-like": ">= 0.1.1" } }, "sha512-EzV94NYKoO09GLXGjXj9JIlXijVck4ONSr5wiCWDvhsvj5jxSrzTmRU/9C1DyB6uToszLs8aifA6NQ7lEQdvFw=="], + + "execa": ["execa@5.1.1", "", { "dependencies": { "cross-spawn": "^7.0.3", "get-stream": "^6.0.0", "human-signals": "^2.1.0", "is-stream": "^2.0.0", "merge-stream": "^2.0.0", "npm-run-path": "^4.0.1", "onetime": "^5.1.2", "signal-exit": "^3.0.3", "strip-final-newline": "^2.0.0" } }, "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg=="], + + "exsolve": ["exsolve@1.0.7", "", {}, "sha512-VO5fQUzZtI6C+vx4w/4BWJpg3s/5l+6pRQEHzFRM8WFi4XffSP1Z+4qi7GbjWbvRQEbdIco5mIMq+zX4rPuLrw=="], + + "extend": ["extend@3.0.2", "", {}, "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g=="], + + "fast-glob": ["fast-glob@3.3.3", "", { "dependencies": { "@nodelib/fs.stat": "^2.0.2", "@nodelib/fs.walk": "^1.2.3", "glob-parent": "^5.1.2", "merge2": "^1.3.0", "micromatch": "^4.0.8" } }, "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg=="], + + "fastq": ["fastq@1.19.1", "", { "dependencies": { "reusify": "^1.0.4" } }, "sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ=="], + + "fault": ["fault@2.0.1", "", { "dependencies": { "format": "^0.2.0" } }, "sha512-WtySTkS4OKev5JtpHXnib4Gxiurzh5NCGvWrFaZ34m6JehfTUhKZvn9njTfw48t6JumVQOmrKqpmGcdwxnhqBQ=="], + + "fdir": ["fdir@6.4.6", "", { "peerDependencies": { "picomatch": "^3 || ^4" }, "optionalPeers": ["picomatch"] }, "sha512-hiFoqpyZcfNm1yc4u8oWCf9A2c4D3QjCrks3zmoVKVxpQRzmPNar1hUJcBG2RQHvEVGDN+Jm81ZheVLAQMK6+w=="], + + "fill-range": ["fill-range@7.1.1", "", { "dependencies": { "to-regex-range": "^5.0.1" } }, "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg=="], + + "find-up": ["find-up@5.0.0", "", { "dependencies": { "locate-path": "^6.0.0", "path-exists": "^4.0.0" } }, "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng=="], + + "foreground-child": ["foreground-child@3.3.1", "", { "dependencies": { "cross-spawn": "^7.0.6", "signal-exit": "^4.0.1" } }, 
"sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw=="], + + "format": ["format@0.2.2", "", {}, "sha512-wzsgA6WOq+09wrU1tsJ09udeR/YZRaeArL9e1wPbFg3GG2yDnC2ldKpxs4xunpFF9DgqCqOIra3bc1HWrJ37Ww=="], + + "fraction.js": ["fraction.js@4.3.7", "", {}, "sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew=="], + + "fresh": ["fresh@0.5.2", "", {}, "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q=="], + + "fs-extra": ["fs-extra@11.3.0", "", { "dependencies": { "graceful-fs": "^4.2.0", "jsonfile": "^6.0.1", "universalify": "^2.0.0" } }, "sha512-Z4XaCL6dUDHfP/jT25jJKMmtxvuwbkrD1vNSMFlo9lNLY2c5FHYSQgHPRZUjAB26TpDEoW9HCOgplrdbaPV/ew=="], + + "fsevents": ["fsevents@2.3.2", "", { "os": "darwin" }, "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA=="], + + "gensync": ["gensync@1.0.0-beta.2", "", {}, "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg=="], + + "get-nonce": ["get-nonce@1.0.1", "", {}, "sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q=="], + + "get-stream": ["get-stream@6.0.1", "", {}, "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg=="], + + "github-slugger": ["github-slugger@2.0.0", "", {}, "sha512-IaOQ9puYtjrkq7Y0Ygl9KDZnrf/aiUJYUpVf89y8kyaxbRG7Y1SrX/jaumrv81vc61+kiMempujsM3Yw7w5qcw=="], + + "glob": ["glob@11.0.3", "", { "dependencies": { "foreground-child": "^3.3.1", "jackspeak": "^4.1.1", "minimatch": "^10.0.3", "minipass": "^7.1.2", "package-json-from-dist": "^1.0.0", "path-scurry": "^2.0.0" }, "bin": { "glob": "dist/esm/bin.mjs" } }, "sha512-2Nim7dha1KVkaiF4q6Dj+ngPPMdfvLJEOpZk/jKiUAkqKebpGAWQXAq9z1xu9HKu5lWfqw/FASuccEjyznjPaA=="], + + "glob-parent": ["glob-parent@5.1.2", "", { "dependencies": { "is-glob": "^4.0.1" } }, "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow=="], + + "globals": ["globals@15.15.0", "", {}, "sha512-7ACyT3wmyp3I61S4fG682L0VA2RGD9otkqGJIwNUMF1SWUombIIk+af1unuDYgMm082aHYwD+mzJvv9Iu8dsgg=="], + + "globby": ["globby@14.1.0", "", { "dependencies": { "@sindresorhus/merge-streams": "^2.1.0", "fast-glob": "^3.3.3", "ignore": "^7.0.3", "path-type": "^6.0.0", "slash": "^5.1.0", "unicorn-magic": "^0.3.0" } }, "sha512-0Ia46fDOaT7k4og1PDW4YbodWWr3scS2vAr2lTbsplOt2WkKp0vQbkI9wKis/T5LV/dqPjO3bpS/z6GTJB82LA=="], + + "graceful-fs": ["graceful-fs@4.2.11", "", {}, "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ=="], + + "hachure-fill": ["hachure-fill@0.5.2", "", {}, "sha512-3GKBOn+m2LX9iq+JC1064cSFprJY4jL1jCXTcpnfER5HYE2l/4EfWSGzkPa/ZDBmYI0ZOEj5VHV/eKnPGkHuOg=="], + + "hast-util-classnames": ["hast-util-classnames@3.0.0", "", { "dependencies": { "@types/hast": "^3.0.0", "space-separated-tokens": "^2.0.0" } }, "sha512-tI3JjoGDEBVorMAWK4jNRsfLMYmih1BUOG3VV36pH36njs1IEl7xkNrVTD2mD2yYHmQCa5R/fj61a8IAF4bRaQ=="], + + "hast-util-from-dom": ["hast-util-from-dom@5.0.1", "", { "dependencies": { "@types/hast": "^3.0.0", "hastscript": "^9.0.0", "web-namespaces": "^2.0.0" } }, "sha512-N+LqofjR2zuzTjCPzyDUdSshy4Ma6li7p/c3pA78uTwzFgENbgbUrm2ugwsOdcjI1muO+o6Dgzp9p8WHtn/39Q=="], + + "hast-util-from-html": ["hast-util-from-html@2.0.3", "", { "dependencies": { "@types/hast": "^3.0.0", "devlop": "^1.1.0", "hast-util-from-parse5": "^8.0.0", "parse5": "^7.0.0", "vfile": "^6.0.0", "vfile-message": "^4.0.0" } 
}, "sha512-CUSRHXyKjzHov8yKsQjGOElXy/3EKpyX56ELnkHH34vDVw1N1XSQ1ZcAvTyAPtGqLTuKP/uxM+aLkSPqF/EtMw=="], + + "hast-util-from-html-isomorphic": ["hast-util-from-html-isomorphic@2.0.0", "", { "dependencies": { "@types/hast": "^3.0.0", "hast-util-from-dom": "^5.0.0", "hast-util-from-html": "^2.0.0", "unist-util-remove-position": "^5.0.0" } }, "sha512-zJfpXq44yff2hmE0XmwEOzdWin5xwH+QIhMLOScpX91e/NSGPsAzNCvLQDIEPyO2TXi+lBmU6hjLIhV8MwP2kw=="], + + "hast-util-from-parse5": ["hast-util-from-parse5@8.0.3", "", { "dependencies": { "@types/hast": "^3.0.0", "@types/unist": "^3.0.0", "devlop": "^1.0.0", "hastscript": "^9.0.0", "property-information": "^7.0.0", "vfile": "^6.0.0", "vfile-location": "^5.0.0", "web-namespaces": "^2.0.0" } }, "sha512-3kxEVkEKt0zvcZ3hCRYI8rqrgwtlIOFMWkbclACvjlDw8Li9S2hk/d51OI0nr/gIpdMHNepwgOKqZ/sy0Clpyg=="], + + "hast-util-has-property": ["hast-util-has-property@3.0.0", "", { "dependencies": { "@types/hast": "^3.0.0" } }, "sha512-MNilsvEKLFpV604hwfhVStK0usFY/QmM5zX16bo7EjnAEGofr5YyI37kzopBlZJkHD4t887i+q/C8/tr5Q94cA=="], + + "hast-util-heading-rank": ["hast-util-heading-rank@3.0.0", "", { "dependencies": { "@types/hast": "^3.0.0" } }, "sha512-EJKb8oMUXVHcWZTDepnr+WNbfnXKFNf9duMesmr4S8SXTJBJ9M4Yok08pu9vxdJwdlGRhVumk9mEhkEvKGifwA=="], + + "hast-util-is-element": ["hast-util-is-element@3.0.0", "", { "dependencies": { "@types/hast": "^3.0.0" } }, "sha512-Val9mnv2IWpLbNPqc/pUem+a7Ipj2aHacCwgNfTiK0vJKl0LF+4Ba4+v1oPHFpf3bLYmreq0/l3Gud9S5OH42g=="], + + "hast-util-parse-selector": ["hast-util-parse-selector@4.0.0", "", { "dependencies": { "@types/hast": "^3.0.0" } }, "sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A=="], + + "hast-util-select": ["hast-util-select@6.0.4", "", { "dependencies": { "@types/hast": "^3.0.0", "@types/unist": "^3.0.0", "bcp-47-match": "^2.0.0", "comma-separated-tokens": "^2.0.0", "css-selector-parser": "^3.0.0", "devlop": "^1.0.0", "direction": "^2.0.0", "hast-util-has-property": "^3.0.0", "hast-util-to-string": "^3.0.0", "hast-util-whitespace": "^3.0.0", "nth-check": "^2.0.0", "property-information": "^7.0.0", "space-separated-tokens": "^2.0.0", "unist-util-visit": "^5.0.0", "zwitch": "^2.0.0" } }, "sha512-RqGS1ZgI0MwxLaKLDxjprynNzINEkRHY2i8ln4DDjgv9ZhcYVIHN9rlpiYsqtFwrgpYU361SyWDQcGNIBVu3lw=="], + + "hast-util-to-estree": ["hast-util-to-estree@3.1.3", "", { "dependencies": { "@types/estree": "^1.0.0", "@types/estree-jsx": "^1.0.0", "@types/hast": "^3.0.0", "comma-separated-tokens": "^2.0.0", "devlop": "^1.0.0", "estree-util-attach-comments": "^3.0.0", "estree-util-is-identifier-name": "^3.0.0", "hast-util-whitespace": "^3.0.0", "mdast-util-mdx-expression": "^2.0.0", "mdast-util-mdx-jsx": "^3.0.0", "mdast-util-mdxjs-esm": "^2.0.0", "property-information": "^7.0.0", "space-separated-tokens": "^2.0.0", "style-to-js": "^1.0.0", "unist-util-position": "^5.0.0", "zwitch": "^2.0.0" } }, "sha512-48+B/rJWAp0jamNbAAf9M7Uf//UVqAoMmgXhBdxTDJLGKY+LRnZ99qcG+Qjl5HfMpYNzS5v4EAwVEF34LeAj7w=="], + + "hast-util-to-html": ["hast-util-to-html@9.0.5", "", { "dependencies": { "@types/hast": "^3.0.0", "@types/unist": "^3.0.0", "ccount": "^2.0.0", "comma-separated-tokens": "^2.0.0", "hast-util-whitespace": "^3.0.0", "html-void-elements": "^3.0.0", "mdast-util-to-hast": "^13.0.0", "property-information": "^7.0.0", "space-separated-tokens": "^2.0.0", "stringify-entities": "^4.0.0", "zwitch": "^2.0.4" } }, "sha512-OguPdidb+fbHQSU4Q4ZiLKnzWo8Wwsf5bZfbvu7//a9oTYoqD/fWpe96NuHkoS9h0ccGOTe0C4NGXdtS0iObOw=="], + + "hast-util-to-jsx-runtime": 
["hast-util-to-jsx-runtime@2.3.6", "", { "dependencies": { "@types/estree": "^1.0.0", "@types/hast": "^3.0.0", "@types/unist": "^3.0.0", "comma-separated-tokens": "^2.0.0", "devlop": "^1.0.0", "estree-util-is-identifier-name": "^3.0.0", "hast-util-whitespace": "^3.0.0", "mdast-util-mdx-expression": "^2.0.0", "mdast-util-mdx-jsx": "^3.0.0", "mdast-util-mdxjs-esm": "^2.0.0", "property-information": "^7.0.0", "space-separated-tokens": "^2.0.0", "style-to-js": "^1.0.0", "unist-util-position": "^5.0.0", "vfile-message": "^4.0.0" } }, "sha512-zl6s8LwNyo1P9uw+XJGvZtdFF1GdAkOg8ujOw+4Pyb76874fLps4ueHXDhXWdk6YHQ6OgUtinliG7RsYvCbbBg=="], + + "hast-util-to-string": ["hast-util-to-string@3.0.1", "", { "dependencies": { "@types/hast": "^3.0.0" } }, "sha512-XelQVTDWvqcl3axRfI0xSeoVKzyIFPwsAGSLIsKdJKQMXDYJS4WYrBNF/8J7RdhIcFI2BOHgAifggsvsxp/3+A=="], + + "hast-util-to-text": ["hast-util-to-text@4.0.2", "", { "dependencies": { "@types/hast": "^3.0.0", "@types/unist": "^3.0.0", "hast-util-is-element": "^3.0.0", "unist-util-find-after": "^5.0.0" } }, "sha512-KK6y/BN8lbaq654j7JgBydev7wuNMcID54lkRav1P0CaE1e47P72AWWPiGKXTJU271ooYzcvTAn/Zt0REnvc7A=="], + + "hast-util-whitespace": ["hast-util-whitespace@3.0.0", "", { "dependencies": { "@types/hast": "^3.0.0" } }, "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw=="], + + "hastscript": ["hastscript@8.0.0", "", { "dependencies": { "@types/hast": "^3.0.0", "comma-separated-tokens": "^2.0.0", "hast-util-parse-selector": "^4.0.0", "property-information": "^6.0.0", "space-separated-tokens": "^2.0.0" } }, "sha512-dMOtzCEd3ABUeSIISmrETiKuyydk1w0pa+gE/uormcTpSYuaNJPbX1NU3JLyscSLjwAQM8bWMhhIlnCqnRvDTw=="], + + "hono": ["hono@4.8.5", "", {}, "sha512-Up2cQbtNz1s111qpnnECdTGqSIUIhZJMLikdKkshebQSEBcoUKq6XJayLGqSZWidiH0zfHRCJqFu062Mz5UuRA=="], + + "html-void-elements": ["html-void-elements@3.0.0", "", {}, "sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg=="], + + "http-errors": ["http-errors@2.0.0", "", { "dependencies": { "depd": "2.0.0", "inherits": "2.0.4", "setprototypeof": "1.2.0", "statuses": "2.0.1", "toidentifier": "1.0.1" } }, "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ=="], + + "human-signals": ["human-signals@2.1.0", "", {}, "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw=="], + + "iconv-lite": ["iconv-lite@0.6.3", "", { "dependencies": { "safer-buffer": ">= 2.1.2 < 3.0.0" } }, "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw=="], + + "ieee754": ["ieee754@1.2.1", "", {}, "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA=="], + + "ignore": ["ignore@7.0.5", "", {}, "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg=="], + + "inherits": ["inherits@2.0.4", "", {}, "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="], + + "inline-style-parser": ["inline-style-parser@0.2.4", "", {}, "sha512-0aO8FkhNZlj/ZIbNi7Lxxr12obT7cL1moPfE4tg1LkX7LlLfC6DeX4l2ZEud1ukP9jNQyNnfzQVqwbwmAATY4Q=="], + + "internmap": ["internmap@1.0.1", "", {}, "sha512-lDB5YccMydFBtasVtxnZ3MRBHuaoE8GKsppq+EchKL2U4nK/DmEpPHNH8MZe5HkMtpSiTSOZwfN0tzYjO/lJEw=="], + + "is-alphabetical": ["is-alphabetical@2.0.1", "", {}, "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ=="], + + "is-alphanumerical": 
["is-alphanumerical@2.0.1", "", { "dependencies": { "is-alphabetical": "^2.0.0", "is-decimal": "^2.0.0" } }, "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw=="], + + "is-decimal": ["is-decimal@2.0.1", "", {}, "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A=="], + + "is-extglob": ["is-extglob@2.1.1", "", {}, "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ=="], + + "is-fullwidth-code-point": ["is-fullwidth-code-point@3.0.0", "", {}, "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="], + + "is-glob": ["is-glob@4.0.3", "", { "dependencies": { "is-extglob": "^2.1.1" } }, "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg=="], + + "is-hexadecimal": ["is-hexadecimal@2.0.1", "", {}, "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg=="], + + "is-interactive": ["is-interactive@2.0.0", "", {}, "sha512-qP1vozQRI+BMOPcjFzrjXuQvdak2pHNUMZoeG2eRbiSqyvbEf/wQtEOTOX1guk6E3t36RkaqiSt8A/6YElNxLQ=="], + + "is-number": ["is-number@7.0.0", "", {}, "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng=="], + + "is-plain-obj": ["is-plain-obj@4.1.0", "", {}, "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg=="], + + "is-stream": ["is-stream@2.0.1", "", {}, "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg=="], + + "is-unicode-supported": ["is-unicode-supported@1.3.0", "", {}, "sha512-43r2mRvz+8JRIKnWJ+3j8JtjRKZ6GmjzfaE/qiBJnikNnYv/6bagRJ1kUhNk8R5EX/GkobD+r+sfxCPJsiKBLQ=="], + + "isexe": ["isexe@2.0.0", "", {}, "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw=="], + + "jackspeak": ["jackspeak@4.1.1", "", { "dependencies": { "@isaacs/cliui": "^8.0.2" } }, "sha512-zptv57P3GpL+O0I7VdMJNBZCu+BPHVQUk55Ft8/QCJjTVxrnJHuVuX/0Bl2A6/+2oyR/ZMEuFKwmzqqZ/U5nPQ=="], + + "javascript-stringify": ["javascript-stringify@2.1.0", "", {}, "sha512-JVAfqNPTvNq3sB/VHQJAFxN/sPgKnsKrCwyRt15zwNCdrMMJDdcEOdubuy+DuJYYdm0ox1J4uzEuYKkN+9yhVg=="], + + "jiti": ["jiti@2.4.2", "", { "bin": { "jiti": "lib/jiti-cli.mjs" } }, "sha512-rg9zJN+G4n2nfJl5MW3BMygZX56zKPNVEYYqq7adpmMh4Jn2QNEwhvQlFy6jPVdcod7txZtKHWnyZiA3a0zP7A=="], + + "js-tokens": ["js-tokens@4.0.0", "", {}, "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ=="], + + "jsesc": ["jsesc@3.1.0", "", { "bin": { "jsesc": "bin/jsesc" } }, "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA=="], + + "json5": ["json5@2.2.3", "", { "bin": { "json5": "lib/cli.js" } }, "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg=="], + + "jsonfile": ["jsonfile@6.1.0", "", { "dependencies": { "universalify": "^2.0.0" }, "optionalDependencies": { "graceful-fs": "^4.1.6" } }, "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ=="], + + "katex": ["katex@0.16.22", "", { "dependencies": { "commander": "^8.3.0" }, "bin": { "katex": "cli.js" } }, "sha512-XCHRdUw4lf3SKBaJe4EvgqIuWwkPSo9XoeO8GjQW94Bp7TWv9hNhzZjZ+OH9yf1UmLygb7DIT5GSFQiyt16zYg=="], + + "khroma": ["khroma@2.1.0", "", {}, "sha512-Ls993zuzfayK269Svk9hzpeGUKob/sIgZzyHYdjQoAdQetRKpOLj+k/QQQ/6Qi0Yz65mlROrfd+Ev+1+7dz9Kw=="], + + "kolorist": 
["kolorist@1.8.0", "", {}, "sha512-Y+60/zizpJ3HRH8DCss+q95yr6145JXZo46OTpFvDZWLfRCE4qChOyk1b26nMaNpfHHgxagk9dXT5OP0Tfe+dQ=="], + + "langium": ["langium@3.3.1", "", { "dependencies": { "chevrotain": "~11.0.3", "chevrotain-allstar": "~0.3.0", "vscode-languageserver": "~9.0.1", "vscode-languageserver-textdocument": "~1.0.11", "vscode-uri": "~3.0.8" } }, "sha512-QJv/h939gDpvT+9SiLVlY7tZC3xB2qK57v0J04Sh9wpMb6MP1q8gB21L3WIo8T5P1MSMg3Ep14L7KkDCFG3y4w=="], + + "layout-base": ["layout-base@1.0.2", "", {}, "sha512-8h2oVEZNktL4BH2JCOI90iD1yXwL6iNW7KcCKT2QZgQJR2vbqDsldCTPRU9NifTCqHZci57XvQQ15YTu+sTYPg=="], + + "lightningcss": ["lightningcss@1.30.1", "", { "dependencies": { "detect-libc": "^2.0.3" }, "optionalDependencies": { "lightningcss-darwin-arm64": "1.30.1", "lightningcss-darwin-x64": "1.30.1", "lightningcss-freebsd-x64": "1.30.1", "lightningcss-linux-arm-gnueabihf": "1.30.1", "lightningcss-linux-arm64-gnu": "1.30.1", "lightningcss-linux-arm64-musl": "1.30.1", "lightningcss-linux-x64-gnu": "1.30.1", "lightningcss-linux-x64-musl": "1.30.1", "lightningcss-win32-arm64-msvc": "1.30.1", "lightningcss-win32-x64-msvc": "1.30.1" } }, "sha512-xi6IyHML+c9+Q3W0S4fCQJOym42pyurFiJUHEcEyHS0CeKzia4yZDEsLlqOFykxOdHpNy0NmvVO31vcSqAxJCg=="], + + "lightningcss-darwin-arm64": ["lightningcss-darwin-arm64@1.30.1", "", { "os": "darwin", "cpu": "arm64" }, "sha512-c8JK7hyE65X1MHMN+Viq9n11RRC7hgin3HhYKhrMyaXflk5GVplZ60IxyoVtzILeKr+xAJwg6zK6sjTBJ0FKYQ=="], + + "lightningcss-darwin-x64": ["lightningcss-darwin-x64@1.30.1", "", { "os": "darwin", "cpu": "x64" }, "sha512-k1EvjakfumAQoTfcXUcHQZhSpLlkAuEkdMBsI/ivWw9hL+7FtilQc0Cy3hrx0AAQrVtQAbMI7YjCgYgvn37PzA=="], + + "lightningcss-freebsd-x64": ["lightningcss-freebsd-x64@1.30.1", "", { "os": "freebsd", "cpu": "x64" }, "sha512-kmW6UGCGg2PcyUE59K5r0kWfKPAVy4SltVeut+umLCFoJ53RdCUWxcRDzO1eTaxf/7Q2H7LTquFHPL5R+Gjyig=="], + + "lightningcss-linux-arm-gnueabihf": ["lightningcss-linux-arm-gnueabihf@1.30.1", "", { "os": "linux", "cpu": "arm" }, "sha512-MjxUShl1v8pit+6D/zSPq9S9dQ2NPFSQwGvxBCYaBYLPlCWuPh9/t1MRS8iUaR8i+a6w7aps+B4N0S1TYP/R+Q=="], + + "lightningcss-linux-arm64-gnu": ["lightningcss-linux-arm64-gnu@1.30.1", "", { "os": "linux", "cpu": "arm64" }, "sha512-gB72maP8rmrKsnKYy8XUuXi/4OctJiuQjcuqWNlJQ6jZiWqtPvqFziskH3hnajfvKB27ynbVCucKSm2rkQp4Bw=="], + + "lightningcss-linux-arm64-musl": ["lightningcss-linux-arm64-musl@1.30.1", "", { "os": "linux", "cpu": "arm64" }, "sha512-jmUQVx4331m6LIX+0wUhBbmMX7TCfjF5FoOH6SD1CttzuYlGNVpA7QnrmLxrsub43ClTINfGSYyHe2HWeLl5CQ=="], + + "lightningcss-linux-x64-gnu": ["lightningcss-linux-x64-gnu@1.30.1", "", { "os": "linux", "cpu": "x64" }, "sha512-piWx3z4wN8J8z3+O5kO74+yr6ze/dKmPnI7vLqfSqI8bccaTGY5xiSGVIJBDd5K5BHlvVLpUB3S2YCfelyJ1bw=="], + + "lightningcss-linux-x64-musl": ["lightningcss-linux-x64-musl@1.30.1", "", { "os": "linux", "cpu": "x64" }, "sha512-rRomAK7eIkL+tHY0YPxbc5Dra2gXlI63HL+v1Pdi1a3sC+tJTcFrHX+E86sulgAXeI7rSzDYhPSeHHjqFhqfeQ=="], + + "lightningcss-win32-arm64-msvc": ["lightningcss-win32-arm64-msvc@1.30.1", "", { "os": "win32", "cpu": "arm64" }, "sha512-mSL4rqPi4iXq5YVqzSsJgMVFENoa4nGTT/GjO2c0Yl9OuQfPsIfncvLrEW6RbbB24WtZ3xP/2CCmI3tNkNV4oA=="], + + "lightningcss-win32-x64-msvc": ["lightningcss-win32-x64-msvc@1.30.1", "", { "os": "win32", "cpu": "x64" }, "sha512-PVqXh48wh4T53F/1CCu8PIPCxLzWyCnn/9T5W1Jpmdy5h9Cwd+0YQS6/LwhHXSafuc61/xg9Lv5OrCby6a++jg=="], + + "local-pkg": ["local-pkg@1.1.1", "", { "dependencies": { "mlly": "^1.7.4", "pkg-types": "^2.0.1", "quansync": "^0.2.8" } }, 
"sha512-WunYko2W1NcdfAFpuLUoucsgULmgDBRkdxHxWQ7mK0cQqwPiy8E1enjuRBrhLtZkB5iScJ1XIPdhVEFK8aOLSg=="], + + "locate-path": ["locate-path@6.0.0", "", { "dependencies": { "p-locate": "^5.0.0" } }, "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw=="], + + "lodash-es": ["lodash-es@4.17.21", "", {}, "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw=="], + + "log-symbols": ["log-symbols@5.1.0", "", { "dependencies": { "chalk": "^5.0.0", "is-unicode-supported": "^1.1.0" } }, "sha512-l0x2DvrW294C9uDCoQe1VSU4gf529FkSZ6leBl4TiqZH/e+0R7hSfHQBNut2mNygDgHwvYHfFLn6Oxb3VWj2rA=="], + + "longest-streak": ["longest-streak@3.1.0", "", {}, "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g=="], + + "lru-cache": ["lru-cache@11.1.0", "", {}, "sha512-QIXZUBJUx+2zHUdQujWejBkcD9+cs94tLn0+YL8UrCh+D5sCXZ4c7LaEH48pNwRY3MLDgqUFyhlCyjJPf1WP0A=="], + + "mark.js": ["mark.js@8.11.1", "", {}, "sha512-1I+1qpDt4idfgLQG+BNWmrqku+7/2bi5nLf4YwF8y8zXvmfiTBY3PV3ZibfrjBueCByROpuBjLLFCajqkgYoLQ=="], + + "markdown-extensions": ["markdown-extensions@2.0.0", "", {}, "sha512-o5vL7aDWatOTX8LzaS1WMoaoxIiLRQJuIKKe2wAw6IeULDHaqbiqiggmx+pKvZDb1Sj+pE46Sn1T7lCqfFtg1Q=="], + + "markdown-table": ["markdown-table@3.0.4", "", {}, "sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw=="], + + "marked": ["marked@16.0.0", "", { "bin": { "marked": "bin/marked.js" } }, "sha512-MUKMXDjsD/eptB7GPzxo4xcnLS6oo7/RHimUMHEDRhUooPwmN9BEpMl7AEOJv3bmso169wHI2wUF9VQgL7zfmA=="], + + "mdast-util-directive": ["mdast-util-directive@3.1.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "@types/unist": "^3.0.0", "ccount": "^2.0.0", "devlop": "^1.0.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0", "parse-entities": "^4.0.0", "stringify-entities": "^4.0.0", "unist-util-visit-parents": "^6.0.0" } }, "sha512-I3fNFt+DHmpWCYAT7quoM6lHf9wuqtI+oCOfvILnoicNIqjh5E3dEJWiXuYME2gNe8vl1iMQwyUHa7bgFmak6Q=="], + + "mdast-util-find-and-replace": ["mdast-util-find-and-replace@3.0.2", "", { "dependencies": { "@types/mdast": "^4.0.0", "escape-string-regexp": "^5.0.0", "unist-util-is": "^6.0.0", "unist-util-visit-parents": "^6.0.0" } }, "sha512-Tmd1Vg/m3Xz43afeNxDIhWRtFZgM2VLyaf4vSTYwudTyeuTneoL3qtWMA5jeLyz/O1vDJmmV4QuScFCA2tBPwg=="], + + "mdast-util-from-markdown": ["mdast-util-from-markdown@2.0.2", "", { "dependencies": { "@types/mdast": "^4.0.0", "@types/unist": "^3.0.0", "decode-named-character-reference": "^1.0.0", "devlop": "^1.0.0", "mdast-util-to-string": "^4.0.0", "micromark": "^4.0.0", "micromark-util-decode-numeric-character-reference": "^2.0.0", "micromark-util-decode-string": "^2.0.0", "micromark-util-normalize-identifier": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0", "unist-util-stringify-position": "^4.0.0" } }, "sha512-uZhTV/8NBuw0WHkPTrCqDOl0zVe1BIng5ZtHoDk49ME1qqcjYmmLmOf0gELgcRMxN4w2iuIeVso5/6QymSrgmA=="], + + "mdast-util-frontmatter": ["mdast-util-frontmatter@2.0.1", "", { "dependencies": { "@types/mdast": "^4.0.0", "devlop": "^1.0.0", "escape-string-regexp": "^5.0.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0", "micromark-extension-frontmatter": "^2.0.0" } }, "sha512-LRqI9+wdgC25P0URIJY9vwocIzCcksduHQ9OF2joxQoyTNVduwLAFUzjoopuRJbJAReaKrNQKAZKL3uCMugWJA=="], + + "mdast-util-gfm": ["mdast-util-gfm@3.1.0", "", { "dependencies": { "mdast-util-from-markdown": "^2.0.0", "mdast-util-gfm-autolink-literal": 
"^2.0.0", "mdast-util-gfm-footnote": "^2.0.0", "mdast-util-gfm-strikethrough": "^2.0.0", "mdast-util-gfm-table": "^2.0.0", "mdast-util-gfm-task-list-item": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, "sha512-0ulfdQOM3ysHhCJ1p06l0b0VKlhU0wuQs3thxZQagjcjPrlFRqY215uZGHHJan9GEAXd9MbfPjFJz+qMkVR6zQ=="], + + "mdast-util-gfm-autolink-literal": ["mdast-util-gfm-autolink-literal@2.0.1", "", { "dependencies": { "@types/mdast": "^4.0.0", "ccount": "^2.0.0", "devlop": "^1.0.0", "mdast-util-find-and-replace": "^3.0.0", "micromark-util-character": "^2.0.0" } }, "sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ=="], + + "mdast-util-gfm-footnote": ["mdast-util-gfm-footnote@2.1.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "devlop": "^1.1.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0", "micromark-util-normalize-identifier": "^2.0.0" } }, "sha512-sqpDWlsHn7Ac9GNZQMeUzPQSMzR6Wv0WKRNvQRg0KqHh02fpTz69Qc1QSseNX29bhz1ROIyNyxExfawVKTm1GQ=="], + + "mdast-util-gfm-strikethrough": ["mdast-util-gfm-strikethrough@2.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, "sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg=="], + + "mdast-util-gfm-table": ["mdast-util-gfm-table@2.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "devlop": "^1.0.0", "markdown-table": "^3.0.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, "sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg=="], + + "mdast-util-gfm-task-list-item": ["mdast-util-gfm-task-list-item@2.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "devlop": "^1.0.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, "sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ=="], + + "mdast-util-mdx": ["mdast-util-mdx@3.0.0", "", { "dependencies": { "mdast-util-from-markdown": "^2.0.0", "mdast-util-mdx-expression": "^2.0.0", "mdast-util-mdx-jsx": "^3.0.0", "mdast-util-mdxjs-esm": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, "sha512-JfbYLAW7XnYTTbUsmpu0kdBUVe+yKVJZBItEjwyYJiDJuZ9w4eeaqks4HQO+R7objWgS2ymV60GYpI14Ug554w=="], + + "mdast-util-mdx-expression": ["mdast-util-mdx-expression@2.0.1", "", { "dependencies": { "@types/estree-jsx": "^1.0.0", "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", "devlop": "^1.0.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, "sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ=="], + + "mdast-util-mdx-jsx": ["mdast-util-mdx-jsx@3.2.0", "", { "dependencies": { "@types/estree-jsx": "^1.0.0", "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", "@types/unist": "^3.0.0", "ccount": "^2.0.0", "devlop": "^1.1.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0", "parse-entities": "^4.0.0", "stringify-entities": "^4.0.0", "unist-util-stringify-position": "^4.0.0", "vfile-message": "^4.0.0" } }, "sha512-lj/z8v0r6ZtsN/cGNNtemmmfoLAFZnjMbNyLzBafjzikOM+glrjNHPlf6lQDOTccj9n5b0PPihEBbhneMyGs1Q=="], + + "mdast-util-mdxjs-esm": ["mdast-util-mdxjs-esm@2.0.1", "", { "dependencies": { "@types/estree-jsx": "^1.0.0", "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", "devlop": "^1.0.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, 
"sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg=="], + + "mdast-util-phrasing": ["mdast-util-phrasing@4.1.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "unist-util-is": "^6.0.0" } }, "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w=="], + + "mdast-util-to-hast": ["mdast-util-to-hast@13.2.0", "", { "dependencies": { "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", "@ungap/structured-clone": "^1.0.0", "devlop": "^1.0.0", "micromark-util-sanitize-uri": "^2.0.0", "trim-lines": "^3.0.0", "unist-util-position": "^5.0.0", "unist-util-visit": "^5.0.0", "vfile": "^6.0.0" } }, "sha512-QGYKEuUsYT9ykKBCMOEDLsU5JRObWQusAolFMeko/tYPufNkRffBAQjIE+99jbA87xv6FgmjLtwjh9wBWajwAA=="], + + "mdast-util-to-markdown": ["mdast-util-to-markdown@2.1.2", "", { "dependencies": { "@types/mdast": "^4.0.0", "@types/unist": "^3.0.0", "longest-streak": "^3.0.0", "mdast-util-phrasing": "^4.0.0", "mdast-util-to-string": "^4.0.0", "micromark-util-classify-character": "^2.0.0", "micromark-util-decode-string": "^2.0.0", "unist-util-visit": "^5.0.0", "zwitch": "^2.0.0" } }, "sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA=="], + + "mdast-util-to-string": ["mdast-util-to-string@4.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0" } }, "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg=="], + + "media-query-parser": ["media-query-parser@2.0.2", "", { "dependencies": { "@babel/runtime": "^7.12.5" } }, "sha512-1N4qp+jE0pL5Xv4uEcwVUhIkwdUO3S/9gML90nqKA7v7FcOS5vUtatfzok9S9U1EJU8dHWlcv95WLnKmmxZI9w=="], + + "merge-stream": ["merge-stream@2.0.0", "", {}, "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w=="], + + "merge2": ["merge2@1.4.1", "", {}, "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg=="], + + "mermaid": ["mermaid@11.9.0", "", { "dependencies": { "@braintree/sanitize-url": "^7.0.4", "@iconify/utils": "^2.1.33", "@mermaid-js/parser": "^0.6.2", "@types/d3": "^7.4.3", "cytoscape": "^3.29.3", "cytoscape-cose-bilkent": "^4.1.0", "cytoscape-fcose": "^2.2.0", "d3": "^7.9.0", "d3-sankey": "^0.12.3", "dagre-d3-es": "7.0.11", "dayjs": "^1.11.13", "dompurify": "^3.2.5", "katex": "^0.16.22", "khroma": "^2.1.0", "lodash-es": "^4.17.21", "marked": "^16.0.0", "roughjs": "^4.6.6", "stylis": "^4.3.6", "ts-dedent": "^2.2.0", "uuid": "^11.1.0" } }, "sha512-YdPXn9slEwO0omQfQIsW6vS84weVQftIyyTGAZCwM//MGhPzL1+l6vO6bkf0wnP4tHigH1alZ5Ooy3HXI2gOag=="], + + "mermaid-isomorphic": ["mermaid-isomorphic@3.0.4", "", { "dependencies": { "@fortawesome/fontawesome-free": "^6.0.0", "mermaid": "^11.0.0" }, "peerDependencies": { "playwright": "1" }, "optionalPeers": ["playwright"] }, "sha512-XQTy7H1XwHK3DPEHf+ZNWiqUEd9BwX3Xws38R9Fj2gx718srmgjlZoUzHr+Tca+O+dqJOJsAJaKzCoP65QDfDg=="], + + "micromark": ["micromark@4.0.2", "", { "dependencies": { "@types/debug": "^4.0.0", "debug": "^4.0.0", "decode-named-character-reference": "^1.0.0", "devlop": "^1.0.0", "micromark-core-commonmark": "^2.0.0", "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-chunked": "^2.0.0", "micromark-util-combine-extensions": "^2.0.0", "micromark-util-decode-numeric-character-reference": "^2.0.0", "micromark-util-encode": "^2.0.0", "micromark-util-normalize-identifier": "^2.0.0", "micromark-util-resolve-all": "^2.0.0", "micromark-util-sanitize-uri": "^2.0.0", 
"micromark-util-subtokenize": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA=="], + + "micromark-core-commonmark": ["micromark-core-commonmark@2.0.3", "", { "dependencies": { "decode-named-character-reference": "^1.0.0", "devlop": "^1.0.0", "micromark-factory-destination": "^2.0.0", "micromark-factory-label": "^2.0.0", "micromark-factory-space": "^2.0.0", "micromark-factory-title": "^2.0.0", "micromark-factory-whitespace": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-chunked": "^2.0.0", "micromark-util-classify-character": "^2.0.0", "micromark-util-html-tag-name": "^2.0.0", "micromark-util-normalize-identifier": "^2.0.0", "micromark-util-resolve-all": "^2.0.0", "micromark-util-subtokenize": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg=="], + + "micromark-extension-directive": ["micromark-extension-directive@3.0.2", "", { "dependencies": { "devlop": "^1.0.0", "micromark-factory-space": "^2.0.0", "micromark-factory-whitespace": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0", "parse-entities": "^4.0.0" } }, "sha512-wjcXHgk+PPdmvR58Le9d7zQYWy+vKEU9Se44p2CrCDPiLr2FMyiT4Fyb5UFKFC66wGB3kPlgD7q3TnoqPS7SZA=="], + + "micromark-extension-frontmatter": ["micromark-extension-frontmatter@2.0.0", "", { "dependencies": { "fault": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-C4AkuM3dA58cgZha7zVnuVxBhDsbttIMiytjgsM2XbHAB2faRVaHRle40558FBN+DJcrLNCoqG5mlrpdU4cRtg=="], + + "micromark-extension-gfm": ["micromark-extension-gfm@3.0.0", "", { "dependencies": { "micromark-extension-gfm-autolink-literal": "^2.0.0", "micromark-extension-gfm-footnote": "^2.0.0", "micromark-extension-gfm-strikethrough": "^2.0.0", "micromark-extension-gfm-table": "^2.0.0", "micromark-extension-gfm-tagfilter": "^2.0.0", "micromark-extension-gfm-task-list-item": "^2.0.0", "micromark-util-combine-extensions": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w=="], + + "micromark-extension-gfm-autolink-literal": ["micromark-extension-gfm-autolink-literal@2.1.0", "", { "dependencies": { "micromark-util-character": "^2.0.0", "micromark-util-sanitize-uri": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw=="], + + "micromark-extension-gfm-footnote": ["micromark-extension-gfm-footnote@2.1.0", "", { "dependencies": { "devlop": "^1.0.0", "micromark-core-commonmark": "^2.0.0", "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-normalize-identifier": "^2.0.0", "micromark-util-sanitize-uri": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw=="], + + "micromark-extension-gfm-strikethrough": ["micromark-extension-gfm-strikethrough@2.1.0", "", { "dependencies": { "devlop": "^1.0.0", "micromark-util-chunked": "^2.0.0", "micromark-util-classify-character": "^2.0.0", "micromark-util-resolve-all": "^2.0.0", "micromark-util-symbol": "^2.0.0", 
"micromark-util-types": "^2.0.0" } }, "sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw=="], + + "micromark-extension-gfm-table": ["micromark-extension-gfm-table@2.1.1", "", { "dependencies": { "devlop": "^1.0.0", "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-t2OU/dXXioARrC6yWfJ4hqB7rct14e8f7m0cbI5hUmDyyIlwv5vEtooptH8INkbLzOatzKuVbQmAYcbWoyz6Dg=="], + + "micromark-extension-gfm-tagfilter": ["micromark-extension-gfm-tagfilter@2.0.0", "", { "dependencies": { "micromark-util-types": "^2.0.0" } }, "sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg=="], + + "micromark-extension-gfm-task-list-item": ["micromark-extension-gfm-task-list-item@2.1.0", "", { "dependencies": { "devlop": "^1.0.0", "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw=="], + + "micromark-extension-mdx-expression": ["micromark-extension-mdx-expression@3.0.1", "", { "dependencies": { "@types/estree": "^1.0.0", "devlop": "^1.0.0", "micromark-factory-mdx-expression": "^2.0.0", "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-events-to-acorn": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-dD/ADLJ1AeMvSAKBwO22zG22N4ybhe7kFIZ3LsDI0GlsNr2A3KYxb0LdC1u5rj4Nw+CHKY0RVdnHX8vj8ejm4Q=="], + + "micromark-extension-mdx-jsx": ["micromark-extension-mdx-jsx@3.0.2", "", { "dependencies": { "@types/estree": "^1.0.0", "devlop": "^1.0.0", "estree-util-is-identifier-name": "^3.0.0", "micromark-factory-mdx-expression": "^2.0.0", "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-events-to-acorn": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0", "vfile-message": "^4.0.0" } }, "sha512-e5+q1DjMh62LZAJOnDraSSbDMvGJ8x3cbjygy2qFEi7HCeUT4BDKCvMozPozcD6WmOt6sVvYDNBKhFSz3kjOVQ=="], + + "micromark-extension-mdx-md": ["micromark-extension-mdx-md@2.0.0", "", { "dependencies": { "micromark-util-types": "^2.0.0" } }, "sha512-EpAiszsB3blw4Rpba7xTOUptcFeBFi+6PY8VnJ2hhimH+vCQDirWgsMpz7w1XcZE7LVrSAUGb9VJpG9ghlYvYQ=="], + + "micromark-extension-mdxjs": ["micromark-extension-mdxjs@3.0.0", "", { "dependencies": { "acorn": "^8.0.0", "acorn-jsx": "^5.0.0", "micromark-extension-mdx-expression": "^3.0.0", "micromark-extension-mdx-jsx": "^3.0.0", "micromark-extension-mdx-md": "^2.0.0", "micromark-extension-mdxjs-esm": "^3.0.0", "micromark-util-combine-extensions": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-A873fJfhnJ2siZyUrJ31l34Uqwy4xIFmvPY1oj+Ean5PHcPBYzEsvqvWGaWcfEIr11O5Dlw3p2y0tZWpKHDejQ=="], + + "micromark-extension-mdxjs-esm": ["micromark-extension-mdxjs-esm@3.0.0", "", { "dependencies": { "@types/estree": "^1.0.0", "devlop": "^1.0.0", "micromark-core-commonmark": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-events-to-acorn": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0", "unist-util-position-from-estree": "^2.0.0", "vfile-message": "^4.0.0" } }, "sha512-DJFl4ZqkErRpq/dAPyeWp15tGrcrrJho1hKK5uBS70BCtfrIFg81sqcTVu3Ta+KD1Tk5vAtBNElWxtAa+m8K9A=="], + + "micromark-factory-destination": ["micromark-factory-destination@2.0.1", "", { "dependencies": { 
"micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA=="], + + "micromark-factory-label": ["micromark-factory-label@2.0.1", "", { "dependencies": { "devlop": "^1.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg=="], + + "micromark-factory-mdx-expression": ["micromark-factory-mdx-expression@2.0.3", "", { "dependencies": { "@types/estree": "^1.0.0", "devlop": "^1.0.0", "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-events-to-acorn": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0", "unist-util-position-from-estree": "^2.0.0", "vfile-message": "^4.0.0" } }, "sha512-kQnEtA3vzucU2BkrIa8/VaSAsP+EJ3CKOvhMuJgOEGg9KDC6OAY6nSnNDVRiVNRqj7Y4SlSzcStaH/5jge8JdQ=="], + + "micromark-factory-space": ["micromark-factory-space@2.0.1", "", { "dependencies": { "micromark-util-character": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg=="], + + "micromark-factory-title": ["micromark-factory-title@2.0.1", "", { "dependencies": { "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw=="], + + "micromark-factory-whitespace": ["micromark-factory-whitespace@2.0.1", "", { "dependencies": { "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ=="], + + "micromark-util-character": ["micromark-util-character@2.1.1", "", { "dependencies": { "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q=="], + + "micromark-util-chunked": ["micromark-util-chunked@2.0.1", "", { "dependencies": { "micromark-util-symbol": "^2.0.0" } }, "sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA=="], + + "micromark-util-classify-character": ["micromark-util-classify-character@2.0.1", "", { "dependencies": { "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q=="], + + "micromark-util-combine-extensions": ["micromark-util-combine-extensions@2.0.1", "", { "dependencies": { "micromark-util-chunked": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg=="], + + "micromark-util-decode-numeric-character-reference": ["micromark-util-decode-numeric-character-reference@2.0.2", "", { "dependencies": { "micromark-util-symbol": "^2.0.0" } }, "sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw=="], + + "micromark-util-decode-string": ["micromark-util-decode-string@2.0.1", "", { "dependencies": { "decode-named-character-reference": "^1.0.0", "micromark-util-character": "^2.0.0", 
"micromark-util-decode-numeric-character-reference": "^2.0.0", "micromark-util-symbol": "^2.0.0" } }, "sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ=="], + + "micromark-util-encode": ["micromark-util-encode@2.0.1", "", {}, "sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw=="], + + "micromark-util-events-to-acorn": ["micromark-util-events-to-acorn@2.0.3", "", { "dependencies": { "@types/estree": "^1.0.0", "@types/unist": "^3.0.0", "devlop": "^1.0.0", "estree-util-visit": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0", "vfile-message": "^4.0.0" } }, "sha512-jmsiEIiZ1n7X1Rr5k8wVExBQCg5jy4UXVADItHmNk1zkwEVhBuIUKRu3fqv+hs4nxLISi2DQGlqIOGiFxgbfHg=="], + + "micromark-util-html-tag-name": ["micromark-util-html-tag-name@2.0.1", "", {}, "sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA=="], + + "micromark-util-normalize-identifier": ["micromark-util-normalize-identifier@2.0.1", "", { "dependencies": { "micromark-util-symbol": "^2.0.0" } }, "sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q=="], + + "micromark-util-resolve-all": ["micromark-util-resolve-all@2.0.1", "", { "dependencies": { "micromark-util-types": "^2.0.0" } }, "sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg=="], + + "micromark-util-sanitize-uri": ["micromark-util-sanitize-uri@2.0.1", "", { "dependencies": { "micromark-util-character": "^2.0.0", "micromark-util-encode": "^2.0.0", "micromark-util-symbol": "^2.0.0" } }, "sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ=="], + + "micromark-util-subtokenize": ["micromark-util-subtokenize@2.1.0", "", { "dependencies": { "devlop": "^1.0.0", "micromark-util-chunked": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA=="], + + "micromark-util-symbol": ["micromark-util-symbol@2.0.1", "", {}, "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q=="], + + "micromark-util-types": ["micromark-util-types@2.0.2", "", {}, "sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA=="], + + "micromatch": ["micromatch@4.0.8", "", { "dependencies": { "braces": "^3.0.3", "picomatch": "^2.3.1" } }, "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA=="], + + "mime": ["mime@1.6.0", "", { "bin": { "mime": "cli.js" } }, "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg=="], + + "mime-db": ["mime-db@1.54.0", "", {}, "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ=="], + + "mimic-fn": ["mimic-fn@2.1.0", "", {}, "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg=="], + + "mini-svg-data-uri": ["mini-svg-data-uri@1.4.4", "", { "bin": { "mini-svg-data-uri": "cli.js" } }, "sha512-r9deDe9p5FJUPZAk3A59wGH7Ii9YrjjWw0jmw/liSbHl2CHiyXj6FcDXDu2K3TjVAXqiJdaw3xxwlZZr9E6nHg=="], + + "minimatch": ["minimatch@10.0.3", "", { "dependencies": { "@isaacs/brace-expansion": "^5.0.0" } }, "sha512-IPZ167aShDZZUMdRk66cyQAW3qr0WzbHkPdMYa8bzZhlHhO3jALbKdxcaak7W9FfT2rZNpQuUu4Od7ILEpXSaw=="], + + "minipass": ["minipass@7.1.2", "", {}, 
"sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw=="], + + "minisearch": ["minisearch@6.3.0", "", {}, "sha512-ihFnidEeU8iXzcVHy74dhkxh/dn8Dc08ERl0xwoMMGqp4+LvRSCgicb+zGqWthVokQKvCSxITlh3P08OzdTYCQ=="], + + "mlly": ["mlly@1.7.4", "", { "dependencies": { "acorn": "^8.14.0", "pathe": "^2.0.1", "pkg-types": "^1.3.0", "ufo": "^1.5.4" } }, "sha512-qmdSIPC4bDJXgZTCR7XosJiNKySV7O215tsPtDN9iEO/7q/76b/ijtgRu/+epFXSJhijtTCCGp3DWS549P3xKw=="], + + "modern-ahocorasick": ["modern-ahocorasick@1.1.0", "", {}, "sha512-sEKPVl2rM+MNVkGQt3ChdmD8YsigmXdn5NifZn6jiwn9LRJpWm8F3guhaqrJT/JOat6pwpbXEk6kv+b9DMIjsQ=="], + + "ms": ["ms@2.0.0", "", {}, "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="], + + "nanoid": ["nanoid@3.3.11", "", { "bin": { "nanoid": "bin/nanoid.cjs" } }, "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w=="], + + "negotiator": ["negotiator@0.6.4", "", {}, "sha512-myRT3DiWPHqho5PrJaIRyaMv2kgYf0mUVgBNOYMuCH5Ki1yEiQaf/ZJuQ62nvpc44wL5WDbTX7yGJi1Neevw8w=="], + + "node-releases": ["node-releases@2.0.19", "", {}, "sha512-xxOWJsBKtzAq7DY0J+DTzuz58K8e7sJbdgwkbMWQe8UYB6ekmsQ45q0M/tJDsGaZmbC+l7n57UV8Hl5tHxO9uw=="], + + "normalize-range": ["normalize-range@0.1.2", "", {}, "sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA=="], + + "npm-run-path": ["npm-run-path@4.0.1", "", { "dependencies": { "path-key": "^3.0.0" } }, "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw=="], + + "nth-check": ["nth-check@2.1.1", "", { "dependencies": { "boolbase": "^1.0.0" } }, "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w=="], + + "on-finished": ["on-finished@2.4.1", "", { "dependencies": { "ee-first": "1.1.1" } }, "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg=="], + + "on-headers": ["on-headers@1.0.2", "", {}, "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA=="], + + "onetime": ["onetime@5.1.2", "", { "dependencies": { "mimic-fn": "^2.1.0" } }, "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg=="], + + "oniguruma-to-es": ["oniguruma-to-es@2.3.0", "", { "dependencies": { "emoji-regex-xs": "^1.0.0", "regex": "^5.1.1", "regex-recursion": "^5.1.1" } }, "sha512-bwALDxriqfKGfUufKGGepCzu9x7nJQuoRoAFp4AnwehhC2crqrDIAP/uN2qdlsAvSMpeRC3+Yzhqc7hLmle5+g=="], + + "ora": ["ora@7.0.1", "", { "dependencies": { "chalk": "^5.3.0", "cli-cursor": "^4.0.0", "cli-spinners": "^2.9.0", "is-interactive": "^2.0.0", "is-unicode-supported": "^1.3.0", "log-symbols": "^5.1.0", "stdin-discarder": "^0.1.0", "string-width": "^6.1.0", "strip-ansi": "^7.1.0" } }, "sha512-0TUxTiFJWv+JnjWm4o9yvuskpEJLXTcng8MJuKd+SzAzp2o+OP3HWqNhB4OdJRt1Vsd9/mR0oyaEYlOnL7XIRw=="], + + "p-limit": ["p-limit@5.0.0", "", { "dependencies": { "yocto-queue": "^1.0.0" } }, "sha512-/Eaoq+QyLSiXQ4lyYV23f14mZRQcXnxfHrN0vCai+ak9G0pp9iEQukIIZq5NccEvwRB8PUnZT0KsOoDCINS1qQ=="], + + "p-locate": ["p-locate@5.0.0", "", { "dependencies": { "p-limit": "^3.0.2" } }, "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw=="], + + "package-json-from-dist": ["package-json-from-dist@1.0.1", "", {}, "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw=="], + + "package-manager-detector": 
["package-manager-detector@1.3.0", "", {}, "sha512-ZsEbbZORsyHuO00lY1kV3/t72yp6Ysay6Pd17ZAlNGuGwmWDLCJxFpRs0IzfXfj1o4icJOkUEioexFHzyPurSQ=="], + + "parse-entities": ["parse-entities@4.0.2", "", { "dependencies": { "@types/unist": "^2.0.0", "character-entities-legacy": "^3.0.0", "character-reference-invalid": "^2.0.0", "decode-named-character-reference": "^1.0.0", "is-alphanumerical": "^2.0.0", "is-decimal": "^2.0.0", "is-hexadecimal": "^2.0.0" } }, "sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw=="], + + "parse5": ["parse5@7.3.0", "", { "dependencies": { "entities": "^6.0.0" } }, "sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw=="], + + "parseurl": ["parseurl@1.3.3", "", {}, "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ=="], + + "path-data-parser": ["path-data-parser@0.1.0", "", {}, "sha512-NOnmBpt5Y2RWbuv0LMzsayp3lVylAHLPUTut412ZA3l+C4uw4ZVkQbjShYCQ8TCpUMdPapr4YjUqLYD6v68j+w=="], + + "path-exists": ["path-exists@4.0.0", "", {}, "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w=="], + + "path-key": ["path-key@3.1.1", "", {}, "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q=="], + + "path-scurry": ["path-scurry@2.0.0", "", { "dependencies": { "lru-cache": "^11.0.0", "minipass": "^7.1.2" } }, "sha512-ypGJsmGtdXUOeM5u93TyeIEfEhM6s+ljAhrk5vAvSx8uyY/02OvrZnA0YNGUrPXfpJMgI1ODd3nwz8Npx4O4cg=="], + + "path-type": ["path-type@6.0.0", "", {}, "sha512-Vj7sf++t5pBD637NSfkxpHSMfWaeig5+DKWLhcqIYx6mWQz5hdJTGDVMQiJcw1ZYkhs7AazKDGpRVji1LJCZUQ=="], + + "pathe": ["pathe@2.0.3", "", {}, "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w=="], + + "picocolors": ["picocolors@1.1.1", "", {}, "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA=="], + + "picomatch": ["picomatch@4.0.3", "", {}, "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q=="], + + "pkg-types": ["pkg-types@1.3.1", "", { "dependencies": { "confbox": "^0.1.8", "mlly": "^1.7.4", "pathe": "^2.0.1" } }, "sha512-/Jm5M4RvtBFVkKWRu2BLUTNP8/M2a+UwuAX+ae4770q1qVGtfjG+WTCupoZixokjmHiry8uI+dlY8KXYV5HVVQ=="], + + "playwright": ["playwright@1.54.1", "", { "dependencies": { "playwright-core": "1.54.1" }, "optionalDependencies": { "fsevents": "2.3.2" }, "bin": { "playwright": "cli.js" } }, "sha512-peWpSwIBmSLi6aW2auvrUtf2DqY16YYcCMO8rTVx486jKmDTJg7UAhyrraP98GB8BoPURZP8+nxO7TSd4cPr5g=="], + + "playwright-core": ["playwright-core@1.54.1", "", { "bin": { "playwright-core": "cli.js" } }, "sha512-Nbjs2zjj0htNhzgiy5wu+3w09YetDx5pkrpI/kZotDlDUaYk0HVA5xrBVPdow4SAUIlhgKcJeJg4GRKW6xHusA=="], + + "points-on-curve": ["points-on-curve@0.2.0", "", {}, "sha512-0mYKnYYe9ZcqMCWhUjItv/oHjvgEsfKvnUTg8sAtnHr3GVy7rGkXCb6d5cSyqrWqL4k81b9CPg3urd+T7aop3A=="], + + "points-on-path": ["points-on-path@0.2.1", "", { "dependencies": { "path-data-parser": "0.1.0", "points-on-curve": "0.2.0" } }, "sha512-25ClnWWuw7JbWZcgqY/gJ4FQWadKxGWk+3kR/7kD0tCaDtPPMj7oHu2ToLaVhfpnHrZzYby2w6tUA0eOIuUg8g=="], + + "postcss": ["postcss@8.5.6", "", { "dependencies": { "nanoid": "^3.3.11", "picocolors": "^1.1.1", "source-map-js": "^1.2.1" } }, "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg=="], + + "postcss-value-parser": ["postcss-value-parser@4.2.0", "", {}, 
"sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ=="], + + "property-information": ["property-information@6.5.0", "", {}, "sha512-PgTgs/BlvHxOu8QuEN7wi5A0OmXaBcHpmCSTehcs6Uuu9IkDIEo13Hy7n898RHfrQ49vKCoGeWZSaAK01nwVig=="], + + "quansync": ["quansync@0.2.10", "", {}, "sha512-t41VRkMYbkHyCYmOvx/6URnN80H7k4X0lLdBMGsz+maAwrJQYB1djpV6vHrQIBE0WBSGqhtEHrK9U3DWWH8v7A=="], + + "queue-microtask": ["queue-microtask@1.2.3", "", {}, "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A=="], + + "radix-ui": ["radix-ui@1.4.2", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-accessible-icon": "1.1.7", "@radix-ui/react-accordion": "1.2.11", "@radix-ui/react-alert-dialog": "1.1.14", "@radix-ui/react-arrow": "1.1.7", "@radix-ui/react-aspect-ratio": "1.1.7", "@radix-ui/react-avatar": "1.1.10", "@radix-ui/react-checkbox": "1.3.2", "@radix-ui/react-collapsible": "1.1.11", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-context-menu": "2.2.15", "@radix-ui/react-dialog": "1.1.14", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-dismissable-layer": "1.1.10", "@radix-ui/react-dropdown-menu": "2.1.15", "@radix-ui/react-focus-guards": "1.1.2", "@radix-ui/react-focus-scope": "1.1.7", "@radix-ui/react-form": "0.1.7", "@radix-ui/react-hover-card": "1.1.14", "@radix-ui/react-label": "2.1.7", "@radix-ui/react-menu": "2.1.15", "@radix-ui/react-menubar": "1.1.15", "@radix-ui/react-navigation-menu": "1.2.13", "@radix-ui/react-one-time-password-field": "0.1.7", "@radix-ui/react-password-toggle-field": "0.1.2", "@radix-ui/react-popover": "1.1.14", "@radix-ui/react-popper": "1.2.7", "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.4", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-progress": "1.1.7", "@radix-ui/react-radio-group": "1.3.7", "@radix-ui/react-roving-focus": "1.1.10", "@radix-ui/react-scroll-area": "1.2.9", "@radix-ui/react-select": "2.2.5", "@radix-ui/react-separator": "1.1.7", "@radix-ui/react-slider": "1.3.5", "@radix-ui/react-slot": "1.2.3", "@radix-ui/react-switch": "1.2.5", "@radix-ui/react-tabs": "1.1.12", "@radix-ui/react-toast": "1.2.14", "@radix-ui/react-toggle": "1.1.9", "@radix-ui/react-toggle-group": "1.1.10", "@radix-ui/react-toolbar": "1.1.10", "@radix-ui/react-tooltip": "1.2.7", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-effect-event": "0.0.2", "@radix-ui/react-use-escape-keydown": "1.1.1", "@radix-ui/react-use-is-hydrated": "0.1.0", "@radix-ui/react-use-layout-effect": "1.1.1", "@radix-ui/react-use-size": "1.1.1", "@radix-ui/react-visually-hidden": "1.2.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-fT/3YFPJzf2WUpqDoQi005GS8EpCi+53VhcLaHUj5fwkPYiZAjk1mSxFvbMA8Uq71L03n+WysuYC+mlKkXxt/Q=="], + + "range-parser": ["range-parser@1.2.1", "", {}, "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg=="], + + "react": ["react@19.1.0", "", {}, "sha512-FS+XFBNvn3GTAWq26joslQgWNoFu08F4kl0J4CgdNKADkdSGXQyTCnKteIAJy96Br6YbpEU1LSzV5dYtjMkMDg=="], + + "react-dom": ["react-dom@19.1.0", "", { "dependencies": { "scheduler": "^0.26.0" }, "peerDependencies": { "react": 
"^19.1.0" } }, "sha512-Xs1hdnE+DyKgeHJeJznQmYMIBG3TKIHJJT95Q58nHLSrElKlGQqDTR2HQ9fx5CN/Gk6Vh/kupBTDLU11/nDk/g=="], + + "react-intersection-observer": ["react-intersection-observer@9.16.0", "", { "peerDependencies": { "react": "^17.0.0 || ^18.0.0 || ^19.0.0", "react-dom": "^17.0.0 || ^18.0.0 || ^19.0.0" }, "optionalPeers": ["react-dom"] }, "sha512-w9nJSEp+DrW9KmQmeWHQyfaP6b03v+TdXynaoA964Wxt7mdR3An11z4NNCQgL4gKSK7y1ver2Fq+JKH6CWEzUA=="], + + "react-refresh": ["react-refresh@0.17.0", "", {}, "sha512-z6F7K9bV85EfseRCp2bzrpyQ0Gkw1uLoCel9XBVWPg/TjRj94SkJzUTGfOa4bs7iJvBWtQG0Wq7wnI0syw3EBQ=="], + + "react-remove-scroll": ["react-remove-scroll@2.7.1", "", { "dependencies": { "react-remove-scroll-bar": "^2.3.7", "react-style-singleton": "^2.2.3", "tslib": "^2.1.0", "use-callback-ref": "^1.3.3", "use-sidecar": "^1.1.3" }, "peerDependencies": { "@types/react": "*", "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-HpMh8+oahmIdOuS5aFKKY6Pyog+FNaZV/XyJOq7b4YFwsFHe5yYfdbIalI4k3vU2nSDql7YskmUseHsRrJqIPA=="], + + "react-remove-scroll-bar": ["react-remove-scroll-bar@2.3.8", "", { "dependencies": { "react-style-singleton": "^2.2.2", "tslib": "^2.0.0" }, "peerDependencies": { "@types/react": "*", "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" }, "optionalPeers": ["@types/react"] }, "sha512-9r+yi9+mgU33AKcj6IbT9oRCO78WriSj6t/cF8DWBZJ9aOGPOTEDvdUDz1FwKim7QXWwmHqtdHnRJfhAxEG46Q=="], + + "react-router": ["react-router@7.7.0", "", { "dependencies": { "cookie": "^1.0.1", "set-cookie-parser": "^2.6.0" }, "peerDependencies": { "react": ">=18", "react-dom": ">=18" }, "optionalPeers": ["react-dom"] }, "sha512-3FUYSwlvB/5wRJVTL/aavqHmfUKe0+Xm9MllkYgGo9eDwNdkvwlJGjpPxono1kCycLt6AnDTgjmXvK3/B4QGuw=="], + + "react-style-singleton": ["react-style-singleton@2.2.3", "", { "dependencies": { "get-nonce": "^1.0.0", "tslib": "^2.0.0" }, "peerDependencies": { "@types/react": "*", "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-b6jSvxvVnyptAiLjbkWLE/lOnR4lfTtDAl+eUC7RZy+QQWc6wRzIV2CE6xBuMmDxc2qIihtDCZD5NPOFl7fRBQ=="], + + "readable-stream": ["readable-stream@3.6.2", "", { "dependencies": { "inherits": "^2.0.3", "string_decoder": "^1.1.1", "util-deprecate": "^1.0.1" } }, "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA=="], + + "recma-build-jsx": ["recma-build-jsx@1.0.0", "", { "dependencies": { "@types/estree": "^1.0.0", "estree-util-build-jsx": "^3.0.0", "vfile": "^6.0.0" } }, "sha512-8GtdyqaBcDfva+GUKDr3nev3VpKAhup1+RvkMvUxURHpW7QyIvk9F5wz7Vzo06CEMSilw6uArgRqhpiUcWp8ew=="], + + "recma-jsx": ["recma-jsx@1.0.0", "", { "dependencies": { "acorn-jsx": "^5.0.0", "estree-util-to-js": "^2.0.0", "recma-parse": "^1.0.0", "recma-stringify": "^1.0.0", "unified": "^11.0.0" } }, "sha512-5vwkv65qWwYxg+Atz95acp8DMu1JDSqdGkA2Of1j6rCreyFUE/gp15fC8MnGEuG1W68UKjM6x6+YTWIh7hZM/Q=="], + + "recma-parse": ["recma-parse@1.0.0", "", { "dependencies": { "@types/estree": "^1.0.0", "esast-util-from-js": "^2.0.0", "unified": "^11.0.0", "vfile": "^6.0.0" } }, "sha512-OYLsIGBB5Y5wjnSnQW6t3Xg7q3fQ7FWbw/vcXtORTnyaSFscOtABg+7Pnz6YZ6c27fG1/aN8CjfwoUEUIdwqWQ=="], + + "recma-stringify": ["recma-stringify@1.0.0", "", { "dependencies": { "@types/estree": "^1.0.0", "estree-util-to-js": "^2.0.0", "unified": "^11.0.0", "vfile": "^6.0.0" } }, "sha512-cjwII1MdIIVloKvC9ErQ+OgAtwHBmcZ0Bg4ciz78FtbT8In39aAYbaA7zvxQ61xVMSPE8WxhLwLbhif4Js2C+g=="], + + "regex": ["regex@5.1.1", "", { "dependencies": { 
"regex-utilities": "^2.3.0" } }, "sha512-dN5I359AVGPnwzJm2jN1k0W9LPZ+ePvoOeVMMfqIMFz53sSwXkxaJoxr50ptnsC771lK95BnTrVSZxq0b9yCGw=="], + + "regex-recursion": ["regex-recursion@5.1.1", "", { "dependencies": { "regex": "^5.1.1", "regex-utilities": "^2.3.0" } }, "sha512-ae7SBCbzVNrIjgSbh7wMznPcQel1DNlDtzensnFxpiNpXt1U2ju/bHugH422r+4LAVS1FpW1YCwilmnNsjum9w=="], + + "regex-utilities": ["regex-utilities@2.3.0", "", {}, "sha512-8VhliFJAWRaUiVvREIiW2NXXTmHs4vMNnSzuJVhscgmGav3g9VDxLrQndI3dZZVVdp0ZO/5v0xmX516/7M9cng=="], + + "rehype-autolink-headings": ["rehype-autolink-headings@7.1.0", "", { "dependencies": { "@types/hast": "^3.0.0", "@ungap/structured-clone": "^1.0.0", "hast-util-heading-rank": "^3.0.0", "hast-util-is-element": "^3.0.0", "unified": "^11.0.0", "unist-util-visit": "^5.0.0" } }, "sha512-rItO/pSdvnvsP4QRB1pmPiNHUskikqtPojZKJPPPAVx9Hj8i8TwMBhofrrAYRhYOOBZH9tgmG5lPqDLuIWPWmw=="], + + "rehype-class-names": ["rehype-class-names@2.0.0", "", { "dependencies": { "@types/hast": "^3.0.0", "hast-util-classnames": "^3.0.0", "hast-util-select": "^6.0.0", "unified": "^11.0.4" } }, "sha512-jldCIiAEvXKdq8hqr5f5PzNdIDkvHC6zfKhwta9oRoMu7bn0W7qLES/JrrjBvr9rKz3nJ8x4vY1EWI+dhjHVZQ=="], + + "rehype-mermaid": ["rehype-mermaid@3.0.0", "", { "dependencies": { "@types/hast": "^3.0.0", "hast-util-from-html-isomorphic": "^2.0.0", "hast-util-to-text": "^4.0.0", "mermaid-isomorphic": "^3.0.0", "mini-svg-data-uri": "^1.0.0", "space-separated-tokens": "^2.0.0", "unified": "^11.0.0", "unist-util-visit-parents": "^6.0.0", "vfile": "^6.0.0" }, "peerDependencies": { "playwright": "1" }, "optionalPeers": ["playwright"] }, "sha512-fxrD5E4Fa1WXUjmjNDvLOMT4XB1WaxcfycFIWiYU0yEMQhcTDElc9aDFnbDFRLxG1Cfo1I3mfD5kg4sjlWaB+Q=="], + + "rehype-recma": ["rehype-recma@1.0.0", "", { "dependencies": { "@types/estree": "^1.0.0", "@types/hast": "^3.0.0", "hast-util-to-estree": "^3.0.0" } }, "sha512-lqA4rGUf1JmacCNWWZx0Wv1dHqMwxzsDWYMTowuplHF3xH0N/MmrZ/G3BDZnzAkRmxDadujCjaKM2hqYdCBOGw=="], + + "rehype-slug": ["rehype-slug@6.0.0", "", { "dependencies": { "@types/hast": "^3.0.0", "github-slugger": "^2.0.0", "hast-util-heading-rank": "^3.0.0", "hast-util-to-string": "^3.0.0", "unist-util-visit": "^5.0.0" } }, "sha512-lWyvf/jwu+oS5+hL5eClVd3hNdmwM1kAC0BUvEGD19pajQMIzcNUd/k9GsfQ+FfECvX+JE+e9/btsKH0EjJT6A=="], + + "remark-directive": ["remark-directive@3.0.1", "", { "dependencies": { "@types/mdast": "^4.0.0", "mdast-util-directive": "^3.0.0", "micromark-extension-directive": "^3.0.0", "unified": "^11.0.0" } }, "sha512-gwglrEQEZcZYgVyG1tQuA+h58EZfq5CSULw7J90AFuCTyib1thgHPoqQ+h9iFvU6R+vnZ5oNFQR5QKgGpk741A=="], + + "remark-frontmatter": ["remark-frontmatter@5.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "mdast-util-frontmatter": "^2.0.0", "micromark-extension-frontmatter": "^2.0.0", "unified": "^11.0.0" } }, "sha512-XTFYvNASMe5iPN0719nPrdItC9aU0ssC4v14mH1BCi1u0n1gAocqcujWUrByftZTbLhRtiKRyjYTSIOcr69UVQ=="], + + "remark-gfm": ["remark-gfm@4.0.1", "", { "dependencies": { "@types/mdast": "^4.0.0", "mdast-util-gfm": "^3.0.0", "micromark-extension-gfm": "^3.0.0", "remark-parse": "^11.0.0", "remark-stringify": "^11.0.0", "unified": "^11.0.0" } }, "sha512-1quofZ2RQ9EWdeN34S79+KExV1764+wCUGop5CPL1WGdD0ocPpu91lzPGbwWMECpEpd42kJGQwzRfyov9j4yNg=="], + + "remark-mdx": ["remark-mdx@3.1.0", "", { "dependencies": { "mdast-util-mdx": "^3.0.0", "micromark-extension-mdxjs": "^3.0.0" } }, "sha512-Ngl/H3YXyBV9RcRNdlYsZujAmhsxwzxpDzpDEhFBVAGthS4GDgnctpDjgFl/ULx5UEDzqtW1cyBSNKqYYrqLBA=="], + + "remark-mdx-frontmatter": ["remark-mdx-frontmatter@5.2.0", "", { 
"dependencies": { "@types/mdast": "^4.0.0", "estree-util-value-to-estree": "^3.0.0", "toml": "^3.0.0", "unified": "^11.0.0", "unist-util-mdx-define": "^1.0.0", "yaml": "^2.0.0" } }, "sha512-U/hjUYTkQqNjjMRYyilJgLXSPF65qbLPdoESOkXyrwz2tVyhAnm4GUKhfXqOOS9W34M3545xEMq+aMpHgVjEeQ=="], + + "remark-parse": ["remark-parse@11.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "mdast-util-from-markdown": "^2.0.0", "micromark-util-types": "^2.0.0", "unified": "^11.0.0" } }, "sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA=="], + + "remark-rehype": ["remark-rehype@11.1.2", "", { "dependencies": { "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", "mdast-util-to-hast": "^13.0.0", "unified": "^11.0.0", "vfile": "^6.0.0" } }, "sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw=="], + + "remark-stringify": ["remark-stringify@11.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "mdast-util-to-markdown": "^2.0.0", "unified": "^11.0.0" } }, "sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw=="], + + "require-like": ["require-like@0.1.2", "", {}, "sha512-oyrU88skkMtDdauHDuKVrgR+zuItqr6/c//FXzvmxRGMexSDc6hNvJInGW3LL46n+8b50RykrvwSUIIQH2LQ5A=="], + + "restore-cursor": ["restore-cursor@4.0.0", "", { "dependencies": { "onetime": "^5.1.0", "signal-exit": "^3.0.2" } }, "sha512-I9fPXU9geO9bHOt9pHHOhOkYerIMsmVaWB0rA2AI9ERh/+x/i7MV5HKBNrg+ljO5eoPVgCcnFuRjJ9uH6I/3eg=="], + + "reusify": ["reusify@1.1.0", "", {}, "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw=="], + + "robust-predicates": ["robust-predicates@3.0.2", "", {}, "sha512-IXgzBWvWQwE6PrDI05OvmXUIruQTcoMDzRsOd5CDvHCVLcLHMTSYvOK5Cm46kWqlV3yAbuSpBZdJ5oP5OUoStg=="], + + "rollup": ["rollup@4.45.1", "", { "dependencies": { "@types/estree": "1.0.8" }, "optionalDependencies": { "@rollup/rollup-android-arm-eabi": "4.45.1", "@rollup/rollup-android-arm64": "4.45.1", "@rollup/rollup-darwin-arm64": "4.45.1", "@rollup/rollup-darwin-x64": "4.45.1", "@rollup/rollup-freebsd-arm64": "4.45.1", "@rollup/rollup-freebsd-x64": "4.45.1", "@rollup/rollup-linux-arm-gnueabihf": "4.45.1", "@rollup/rollup-linux-arm-musleabihf": "4.45.1", "@rollup/rollup-linux-arm64-gnu": "4.45.1", "@rollup/rollup-linux-arm64-musl": "4.45.1", "@rollup/rollup-linux-loongarch64-gnu": "4.45.1", "@rollup/rollup-linux-powerpc64le-gnu": "4.45.1", "@rollup/rollup-linux-riscv64-gnu": "4.45.1", "@rollup/rollup-linux-riscv64-musl": "4.45.1", "@rollup/rollup-linux-s390x-gnu": "4.45.1", "@rollup/rollup-linux-x64-gnu": "4.45.1", "@rollup/rollup-linux-x64-musl": "4.45.1", "@rollup/rollup-win32-arm64-msvc": "4.45.1", "@rollup/rollup-win32-ia32-msvc": "4.45.1", "@rollup/rollup-win32-x64-msvc": "4.45.1", "fsevents": "~2.3.2" }, "bin": { "rollup": "dist/bin/rollup" } }, "sha512-4iya7Jb76fVpQyLoiVpzUrsjQ12r3dM7fIVz+4NwoYvZOShknRmiv+iu9CClZml5ZLGb0XMcYLutK6w9tgxHDw=="], + + "roughjs": ["roughjs@4.6.6", "", { "dependencies": { "hachure-fill": "^0.5.2", "path-data-parser": "^0.1.0", "points-on-curve": "^0.2.0", "points-on-path": "^0.2.1" } }, "sha512-ZUz/69+SYpFN/g/lUlo2FXcIjRkSu3nDarreVdGGndHEBJ6cXPdKguS8JGxwj5HA5xIbVKSmLgr5b3AWxtRfvQ=="], + + "run-parallel": ["run-parallel@1.2.0", "", { "dependencies": { "queue-microtask": "^1.2.2" } }, "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA=="], + + "rw": ["rw@1.3.3", "", {}, 
"sha512-PdhdWy89SiZogBLaw42zdeqtRJ//zFd2PgQavcICDUgJT5oW10QCRKbJ6bg4r0/UY2M6BWd5tkxuGFRvCkgfHQ=="], + + "safe-buffer": ["safe-buffer@5.2.1", "", {}, "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ=="], + + "safer-buffer": ["safer-buffer@2.1.2", "", {}, "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="], + + "scheduler": ["scheduler@0.26.0", "", {}, "sha512-NlHwttCI/l5gCPR3D1nNXtWABUmBwvZpEQiD4IXSbIDq8BzLIK/7Ir5gTFSGZDUu37K5cMNp0hFtzO38sC7gWA=="], + + "semver": ["semver@6.3.1", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA=="], + + "send": ["send@0.19.0", "", { "dependencies": { "debug": "2.6.9", "depd": "2.0.0", "destroy": "1.2.0", "encodeurl": "~1.0.2", "escape-html": "~1.0.3", "etag": "~1.8.1", "fresh": "0.5.2", "http-errors": "2.0.0", "mime": "1.6.0", "ms": "2.1.3", "on-finished": "2.4.1", "range-parser": "~1.2.1", "statuses": "2.0.1" } }, "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw=="], + + "serve-static": ["serve-static@1.16.2", "", { "dependencies": { "encodeurl": "~2.0.0", "escape-html": "~1.0.3", "parseurl": "~1.3.3", "send": "0.19.0" } }, "sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw=="], + + "set-cookie-parser": ["set-cookie-parser@2.7.1", "", {}, "sha512-IOc8uWeOZgnb3ptbCURJWNjWUPcO3ZnTTdzsurqERrP6nPyv+paC55vJM0LpOlT2ne+Ix+9+CRG1MNLlyZ4GjQ=="], + + "setprototypeof": ["setprototypeof@1.2.0", "", {}, "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw=="], + + "shebang-command": ["shebang-command@2.0.0", "", { "dependencies": { "shebang-regex": "^3.0.0" } }, "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA=="], + + "shebang-regex": ["shebang-regex@3.0.0", "", {}, "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A=="], + + "shiki": ["shiki@1.29.2", "", { "dependencies": { "@shikijs/core": "1.29.2", "@shikijs/engine-javascript": "1.29.2", "@shikijs/engine-oniguruma": "1.29.2", "@shikijs/langs": "1.29.2", "@shikijs/themes": "1.29.2", "@shikijs/types": "1.29.2", "@shikijs/vscode-textmate": "^10.0.1", "@types/hast": "^3.0.4" } }, "sha512-njXuliz/cP+67jU2hukkxCNuH1yUi4QfdZZY+sMr5PPrIyXSu5iTb/qYC4BiWWB0vZ+7TbdvYUCeL23zpwCfbg=="], + + "signal-exit": ["signal-exit@4.1.0", "", {}, "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw=="], + + "sisteransi": ["sisteransi@1.0.5", "", {}, "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg=="], + + "slash": ["slash@5.1.0", "", {}, "sha512-ZA6oR3T/pEyuqwMgAKT0/hAv8oAXckzbkmR0UkUosQ+Mc4RxGoJkRmwHgHufaenlyAgE1Mxgpdcrf75y6XcnDg=="], + + "source-map": ["source-map@0.7.4", "", {}, "sha512-l3BikUxvPOcn5E74dZiq5BGsTb5yEwhaTSzccU6t4sDOH8NWJCstKO5QT2CvtFoK6F0saL7p9xHAqHOlCPJygA=="], + + "source-map-js": ["source-map-js@1.2.1", "", {}, "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA=="], + + "space-separated-tokens": ["space-separated-tokens@2.0.2", "", {}, "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q=="], + + "statuses": ["statuses@2.0.1", "", {}, "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ=="], + + 
"stdin-discarder": ["stdin-discarder@0.1.0", "", { "dependencies": { "bl": "^5.0.0" } }, "sha512-xhV7w8S+bUwlPTb4bAOUQhv8/cSS5offJuX8GQGq32ONF0ZtDWKfkdomM3HMRA+LhX6um/FZ0COqlwsjD53LeQ=="], + + "string-width": ["string-width@6.1.0", "", { "dependencies": { "eastasianwidth": "^0.2.0", "emoji-regex": "^10.2.1", "strip-ansi": "^7.0.1" } }, "sha512-k01swCJAgQmuADB0YIc+7TuatfNvTBVOoaUWJjTB9R4VJzR5vNWzf5t42ESVZFPS8xTySF7CAdV4t/aaIm3UnQ=="], + + "string-width-cjs": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="], + + "string_decoder": ["string_decoder@1.3.0", "", { "dependencies": { "safe-buffer": "~5.2.0" } }, "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA=="], + + "stringify-entities": ["stringify-entities@4.0.4", "", { "dependencies": { "character-entities-html4": "^2.0.0", "character-entities-legacy": "^3.0.0" } }, "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg=="], + + "strip-ansi": ["strip-ansi@7.1.0", "", { "dependencies": { "ansi-regex": "^6.0.1" } }, "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ=="], + + "strip-ansi-cjs": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="], + + "strip-final-newline": ["strip-final-newline@2.0.0", "", {}, "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA=="], + + "style-to-js": ["style-to-js@1.1.17", "", { "dependencies": { "style-to-object": "1.0.9" } }, "sha512-xQcBGDxJb6jjFCTzvQtfiPn6YvvP2O8U1MDIPNfJQlWMYfktPy+iGsHE7cssjs7y84d9fQaK4UF3RIJaAHSoYA=="], + + "style-to-object": ["style-to-object@1.0.9", "", { "dependencies": { "inline-style-parser": "0.2.4" } }, "sha512-G4qppLgKu/k6FwRpHiGiKPaPTFcG3g4wNVX/Qsfu+RqQM30E7Tyu/TEgxcL9PNLF5pdRLwQdE3YKKf+KF2Dzlw=="], + + "stylis": ["stylis@4.3.6", "", {}, "sha512-yQ3rwFWRfwNUY7H5vpU0wfdkNSnvnJinhF9830Swlaxl03zsOjCfmX0ugac+3LtK0lYSgwL/KXc8oYL3mG4YFQ=="], + + "tabbable": ["tabbable@6.2.0", "", {}, "sha512-Cat63mxsVJlzYvN51JmVXIgNoUokrIaT2zLclCXjRd8boZ0004U4KCs/sToJ75C6sdlByWxpYnb5Boif1VSFew=="], + + "tailwindcss": ["tailwindcss@4.0.7", "", {}, "sha512-yH5bPPyapavo7L+547h3c4jcBXcrKwybQRjwdEIVAd9iXRvy/3T1CC6XSQEgZtRySjKfqvo3Cc0ZF1DTheuIdA=="], + + "tapable": ["tapable@2.2.2", "", {}, "sha512-Re10+NauLTMCudc7T5WLFLAwDhQ0JWdrMK+9B2M8zR5hRExKmsRDCBA7/aV/pNJFltmBFO5BAMlQFi/vq3nKOg=="], + + "tinyexec": ["tinyexec@1.0.1", "", {}, "sha512-5uC6DDlmeqiOwCPmK9jMSdOuZTh8bU39Ys6yidB+UTt5hfZUPGAypSgFRiEp+jbi9qH40BLDvy85jIU88wKSqw=="], + + "tinyglobby": ["tinyglobby@0.2.14", "", { "dependencies": { "fdir": "^6.4.4", "picomatch": "^4.0.2" } }, "sha512-tX5e7OM1HnYr2+a2C/4V0htOcSQcoSTH9KgJnVvNm5zm/cyEWKJ7j7YutsH9CxMdtOkkLFy2AHrMci9IM8IPZQ=="], + + "to-regex-range": ["to-regex-range@5.0.1", "", { "dependencies": { "is-number": "^7.0.0" } }, "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ=="], + + "toidentifier": ["toidentifier@1.0.1", "", {}, "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA=="], + + "toml": ["toml@3.0.0", "", {}, "sha512-y/mWCZinnvxjTKYhJ+pYxwD0mRLVvOtdS2Awbgxln6iEnt4rk0yBxeSBHkGJcPucRiG0e55mwWp+g/05rsrd6w=="], + + "trim-lines": 
["trim-lines@3.0.1", "", {}, "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg=="], + + "trough": ["trough@2.2.0", "", {}, "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw=="], + + "ts-dedent": ["ts-dedent@2.2.0", "", {}, "sha512-q5W7tVM71e2xjHZTlgfTDoPF/SmqKG5hddq9SzR49CH2hayqRKJtQ4mtRlSxKaJlR/+9rEM+mnBHf7I2/BQcpQ=="], + + "tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="], + + "twoslash": ["twoslash@0.2.12", "", { "dependencies": { "@typescript/vfs": "^1.6.0", "twoslash-protocol": "0.2.12" }, "peerDependencies": { "typescript": "*" } }, "sha512-tEHPASMqi7kqwfJbkk7hc/4EhlrKCSLcur+TcvYki3vhIfaRMXnXjaYFgXpoZRbT6GdprD4tGuVBEmTpUgLBsw=="], + + "twoslash-protocol": ["twoslash-protocol@0.2.12", "", {}, "sha512-5qZLXVYfZ9ABdjqbvPc4RWMr7PrpPaaDSeaYY55vl/w1j6H6kzsWK/urAEIXlzYlyrFmyz1UbwIt+AA0ck+wbg=="], + + "typescript": ["typescript@5.8.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ=="], + + "ua-parser-js": ["ua-parser-js@1.0.40", "", { "bin": { "ua-parser-js": "script/cli.js" } }, "sha512-z6PJ8Lml+v3ichVojCiB8toQJBuwR42ySM4ezjXIqXK3M0HczmKQ3LF4rhU55PfD99KEEXQG6yb7iOMyvYuHew=="], + + "ufo": ["ufo@1.6.1", "", {}, "sha512-9a4/uxlTWJ4+a5i0ooc1rU7C7YOw3wT+UGqdeNNHWnOF9qcMBgLRS+4IYUqbczewFx4mLEig6gawh7X6mFlEkA=="], + + "undici-types": ["undici-types@7.8.0", "", {}, "sha512-9UJ2xGDvQ43tYyVMpuHlsgApydB8ZKfVYTsLDhXkFL/6gfkp+U8xTGdh8pMJv1SpZna0zxG1DwsKZsreLbXBxw=="], + + "unicorn-magic": ["unicorn-magic@0.3.0", "", {}, "sha512-+QBBXBCvifc56fsbuxZQ6Sic3wqqc3WWaqxs58gvJrcOuN83HGTCwz3oS5phzU9LthRNE9VrJCFCLUgHeeFnfA=="], + + "unified": ["unified@11.0.5", "", { "dependencies": { "@types/unist": "^3.0.0", "bail": "^2.0.0", "devlop": "^1.0.0", "extend": "^3.0.0", "is-plain-obj": "^4.0.0", "trough": "^2.0.0", "vfile": "^6.0.0" } }, "sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA=="], + + "unist-util-find-after": ["unist-util-find-after@5.0.0", "", { "dependencies": { "@types/unist": "^3.0.0", "unist-util-is": "^6.0.0" } }, "sha512-amQa0Ep2m6hE2g72AugUItjbuM8X8cGQnFoHk0pGfrFeT9GZhzN5SW8nRsiGKK7Aif4CrACPENkA6P/Lw6fHGQ=="], + + "unist-util-is": ["unist-util-is@6.0.0", "", { "dependencies": { "@types/unist": "^3.0.0" } }, "sha512-2qCTHimwdxLfz+YzdGfkqNlH0tLi9xjTnHddPmJwtIG9MGsdbutfTc4P+haPD7l7Cjxf/WZj+we5qfVPvvxfYw=="], + + "unist-util-mdx-define": ["unist-util-mdx-define@1.1.2", "", { "dependencies": { "@types/estree": "^1.0.0", "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", "estree-util-is-identifier-name": "^3.0.0", "estree-util-scope": "^1.0.0", "estree-walker": "^3.0.0", "vfile": "^6.0.0" } }, "sha512-9ncH7i7TN5Xn7/tzX5bE3rXgz1X/u877gYVAUB3mLeTKYJmQHmqKTDBi6BTGXV7AeolBCI9ErcVsOt2qryoD0g=="], + + "unist-util-position": ["unist-util-position@5.0.0", "", { "dependencies": { "@types/unist": "^3.0.0" } }, "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA=="], + + "unist-util-position-from-estree": ["unist-util-position-from-estree@2.0.0", "", { "dependencies": { "@types/unist": "^3.0.0" } }, "sha512-KaFVRjoqLyF6YXCbVLNad/eS4+OfPQQn2yOd7zF/h5T/CSL2v8NpN6a5TPvtbXthAGw5nG+PuTtq+DdIZr+cRQ=="], + + "unist-util-remove-position": ["unist-util-remove-position@5.0.0", "", { "dependencies": { "@types/unist": "^3.0.0", "unist-util-visit": "^5.0.0" } 
}, "sha512-Hp5Kh3wLxv0PHj9m2yZhhLt58KzPtEYKQQ4yxfYFEO7EvHwzyDYnduhHnY1mDxoqr7VUwVuHXk9RXKIiYS1N8Q=="], + + "unist-util-stringify-position": ["unist-util-stringify-position@4.0.0", "", { "dependencies": { "@types/unist": "^3.0.0" } }, "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ=="], + + "unist-util-visit": ["unist-util-visit@5.0.0", "", { "dependencies": { "@types/unist": "^3.0.0", "unist-util-is": "^6.0.0", "unist-util-visit-parents": "^6.0.0" } }, "sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg=="], + + "unist-util-visit-parents": ["unist-util-visit-parents@6.0.1", "", { "dependencies": { "@types/unist": "^3.0.0", "unist-util-is": "^6.0.0" } }, "sha512-L/PqWzfTP9lzzEa6CKs0k2nARxTdZduw3zyh8d2NVBnsyvHjSX4TWse388YrrQKbvI8w20fGjGlhgT96WwKykw=="], + + "universalify": ["universalify@2.0.1", "", {}, "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw=="], + + "update-browserslist-db": ["update-browserslist-db@1.1.3", "", { "dependencies": { "escalade": "^3.2.0", "picocolors": "^1.1.1" }, "peerDependencies": { "browserslist": ">= 4.21.0" }, "bin": { "update-browserslist-db": "cli.js" } }, "sha512-UxhIZQ+QInVdunkDAaiazvvT/+fXL5Osr0JZlJulepYu6Jd7qJtDZjlur0emRlT71EN3ScPoE7gvsuIKKNavKw=="], + + "use-callback-ref": ["use-callback-ref@1.3.3", "", { "dependencies": { "tslib": "^2.0.0" }, "peerDependencies": { "@types/react": "*", "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-jQL3lRnocaFtu3V00JToYz/4QkNWswxijDaCVNZRiRTO3HQDLsdu1ZtmIUvV4yPp+rvWm5j0y0TG/S61cuijTg=="], + + "use-sidecar": ["use-sidecar@1.1.3", "", { "dependencies": { "detect-node-es": "^1.1.0", "tslib": "^2.0.0" }, "peerDependencies": { "@types/react": "*", "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-Fedw0aZvkhynoPYlA5WXrMCAMm+nSWdZt6lzJQ7Ok8S6Q+VsHmHpRWndVRJ8Be0ZbkfPc5LRYH+5XrzXcEeLRQ=="], + + "use-sync-external-store": ["use-sync-external-store@1.5.0", "", { "peerDependencies": { "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, "sha512-Rb46I4cGGVBmjamjphe8L/UnvJD+uPPtTkNvX5mZgqdbavhI4EbgIWJiIHXJ8bc/i9EQGPRh4DwEURJ552Do0A=="], + + "util-deprecate": ["util-deprecate@1.0.2", "", {}, "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw=="], + + "uuid": ["uuid@11.1.0", "", { "bin": { "uuid": "dist/esm/bin/uuid" } }, "sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A=="], + + "vary": ["vary@1.1.2", "", {}, "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg=="], + + "vfile": ["vfile@6.0.3", "", { "dependencies": { "@types/unist": "^3.0.0", "vfile-message": "^4.0.0" } }, "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q=="], + + "vfile-location": ["vfile-location@5.0.3", "", { "dependencies": { "@types/unist": "^3.0.0", "vfile": "^6.0.0" } }, "sha512-5yXvWDEgqeiYiBe1lbxYF7UMAIm/IcopxMHrMQDq3nvKcjPKIhZklUKL+AE7J7uApI4kwe2snsK+eI6UTj9EHg=="], + + "vfile-message": ["vfile-message@4.0.2", "", { "dependencies": { "@types/unist": "^3.0.0", "unist-util-stringify-position": "^4.0.0" } }, "sha512-jRDZ1IMLttGj41KcZvlrYAaI3CfqpLpfpf+Mfig13viT6NKvRzWZ+lXz0Y5D60w6uJIBAOGq9mSHf0gktF0duw=="], + + "vite": ["vite@6.3.5", "", { "dependencies": { "esbuild": "^0.25.0", "fdir": "^6.4.4", "picomatch": "^4.0.2", 
"postcss": "^8.5.3", "rollup": "^4.34.9", "tinyglobby": "^0.2.13" }, "optionalDependencies": { "fsevents": "~2.3.3" }, "peerDependencies": { "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0", "jiti": ">=1.21.0", "less": "*", "lightningcss": "^1.21.0", "sass": "*", "sass-embedded": "*", "stylus": "*", "sugarss": "*", "terser": "^5.16.0", "tsx": "^4.8.1", "yaml": "^2.4.2" }, "optionalPeers": ["@types/node", "jiti", "less", "lightningcss", "sass", "sass-embedded", "stylus", "sugarss", "terser", "tsx", "yaml"], "bin": { "vite": "bin/vite.js" } }, "sha512-cZn6NDFE7wdTpINgs++ZJ4N49W2vRp8LCKrn3Ob1kYNtOo21vfDoaV5GzBfLU4MovSAB8uNRm4jgzVQZ+mBzPQ=="], + + "vite-node": ["vite-node@3.2.4", "", { "dependencies": { "cac": "^6.7.14", "debug": "^4.4.1", "es-module-lexer": "^1.7.0", "pathe": "^2.0.3", "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0" }, "bin": { "vite-node": "vite-node.mjs" } }, "sha512-EbKSKh+bh1E1IFxeO0pg1n4dvoOTt0UDiXMd/qn++r98+jPO1xtJilvXldeuQ8giIB5IkpjCgMleHMNEsGH6pg=="], + + "vocs": ["vocs@1.0.13", "", { "dependencies": { "@floating-ui/react": "^0.27.4", "@hono/node-server": "^1.13.8", "@mdx-js/react": "^3.1.0", "@mdx-js/rollup": "^3.1.0", "@noble/hashes": "^1.7.1", "@radix-ui/colors": "^3.0.0", "@radix-ui/react-accordion": "^1.2.3", "@radix-ui/react-dialog": "^1.1.6", "@radix-ui/react-icons": "^1.3.2", "@radix-ui/react-label": "^2.1.2", "@radix-ui/react-navigation-menu": "^1.2.5", "@radix-ui/react-popover": "^1.1.6", "@radix-ui/react-tabs": "^1.1.3", "@shikijs/rehype": "^1", "@shikijs/transformers": "^1", "@shikijs/twoslash": "^1", "@tailwindcss/vite": "4.0.7", "@vanilla-extract/css": "^1.17.1", "@vanilla-extract/dynamic": "^2.1.2", "@vanilla-extract/vite-plugin": "^5.0.1", "@vitejs/plugin-react": "^4.3.4", "autoprefixer": "^10.4.20", "cac": "^6.7.14", "chroma-js": "^3.1.2", "clsx": "^2.1.1", "compression": "^1.8.0", "create-vocs": "^1.0.0-alpha.5", "cross-spawn": "^7.0.6", "fs-extra": "^11.3.0", "globby": "^14.1.0", "hastscript": "^8.0.0", "hono": "^4.7.1", "mark.js": "^8.11.1", "mdast-util-directive": "^3.1.0", "mdast-util-from-markdown": "^2.0.2", "mdast-util-frontmatter": "^2.0.1", "mdast-util-gfm": "^3.1.0", "mdast-util-mdx": "^3.0.0", "mdast-util-mdx-jsx": "^3.2.0", "mdast-util-to-hast": "^13.2.0", "mdast-util-to-markdown": "^2.1.2", "minimatch": "^9.0.5", "minisearch": "^6.3.0", "ora": "^7.0.1", "p-limit": "^5.0.0", "playwright": "^1.52.0", "postcss": "^8.5.2", "radix-ui": "^1.1.3", "react-intersection-observer": "^9.15.1", "react-router": "^7.2.0", "rehype-autolink-headings": "^7.1.0", "rehype-class-names": "^2.0.0", "rehype-mermaid": "^3.0.0", "rehype-slug": "^6.0.0", "remark-directive": "^3.0.1", "remark-frontmatter": "^5.0.0", "remark-gfm": "^4.0.1", "remark-mdx": "^3.1.0", "remark-mdx-frontmatter": "^5.0.0", "remark-parse": "^11.0.0", "serve-static": "^1.16.2", "shiki": "^1", "toml": "^3.0.0", "twoslash": "~0.2.12", "ua-parser-js": "^1.0.40", "unified": "^11.0.5", "unist-util-visit": "^5.0.0", "vite": "^6.1.0" }, "peerDependencies": { "react": "^19", "react-dom": "^19" }, "bin": { "vocs": "_lib/cli/index.js" } }, "sha512-V/ogXG5xw7jMFXI2Wv0d0ZdCeeT5jzaX0PKdRKcqhnd21UtLZrqa5pKZkStNIZyVpvfsLW0WB7wjB4iBOpueiw=="], + + "vscode-jsonrpc": ["vscode-jsonrpc@8.2.0", "", {}, "sha512-C+r0eKJUIfiDIfwJhria30+TYWPtuHJXHtI7J0YlOmKAo7ogxP20T0zxB7HZQIFhIyvoBPwWskjxrvAtfjyZfA=="], + + "vscode-languageserver": ["vscode-languageserver@9.0.1", "", { "dependencies": { "vscode-languageserver-protocol": "3.17.5" }, "bin": { "installServerIntoExtension": "bin/installServerIntoExtension" } }, 
"sha512-woByF3PDpkHFUreUa7Hos7+pUWdeWMXRd26+ZX2A8cFx6v/JPTtd4/uN0/jB6XQHYaOlHbio03NTHCqrgG5n7g=="], + + "vscode-languageserver-protocol": ["vscode-languageserver-protocol@3.17.5", "", { "dependencies": { "vscode-jsonrpc": "8.2.0", "vscode-languageserver-types": "3.17.5" } }, "sha512-mb1bvRJN8SVznADSGWM9u/b07H7Ecg0I3OgXDuLdn307rl/J3A9YD6/eYOssqhecL27hK1IPZAsaqh00i/Jljg=="], + + "vscode-languageserver-textdocument": ["vscode-languageserver-textdocument@1.0.12", "", {}, "sha512-cxWNPesCnQCcMPeenjKKsOCKQZ/L6Tv19DTRIGuLWe32lyzWhihGVJ/rcckZXJxfdKCFvRLS3fpBIsV/ZGX4zA=="], + + "vscode-languageserver-types": ["vscode-languageserver-types@3.17.5", "", {}, "sha512-Ld1VelNuX9pdF39h2Hgaeb5hEZM2Z3jUrrMgWQAu82jMtZp7p3vJT3BzToKtZI7NgQssZje5o0zryOrhQvzQAg=="], + + "vscode-uri": ["vscode-uri@3.0.8", "", {}, "sha512-AyFQ0EVmsOZOlAnxoFOGOq1SQDWAB7C6aqMGS23svWAllfOaxbuFvcT8D1i8z3Gyn8fraVeZNNmN6e9bxxXkKw=="], + + "web-namespaces": ["web-namespaces@2.0.1", "", {}, "sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ=="], + + "which": ["which@2.0.2", "", { "dependencies": { "isexe": "^2.0.0" }, "bin": { "node-which": "./bin/node-which" } }, "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA=="], + + "wrap-ansi": ["wrap-ansi@8.1.0", "", { "dependencies": { "ansi-styles": "^6.1.0", "string-width": "^5.0.1", "strip-ansi": "^7.0.1" } }, "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ=="], + + "wrap-ansi-cjs": ["wrap-ansi@7.0.0", "", { "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", "strip-ansi": "^6.0.0" } }, "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q=="], + + "yallist": ["yallist@3.1.1", "", {}, "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g=="], + + "yaml": ["yaml@2.8.0", "", { "bin": { "yaml": "bin.mjs" } }, "sha512-4lLa/EcQCB0cJkyts+FpIRx5G/llPxfP6VQU5KByHEhLxY3IJCH0f0Hy1MHI8sClTvsIb8qwRJ6R/ZdlDJ/leQ=="], + + "yocto-queue": ["yocto-queue@1.2.1", "", {}, "sha512-AyeEbWOu/TAXdxlV9wmGcR0+yh2j3vYPGOECcIj2S7MkrLyC7ne+oye2BKTItt0ii2PHk4cDy+95+LshzbXnGg=="], + + "zwitch": ["zwitch@2.0.4", "", {}, "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A=="], + + "@babel/core/debug": ["debug@4.4.1", "", { "dependencies": { "ms": "^2.1.3" } }, "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ=="], + + "@babel/helper-compilation-targets/lru-cache": ["lru-cache@5.1.1", "", { "dependencies": { "yallist": "^3.0.2" } }, "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w=="], + + "@babel/traverse/debug": ["debug@4.4.1", "", { "dependencies": { "ms": "^2.1.3" } }, "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ=="], + + "@clack/prompts/is-unicode-supported": ["is-unicode-supported@1.3.0", "", { "bundled": true }, "sha512-43r2mRvz+8JRIKnWJ+3j8JtjRKZ6GmjzfaE/qiBJnikNnYv/6bagRJ1kUhNk8R5EX/GkobD+r+sfxCPJsiKBLQ=="], + + "@iconify/utils/debug": ["debug@4.4.1", "", { "dependencies": { "ms": "^2.1.3" } }, "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ=="], + + "@isaacs/cliui/string-width": ["string-width@5.1.2", "", { "dependencies": { "eastasianwidth": "^0.2.0", "emoji-regex": "^9.2.2", "strip-ansi": "^7.0.1" } }, 
"sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA=="], + + "@rollup/pluginutils/estree-walker": ["estree-walker@2.0.2", "", {}, "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w=="], + + "@typescript/vfs/debug": ["debug@4.4.1", "", { "dependencies": { "ms": "^2.1.3" } }, "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ=="], + + "@vanilla-extract/css/lru-cache": ["lru-cache@10.4.3", "", {}, "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ=="], + + "cytoscape-fcose/cose-base": ["cose-base@2.2.0", "", { "dependencies": { "layout-base": "^2.0.0" } }, "sha512-AzlgcsCbUMymkADOJtQm3wO9S3ltPfYOFD5033keQn9NJzIbtnZj+UdBJe7DYml/8TdbtHJW3j58SOnKhWY/5g=="], + + "d3-dsv/commander": ["commander@7.2.0", "", {}, "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw=="], + + "d3-sankey/d3-array": ["d3-array@2.12.1", "", { "dependencies": { "internmap": "^1.0.0" } }, "sha512-B0ErZK/66mHtEsR1TkPEEkwdy+WDesimkM5gpZr5Dsg54BiTA5RXtYW5qTLIAcekaS9xfZrzBLF/OAkB3Qn1YQ=="], + + "d3-sankey/d3-shape": ["d3-shape@1.3.7", "", { "dependencies": { "d3-path": "1" } }, "sha512-EUkvKjqPFUAZyOlhY5gzCxCeI0Aep04LwIRpsZ/mLFelJiUfnK56jo5JMDSE7yyP2kLSb6LtF+S5chMk7uqPqw=="], + + "execa/signal-exit": ["signal-exit@3.0.7", "", {}, "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ=="], + + "hast-util-from-dom/hastscript": ["hastscript@9.0.1", "", { "dependencies": { "@types/hast": "^3.0.0", "comma-separated-tokens": "^2.0.0", "hast-util-parse-selector": "^4.0.0", "property-information": "^7.0.0", "space-separated-tokens": "^2.0.0" } }, "sha512-g7df9rMFX/SPi34tyGCyUBREQoKkapwdY/T04Qn9TDWfHhAYt4/I0gMVirzK5wEzeUqIjEB+LXC/ypb7Aqno5w=="], + + "hast-util-from-parse5/hastscript": ["hastscript@9.0.1", "", { "dependencies": { "@types/hast": "^3.0.0", "comma-separated-tokens": "^2.0.0", "hast-util-parse-selector": "^4.0.0", "property-information": "^7.0.0", "space-separated-tokens": "^2.0.0" } }, "sha512-g7df9rMFX/SPi34tyGCyUBREQoKkapwdY/T04Qn9TDWfHhAYt4/I0gMVirzK5wEzeUqIjEB+LXC/ypb7Aqno5w=="], + + "hast-util-from-parse5/property-information": ["property-information@7.1.0", "", {}, "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ=="], + + "hast-util-select/property-information": ["property-information@7.1.0", "", {}, "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ=="], + + "hast-util-to-estree/property-information": ["property-information@7.1.0", "", {}, "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ=="], + + "hast-util-to-html/property-information": ["property-information@7.1.0", "", {}, "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ=="], + + "hast-util-to-jsx-runtime/property-information": ["property-information@7.1.0", "", {}, "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ=="], + + "local-pkg/pkg-types": ["pkg-types@2.2.0", "", { "dependencies": { "confbox": "^0.2.2", "exsolve": "^1.0.7", "pathe": "^2.0.3" } }, "sha512-2SM/GZGAEkPp3KWORxQZns4M+WSeXbC2HEvmOIJe3Cmiv6ieAJvdVhDldtHqM5J1Y7MrR1XhkBT/rMlhh9FdqQ=="], + + "micromark/debug": ["debug@4.4.1", "", { "dependencies": { "ms": "^2.1.3" } }, 
"sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ=="], + + "micromatch/picomatch": ["picomatch@2.3.1", "", {}, "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA=="], + + "p-locate/p-limit": ["p-limit@3.1.0", "", { "dependencies": { "yocto-queue": "^0.1.0" } }, "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ=="], + + "parse-entities/@types/unist": ["@types/unist@2.0.11", "", {}, "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA=="], + + "restore-cursor/signal-exit": ["signal-exit@3.0.7", "", {}, "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ=="], + + "rollup/fsevents": ["fsevents@2.3.3", "", { "os": "darwin" }, "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw=="], + + "send/encodeurl": ["encodeurl@1.0.2", "", {}, "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w=="], + + "send/ms": ["ms@2.1.3", "", {}, "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="], + + "string-width-cjs/emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="], + + "string-width-cjs/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="], + + "strip-ansi-cjs/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="], + + "vite/fsevents": ["fsevents@2.3.3", "", { "os": "darwin" }, "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw=="], + + "vite-node/debug": ["debug@4.4.1", "", { "dependencies": { "ms": "^2.1.3" } }, "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ=="], + + "vocs/minimatch": ["minimatch@9.0.5", "", { "dependencies": { "brace-expansion": "^2.0.1" } }, "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow=="], + + "wrap-ansi/string-width": ["string-width@5.1.2", "", { "dependencies": { "eastasianwidth": "^0.2.0", "emoji-regex": "^9.2.2", "strip-ansi": "^7.0.1" } }, "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA=="], + + "wrap-ansi-cjs/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], + + "wrap-ansi-cjs/string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="], + + "wrap-ansi-cjs/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="], + + "@babel/core/debug/ms": ["ms@2.1.3", "", {}, "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="], + + "@babel/traverse/debug/ms": ["ms@2.1.3", "", {}, 
"sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="], + + "@iconify/utils/debug/ms": ["ms@2.1.3", "", {}, "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="], + + "@isaacs/cliui/string-width/emoji-regex": ["emoji-regex@9.2.2", "", {}, "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg=="], + + "@typescript/vfs/debug/ms": ["ms@2.1.3", "", {}, "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="], + + "cytoscape-fcose/cose-base/layout-base": ["layout-base@2.0.1", "", {}, "sha512-dp3s92+uNI1hWIpPGH3jK2kxE2lMjdXdr+DH8ynZHpd6PUlH6x6cbuXnoMmiNumznqaNO31xu9e79F0uuZ0JFg=="], + + "d3-sankey/d3-shape/d3-path": ["d3-path@1.0.9", "", {}, "sha512-VLaYcn81dtHVTjEHd8B+pbe9yHWpXKZUC87PzoFmsFrJqgFwDe/qxfp5MlfsfM1V5E/iVt0MmEbWQ7FVIXh/bg=="], + + "hast-util-from-dom/hastscript/property-information": ["property-information@7.1.0", "", {}, "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ=="], + + "local-pkg/pkg-types/confbox": ["confbox@0.2.2", "", {}, "sha512-1NB+BKqhtNipMsov4xI/NnhCKp9XG9NamYp5PVm9klAT0fsrNPjaFICsCFhNhwZJKNh7zB/3q8qXz0E9oaMNtQ=="], + + "micromark/debug/ms": ["ms@2.1.3", "", {}, "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="], + + "p-locate/p-limit/yocto-queue": ["yocto-queue@0.1.0", "", {}, "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q=="], + + "string-width-cjs/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="], + + "vite-node/debug/ms": ["ms@2.1.3", "", {}, "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="], + + "wrap-ansi-cjs/string-width/emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="], + + "wrap-ansi-cjs/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="], + + "wrap-ansi/string-width/emoji-regex": ["emoji-regex@9.2.2", "", {}, "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg=="], + } +} diff --git a/docs/vocs/bun.lockb b/docs/vocs/bun.lockb deleted file mode 100755 index a975dd0d492..00000000000 Binary files a/docs/vocs/bun.lockb and /dev/null differ diff --git a/docs/vocs/bunfig.toml b/docs/vocs/bunfig.toml new file mode 100644 index 00000000000..a38b9b61752 --- /dev/null +++ b/docs/vocs/bunfig.toml @@ -0,0 +1,4 @@ +telemetry = false + +# ensures runtime is always bun regardless of shebang +run.bun = true diff --git a/docs/vocs/docs/components/SdkShowcase.tsx b/docs/vocs/docs/components/SdkShowcase.tsx index 5f878206a84..442d6676f4f 100644 --- a/docs/vocs/docs/components/SdkShowcase.tsx +++ b/docs/vocs/docs/components/SdkShowcase.tsx @@ -1,5 +1,3 @@ -import React from 'react' - interface SdkProject { name: string description: string @@ -43,16 +41,16 @@ const projects: SdkProject[] = [ export function SdkShowcase() { return (

- {projects.map((project, index) => (
+ {projects.map((project) => (
{/* LoC Badge */}
{project.loc} LoC
- + {/* Content */}
@@ -63,11 +61,11 @@ export function SdkShowcase() { {project.company}

- +

{project.description}

- + {/* GitHub Link */}
) -} \ No newline at end of file +} diff --git a/docs/vocs/docs/components/TrustedBy.tsx b/docs/vocs/docs/components/TrustedBy.tsx index ef50527f8ea..41b78e8787a 100644 --- a/docs/vocs/docs/components/TrustedBy.tsx +++ b/docs/vocs/docs/components/TrustedBy.tsx @@ -1,5 +1,3 @@ -import React from 'react' - interface TrustedCompany { name: string logoUrl: string @@ -27,9 +25,9 @@ const companies: TrustedCompany[] = [ export function TrustedBy() { return (
- {companies.map((company, index) => (
+ {companies.map((company) => (
{/* Company Logo */} @@ -46,4 +44,4 @@ export function TrustedBy() { ))}
) -} \ No newline at end of file +} diff --git a/docs/vocs/docs/pages/cli/SUMMARY.mdx b/docs/vocs/docs/pages/cli/SUMMARY.mdx index 330f32b3fd2..d7582ab64c5 100644 --- a/docs/vocs/docs/pages/cli/SUMMARY.mdx +++ b/docs/vocs/docs/pages/cli/SUMMARY.mdx @@ -1,47 +1,45 @@ -- [`reth`](/cli/reth) - - [`reth node`](/cli/reth/node) - - [`reth init`](/cli/reth/init) - - [`reth init-state`](/cli/reth/init-state) - - [`reth import`](/cli/reth/import) - - [`reth import-era`](/cli/reth/import-era) - - [`reth dump-genesis`](/cli/reth/dump-genesis) - - [`reth db`](/cli/reth/db) - - [`reth db stats`](/cli/reth/db/stats) - - [`reth db list`](/cli/reth/db/list) - - [`reth db checksum`](/cli/reth/db/checksum) - - [`reth db diff`](/cli/reth/db/diff) - - [`reth db get`](/cli/reth/db/get) - - [`reth db get mdbx`](/cli/reth/db/get/mdbx) - - [`reth db get static-file`](/cli/reth/db/get/static-file) - - [`reth db drop`](/cli/reth/db/drop) - - [`reth db clear`](/cli/reth/db/clear) - - [`reth db clear mdbx`](/cli/reth/db/clear/mdbx) - - [`reth db clear static-file`](/cli/reth/db/clear/static-file) - - [`reth db version`](/cli/reth/db/version) - - [`reth db path`](/cli/reth/db/path) - - [`reth download`](/cli/reth/download) - - [`reth stage`](/cli/reth/stage) - - [`reth stage run`](/cli/reth/stage/run) - - [`reth stage drop`](/cli/reth/stage/drop) - - [`reth stage dump`](/cli/reth/stage/dump) - - [`reth stage dump execution`](/cli/reth/stage/dump/execution) - - [`reth stage dump storage-hashing`](/cli/reth/stage/dump/storage-hashing) - - [`reth stage dump account-hashing`](/cli/reth/stage/dump/account-hashing) - - [`reth stage dump merkle`](/cli/reth/stage/dump/merkle) - - [`reth stage unwind`](/cli/reth/stage/unwind) - - [`reth stage unwind to-block`](/cli/reth/stage/unwind/to-block) - - [`reth stage unwind num-blocks`](/cli/reth/stage/unwind/num-blocks) - - [`reth p2p`](/cli/reth/p2p) - - [`reth p2p header`](/cli/reth/p2p/header) - - [`reth p2p body`](/cli/reth/p2p/body) - - [`reth p2p rlpx`](/cli/reth/p2p/rlpx) - - [`reth p2p rlpx ping`](/cli/reth/p2p/rlpx/ping) - - [`reth config`](/cli/reth/config) - - [`reth debug`](/cli/reth/debug) - - [`reth debug execution`](/cli/reth/debug/execution) - - [`reth debug merkle`](/cli/reth/debug/merkle) - - [`reth debug in-memory-merkle`](/cli/reth/debug/in-memory-merkle) - - [`reth debug build-block`](/cli/reth/debug/build-block) - - [`reth recover`](/cli/reth/recover) - - [`reth recover storage-tries`](/cli/reth/recover/storage-tries) - - [`reth prune`](/cli/reth/prune) + - [`reth`](/cli/reth) + - [`reth node`](/cli/reth/node) + - [`reth init`](/cli/reth/init) + - [`reth init-state`](/cli/reth/init-state) + - [`reth import`](/cli/reth/import) + - [`reth import-era`](/cli/reth/import-era) + - [`reth export-era`](/cli/reth/export-era) + - [`reth dump-genesis`](/cli/reth/dump-genesis) + - [`reth db`](/cli/reth/db) + - [`reth db stats`](/cli/reth/db/stats) + - [`reth db list`](/cli/reth/db/list) + - [`reth db checksum`](/cli/reth/db/checksum) + - [`reth db diff`](/cli/reth/db/diff) + - [`reth db get`](/cli/reth/db/get) + - [`reth db get mdbx`](/cli/reth/db/get/mdbx) + - [`reth db get static-file`](/cli/reth/db/get/static-file) + - [`reth db drop`](/cli/reth/db/drop) + - [`reth db clear`](/cli/reth/db/clear) + - [`reth db clear mdbx`](/cli/reth/db/clear/mdbx) + - [`reth db clear static-file`](/cli/reth/db/clear/static-file) + - [`reth db version`](/cli/reth/db/version) + - [`reth db path`](/cli/reth/db/path) + - [`reth download`](/cli/reth/download) + - [`reth 
stage`](/cli/reth/stage) + - [`reth stage run`](/cli/reth/stage/run) + - [`reth stage drop`](/cli/reth/stage/drop) + - [`reth stage dump`](/cli/reth/stage/dump) + - [`reth stage dump execution`](/cli/reth/stage/dump/execution) + - [`reth stage dump storage-hashing`](/cli/reth/stage/dump/storage-hashing) + - [`reth stage dump account-hashing`](/cli/reth/stage/dump/account-hashing) + - [`reth stage dump merkle`](/cli/reth/stage/dump/merkle) + - [`reth stage unwind`](/cli/reth/stage/unwind) + - [`reth stage unwind to-block`](/cli/reth/stage/unwind/to-block) + - [`reth stage unwind num-blocks`](/cli/reth/stage/unwind/num-blocks) + - [`reth p2p`](/cli/reth/p2p) + - [`reth p2p header`](/cli/reth/p2p/header) + - [`reth p2p body`](/cli/reth/p2p/body) + - [`reth p2p rlpx`](/cli/reth/p2p/rlpx) + - [`reth p2p rlpx ping`](/cli/reth/p2p/rlpx/ping) + - [`reth p2p bootnode`](/cli/reth/p2p/bootnode) + - [`reth config`](/cli/reth/config) + - [`reth recover`](/cli/reth/recover) + - [`reth recover storage-tries`](/cli/reth/recover/storage-tries) + - [`reth prune`](/cli/reth/prune) + - [`reth re-execute`](/cli/reth/re-execute) \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/cli.mdx b/docs/vocs/docs/pages/cli/cli.mdx index 20046ce9e77..d7a02e2b738 100644 --- a/docs/vocs/docs/pages/cli/cli.mdx +++ b/docs/vocs/docs/pages/cli/cli.mdx @@ -2,7 +2,7 @@ import Summary from './SUMMARY.mdx'; # CLI Reference -The Reth node is operated via the CLI by running the `reth node` command. To stop it, press `ctrl-c`. You may need to wait a bit as Reth tears down existing p2p connections or other cleanup tasks. +The Reth node is operated via the CLI by running the `reth node` command. To stop it, press `ctrl-c`. You may need to wait a bit as Reth tears down existing p2p connections or performs other cleanup tasks. 
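As a minimal sketch of that start/stop cycle (the `--chain` and `--datadir` values below are illustrative, not required — both flags and their defaults appear in the option listings elsewhere in this reference):

```bash
# Start a node; without flags this defaults to mainnet and the
# OS-specific data directory shown in the --datadir help text.
reth node --chain mainnet --datadir "$HOME/.local/share/reth"

# To stop it, press ctrl-c in the same terminal, then allow a few
# seconds for p2p teardown and other cleanup before the process exits.
```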
However, Reth has more commands: diff --git a/docs/vocs/docs/pages/cli/reth.mdx b/docs/vocs/docs/pages/cli/reth.mdx index 8225d71b3b7..0d2a4355c84 100644 --- a/docs/vocs/docs/pages/cli/reth.mdx +++ b/docs/vocs/docs/pages/cli/reth.mdx @@ -14,15 +14,16 @@ Commands: init-state Initialize the database from a state dump file import This syncs RLP encoded blocks from a file import-era This syncs ERA encoded blocks from a directory + export-era Exports block to era1 files in a specified directory dump-genesis Dumps genesis block JSON configuration to stdout db Database debugging utilities download Download public node snapshots stage Manipulate individual stages p2p P2P Debugging utilities config Write config to stdout - debug Various debug routines recover Scripts for node recovery prune Prune according to the configuration without any limits + re-execute Re-execute blocks in parallel to verify historical sync correctness help Print this message or the help of the given subcommand(s) Options: diff --git a/docs/vocs/docs/pages/cli/reth/debug.mdx b/docs/vocs/docs/pages/cli/reth/debug.mdx index 0f616236a67..f56a60aa941 100644 --- a/docs/vocs/docs/pages/cli/reth/debug.mdx +++ b/docs/vocs/docs/pages/cli/reth/debug.mdx @@ -9,10 +9,8 @@ $ reth debug --help Usage: reth debug [OPTIONS] Commands: - execution Debug the roundtrip execution of blocks as well as the generated data merkle Debug the clean & incremental state root calculations in-memory-merkle Debug in-memory state root calculation - build-block Debug block building help Print this message or the help of the given subcommand(s) Options: diff --git a/docs/vocs/docs/pages/cli/reth/debug/execution.mdx b/docs/vocs/docs/pages/cli/reth/debug/execution.mdx deleted file mode 100644 index ef7069f8173..00000000000 --- a/docs/vocs/docs/pages/cli/reth/debug/execution.mdx +++ /dev/null @@ -1,328 +0,0 @@ -# reth debug execution - -Debug the roundtrip execution of blocks as well as the generated data - -```bash -$ reth debug execution --help -``` -```txt -Usage: reth debug execution [OPTIONS] --to - -Options: - -h, --help - Print help (see a summary with '-h') - -Datadir: - --datadir - The path to the data dir for all reth files and subdirectories. - - Defaults to the OS-specific data directory: - - - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - - Windows: `{FOLDERID_RoamingAppData}/reth/` - - macOS: `$HOME/Library/Application Support/reth/` - - [default: default] - - --datadir.static-files - The absolute path to store static files in. - - --config - The path to the configuration file to use - - --chain - The chain this node is running. - Possible values are either a built-in chain or the path to a chain specification file. - - Built-in chains: - mainnet, sepolia, holesky, hoodi, dev - - [default: mainnet] - -Database: - --db.log-level - Database logging level. Levels higher than "notice" require a debug build - - Possible values: - - fatal: Enables logging for critical conditions, i.e. assertion failures - - error: Enables logging for error conditions - - warn: Enables logging for warning conditions - - notice: Enables logging for normal but significant condition - - verbose: Enables logging for verbose informational - - debug: Enables logging for debug-level messages - - trace: Enables logging for trace debug-level messages - - extra: Enables logging for extra debug-level messages - - --db.exclusive - Open environment in exclusive/monopolistic mode. 
Makes it possible to open a database on an NFS volume - - [possible values: true, false] - - --db.max-size - Maximum database size (e.g., 4TB, 8MB) - - --db.growth-step - Database growth step (e.g., 4GB, 4KB) - - --db.read-transaction-timeout - Read transaction timeout in seconds, 0 means no timeout - -Networking: - -d, --disable-discovery - Disable the discovery service - - --disable-dns-discovery - Disable the DNS discovery - - --disable-discv4-discovery - Disable Discv4 discovery - - --enable-discv5-discovery - Enable Discv5 discovery - - --disable-nat - Disable Nat discovery - - --discovery.addr - The UDP address to use for devp2p peer discovery version 4 - - [default: 0.0.0.0] - - --discovery.port - The UDP port to use for devp2p peer discovery version 4 - - [default: 30303] - - --discovery.v5.addr - The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv4 - - --discovery.v5.addr.ipv6 - The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv6 - - --discovery.v5.port - The UDP IPv4 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv4, or `--discovery.v5.addr` is set - - [default: 9200] - - --discovery.v5.port.ipv6 - The UDP IPv6 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv6, or `--discovery.addr.ipv6` is set - - [default: 9200] - - --discovery.v5.lookup-interval - The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program - - [default: 20] - - --discovery.v5.bootstrap.lookup-interval - The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap - - [default: 5] - - --discovery.v5.bootstrap.lookup-countdown - The number of times to carry out boost lookup queries at bootstrap - - [default: 200] - - --trusted-peers - Comma separated enode URLs of trusted peers for P2P connections. - - --trusted-peers enode://abcd@192.168.0.1:30303 - - --trusted-only - Connect to or accept from trusted peers only - - --bootnodes - Comma separated enode URLs for P2P discovery bootstrap. - - Will fall back to a network-specific default if not specified. - - --dns-retries - Amount of DNS resolution requests retries to perform when peering - - [default: 0] - - --peers-file - The path to the known peers file. Connected peers are dumped to this file on nodes - shutdown, and read on startup. Cannot be used with `--no-persist-peers`. - - --identity - Custom node identity - - [default: reth/-/] - - --p2p-secret-key - Secret key to use for this node. - - This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. - - --no-persist-peers - Do not persist peers. - - --nat - NAT resolution method (any|none|upnp|publicip|extip:\) - - [default: any] - - --addr - Network listening address - - [default: 0.0.0.0] - - --port - Network listening port - - [default: 30303] - - --max-outbound-peers - Maximum number of outbound requests. default: 100 - - --max-inbound-peers - Maximum number of inbound requests. default: 30 - - --max-tx-reqs - Max concurrent `GetPooledTransactions` requests. - - [default: 130] - - --max-tx-reqs-peer - Max concurrent `GetPooledTransactions` requests per peer. - - [default: 1] - - --max-seen-tx-history - Max number of seen transactions to remember per peer. - - Default is 320 transaction hashes. 
- - [default: 320] - - --max-pending-imports - Max number of transactions to import concurrently. - - [default: 4096] - - --pooled-tx-response-soft-limit - Experimental, for usage in research. Sets the max accumulated byte size of transactions - to pack in one response. - Spec'd at 2MiB. - - [default: 2097152] - - --pooled-tx-pack-soft-limit - Experimental, for usage in research. Sets the max accumulated byte size of transactions to - request in one request. - - Since `RLPx` protocol version 68, the byte size of a transaction is shared as metadata in a - transaction announcement (see `RLPx` specs). This allows a node to request a specific size - response. - - By default, nodes request only 128 KiB worth of transactions, but should a peer request - more, up to 2 MiB, a node will answer with more than 128 KiB. - - Default is 128 KiB. - - [default: 131072] - - --max-tx-pending-fetch - Max capacity of cache of hashes for transactions pending fetch. - - [default: 25600] - - --net-if.experimental - Name of network interface used to communicate with peers. - - If flag is set, but no value is passed, the default interface for docker `eth0` is tried. - - --tx-propagation-policy - Transaction Propagation Policy - - The policy determines which peers transactions are gossiped to. - - [default: All] - - --to - The maximum block height - - --interval - The block interval for sync and unwind. Defaults to `1000` - - [default: 1000] - -Logging: - --log.stdout.format - The format to use for logs written to stdout - - [default: terminal] - - Possible values: - - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - - terminal: Represents terminal-friendly formatting for logs - - --log.stdout.filter - The filter to use for logs written to stdout - - [default: ] - - --log.file.format - The format to use for logs written to the log file - - [default: terminal] - - Possible values: - - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - - terminal: Represents terminal-friendly formatting for logs - - --log.file.filter - The filter to use for logs written to the log file - - [default: debug] - - --log.file.directory - The path to put log files in - - [default: /logs] - - --log.file.max-size - The maximum size (in MB) of one log file - - [default: 200] - - --log.file.max-files - The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - - [default: 5] - - --log.journald - Write logs to journald - - --log.journald.filter - The filter to use for logs written to journald - - [default: error] - - --color - Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - - [default: always] - - Possible values: - - always: Colors on - - auto: Colors on - - never: Colors off - -Display: - -v, --verbosity... - Set the minimum log level. - - -v Errors - -vv Warnings - -vvv Info - -vvvv Debug - -vvvvv Traces (warning: very verbose!) 
- - -q, --quiet - Silence all log output -``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/debug/in-memory-merkle.mdx b/docs/vocs/docs/pages/cli/reth/debug/in-memory-merkle.mdx deleted file mode 100644 index 7db3b2d2ba8..00000000000 --- a/docs/vocs/docs/pages/cli/reth/debug/in-memory-merkle.mdx +++ /dev/null @@ -1,328 +0,0 @@ -# reth debug in-memory-merkle - -Debug in-memory state root calculation - -```bash -$ reth debug in-memory-merkle --help -``` -```txt -Usage: reth debug in-memory-merkle [OPTIONS] - -Options: - -h, --help - Print help (see a summary with '-h') - -Datadir: - --datadir - The path to the data dir for all reth files and subdirectories. - - Defaults to the OS-specific data directory: - - - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - - Windows: `{FOLDERID_RoamingAppData}/reth/` - - macOS: `$HOME/Library/Application Support/reth/` - - [default: default] - - --datadir.static-files - The absolute path to store static files in. - - --config - The path to the configuration file to use - - --chain - The chain this node is running. - Possible values are either a built-in chain or the path to a chain specification file. - - Built-in chains: - mainnet, sepolia, holesky, hoodi, dev - - [default: mainnet] - -Database: - --db.log-level - Database logging level. Levels higher than "notice" require a debug build - - Possible values: - - fatal: Enables logging for critical conditions, i.e. assertion failures - - error: Enables logging for error conditions - - warn: Enables logging for warning conditions - - notice: Enables logging for normal but significant condition - - verbose: Enables logging for verbose informational - - debug: Enables logging for debug-level messages - - trace: Enables logging for trace debug-level messages - - extra: Enables logging for extra debug-level messages - - --db.exclusive - Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume - - [possible values: true, false] - - --db.max-size - Maximum database size (e.g., 4TB, 8MB) - - --db.growth-step - Database growth step (e.g., 4GB, 4KB) - - --db.read-transaction-timeout - Read transaction timeout in seconds, 0 means no timeout - -Networking: - -d, --disable-discovery - Disable the discovery service - - --disable-dns-discovery - Disable the DNS discovery - - --disable-discv4-discovery - Disable Discv4 discovery - - --enable-discv5-discovery - Enable Discv5 discovery - - --disable-nat - Disable Nat discovery - - --discovery.addr - The UDP address to use for devp2p peer discovery version 4 - - [default: 0.0.0.0] - - --discovery.port - The UDP port to use for devp2p peer discovery version 4 - - [default: 30303] - - --discovery.v5.addr - The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv4 - - --discovery.v5.addr.ipv6 - The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv6 - - --discovery.v5.port - The UDP IPv4 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv4, or `--discovery.v5.addr` is set - - [default: 9200] - - --discovery.v5.port.ipv6 - The UDP IPv6 port to use for devp2p peer discovery version 5. 
Not used unless `--addr` is IPv6, or `--discovery.addr.ipv6` is set - - [default: 9200] - - --discovery.v5.lookup-interval - The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program - - [default: 20] - - --discovery.v5.bootstrap.lookup-interval - The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap - - [default: 5] - - --discovery.v5.bootstrap.lookup-countdown - The number of times to carry out boost lookup queries at bootstrap - - [default: 200] - - --trusted-peers - Comma separated enode URLs of trusted peers for P2P connections. - - --trusted-peers enode://abcd@192.168.0.1:30303 - - --trusted-only - Connect to or accept from trusted peers only - - --bootnodes - Comma separated enode URLs for P2P discovery bootstrap. - - Will fall back to a network-specific default if not specified. - - --dns-retries - Amount of DNS resolution requests retries to perform when peering - - [default: 0] - - --peers-file - The path to the known peers file. Connected peers are dumped to this file on nodes - shutdown, and read on startup. Cannot be used with `--no-persist-peers`. - - --identity - Custom node identity - - [default: reth/-/] - - --p2p-secret-key - Secret key to use for this node. - - This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. - - --no-persist-peers - Do not persist peers. - - --nat - NAT resolution method (any|none|upnp|publicip|extip:\) - - [default: any] - - --addr - Network listening address - - [default: 0.0.0.0] - - --port - Network listening port - - [default: 30303] - - --max-outbound-peers - Maximum number of outbound requests. default: 100 - - --max-inbound-peers - Maximum number of inbound requests. default: 30 - - --max-tx-reqs - Max concurrent `GetPooledTransactions` requests. - - [default: 130] - - --max-tx-reqs-peer - Max concurrent `GetPooledTransactions` requests per peer. - - [default: 1] - - --max-seen-tx-history - Max number of seen transactions to remember per peer. - - Default is 320 transaction hashes. - - [default: 320] - - --max-pending-imports - Max number of transactions to import concurrently. - - [default: 4096] - - --pooled-tx-response-soft-limit - Experimental, for usage in research. Sets the max accumulated byte size of transactions - to pack in one response. - Spec'd at 2MiB. - - [default: 2097152] - - --pooled-tx-pack-soft-limit - Experimental, for usage in research. Sets the max accumulated byte size of transactions to - request in one request. - - Since `RLPx` protocol version 68, the byte size of a transaction is shared as metadata in a - transaction announcement (see `RLPx` specs). This allows a node to request a specific size - response. - - By default, nodes request only 128 KiB worth of transactions, but should a peer request - more, up to 2 MiB, a node will answer with more than 128 KiB. - - Default is 128 KiB. - - [default: 131072] - - --max-tx-pending-fetch - Max capacity of cache of hashes for transactions pending fetch. - - [default: 25600] - - --net-if.experimental - Name of network interface used to communicate with peers. - - If flag is set, but no value is passed, the default interface for docker `eth0` is tried. - - --tx-propagation-policy - Transaction Propagation Policy - - The policy determines which peers transactions are gossiped to. 
- - [default: All] - - --retries - The number of retries per request - - [default: 5] - - --skip-node-depth - The depth after which we should start comparing branch nodes - -Logging: - --log.stdout.format - The format to use for logs written to stdout - - [default: terminal] - - Possible values: - - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - - terminal: Represents terminal-friendly formatting for logs - - --log.stdout.filter - The filter to use for logs written to stdout - - [default: ] - - --log.file.format - The format to use for logs written to the log file - - [default: terminal] - - Possible values: - - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - - terminal: Represents terminal-friendly formatting for logs - - --log.file.filter - The filter to use for logs written to the log file - - [default: debug] - - --log.file.directory - The path to put log files in - - [default: /logs] - - --log.file.max-size - The maximum size (in MB) of one log file - - [default: 200] - - --log.file.max-files - The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - - [default: 5] - - --log.journald - Write logs to journald - - --log.journald.filter - The filter to use for logs written to journald - - [default: error] - - --color - Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - - [default: always] - - Possible values: - - always: Colors on - - auto: Colors on - - never: Colors off - -Display: - -v, --verbosity... - Set the minimum log level. - - -v Errors - -vv Warnings - -vvv Info - -vvvv Debug - -vvvvv Traces (warning: very verbose!) - - -q, --quiet - Silence all log output -``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/debug/merkle.mdx b/docs/vocs/docs/pages/cli/reth/debug/merkle.mdx deleted file mode 100644 index 03b16a35e38..00000000000 --- a/docs/vocs/docs/pages/cli/reth/debug/merkle.mdx +++ /dev/null @@ -1,331 +0,0 @@ -# reth debug merkle - -Debug the clean & incremental state root calculations - -```bash -$ reth debug merkle --help -``` -```txt -Usage: reth debug merkle [OPTIONS] --to - -Options: - -h, --help - Print help (see a summary with '-h') - -Datadir: - --datadir - The path to the data dir for all reth files and subdirectories. - - Defaults to the OS-specific data directory: - - - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - - Windows: `{FOLDERID_RoamingAppData}/reth/` - - macOS: `$HOME/Library/Application Support/reth/` - - [default: default] - - --datadir.static-files - The absolute path to store static files in. - - --config - The path to the configuration file to use - - --chain - The chain this node is running. - Possible values are either a built-in chain or the path to a chain specification file. - - Built-in chains: - mainnet, sepolia, holesky, hoodi, dev - - [default: mainnet] - -Database: - --db.log-level - Database logging level. Levels higher than "notice" require a debug build - - Possible values: - - fatal: Enables logging for critical conditions, i.e. 
assertion failures - - error: Enables logging for error conditions - - warn: Enables logging for warning conditions - - notice: Enables logging for normal but significant condition - - verbose: Enables logging for verbose informational - - debug: Enables logging for debug-level messages - - trace: Enables logging for trace debug-level messages - - extra: Enables logging for extra debug-level messages - - --db.exclusive - Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume - - [possible values: true, false] - - --db.max-size - Maximum database size (e.g., 4TB, 8MB) - - --db.growth-step - Database growth step (e.g., 4GB, 4KB) - - --db.read-transaction-timeout - Read transaction timeout in seconds, 0 means no timeout - -Networking: - -d, --disable-discovery - Disable the discovery service - - --disable-dns-discovery - Disable the DNS discovery - - --disable-discv4-discovery - Disable Discv4 discovery - - --enable-discv5-discovery - Enable Discv5 discovery - - --disable-nat - Disable Nat discovery - - --discovery.addr - The UDP address to use for devp2p peer discovery version 4 - - [default: 0.0.0.0] - - --discovery.port - The UDP port to use for devp2p peer discovery version 4 - - [default: 30303] - - --discovery.v5.addr - The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv4 - - --discovery.v5.addr.ipv6 - The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv6 - - --discovery.v5.port - The UDP IPv4 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv4, or `--discovery.v5.addr` is set - - [default: 9200] - - --discovery.v5.port.ipv6 - The UDP IPv6 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv6, or `--discovery.addr.ipv6` is set - - [default: 9200] - - --discovery.v5.lookup-interval - The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program - - [default: 20] - - --discovery.v5.bootstrap.lookup-interval - The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap - - [default: 5] - - --discovery.v5.bootstrap.lookup-countdown - The number of times to carry out boost lookup queries at bootstrap - - [default: 200] - - --trusted-peers - Comma separated enode URLs of trusted peers for P2P connections. - - --trusted-peers enode://abcd@192.168.0.1:30303 - - --trusted-only - Connect to or accept from trusted peers only - - --bootnodes - Comma separated enode URLs for P2P discovery bootstrap. - - Will fall back to a network-specific default if not specified. - - --dns-retries - Amount of DNS resolution requests retries to perform when peering - - [default: 0] - - --peers-file - The path to the known peers file. Connected peers are dumped to this file on nodes - shutdown, and read on startup. Cannot be used with `--no-persist-peers`. - - --identity - Custom node identity - - [default: reth/-/] - - --p2p-secret-key - Secret key to use for this node. - - This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. - - --no-persist-peers - Do not persist peers. 
- - --nat - NAT resolution method (any|none|upnp|publicip|extip:\) - - [default: any] - - --addr - Network listening address - - [default: 0.0.0.0] - - --port - Network listening port - - [default: 30303] - - --max-outbound-peers - Maximum number of outbound requests. default: 100 - - --max-inbound-peers - Maximum number of inbound requests. default: 30 - - --max-tx-reqs - Max concurrent `GetPooledTransactions` requests. - - [default: 130] - - --max-tx-reqs-peer - Max concurrent `GetPooledTransactions` requests per peer. - - [default: 1] - - --max-seen-tx-history - Max number of seen transactions to remember per peer. - - Default is 320 transaction hashes. - - [default: 320] - - --max-pending-imports - Max number of transactions to import concurrently. - - [default: 4096] - - --pooled-tx-response-soft-limit - Experimental, for usage in research. Sets the max accumulated byte size of transactions - to pack in one response. - Spec'd at 2MiB. - - [default: 2097152] - - --pooled-tx-pack-soft-limit - Experimental, for usage in research. Sets the max accumulated byte size of transactions to - request in one request. - - Since `RLPx` protocol version 68, the byte size of a transaction is shared as metadata in a - transaction announcement (see `RLPx` specs). This allows a node to request a specific size - response. - - By default, nodes request only 128 KiB worth of transactions, but should a peer request - more, up to 2 MiB, a node will answer with more than 128 KiB. - - Default is 128 KiB. - - [default: 131072] - - --max-tx-pending-fetch - Max capacity of cache of hashes for transactions pending fetch. - - [default: 25600] - - --net-if.experimental - Name of network interface used to communicate with peers. - - If flag is set, but no value is passed, the default interface for docker `eth0` is tried. - - --tx-propagation-policy - Transaction Propagation Policy - - The policy determines which peers transactions are gossiped to. - - [default: All] - - --retries - The number of retries per request - - [default: 5] - - --to - The height to finish at - - --skip-node-depth - The depth after which we should start comparing branch nodes - -Logging: - --log.stdout.format - The format to use for logs written to stdout - - [default: terminal] - - Possible values: - - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - - terminal: Represents terminal-friendly formatting for logs - - --log.stdout.filter - The filter to use for logs written to stdout - - [default: ] - - --log.file.format - The format to use for logs written to the log file - - [default: terminal] - - Possible values: - - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - - terminal: Represents terminal-friendly formatting for logs - - --log.file.filter - The filter to use for logs written to the log file - - [default: debug] - - --log.file.directory - The path to put log files in - - [default: /logs] - - --log.file.max-size - The maximum size (in MB) of one log file - - [default: 200] - - --log.file.max-files - The maximum amount of log files that will be stored. 
If set to 0, background file logging is disabled - - [default: 5] - - --log.journald - Write logs to journald - - --log.journald.filter - The filter to use for logs written to journald - - [default: error] - - --color - Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - - [default: always] - - Possible values: - - always: Colors on - - auto: Colors on - - never: Colors off - -Display: - -v, --verbosity... - Set the minimum log level. - - -v Errors - -vv Warnings - -vvv Info - -vvvv Debug - -vvvvv Traces (warning: very verbose!) - - -q, --quiet - Silence all log output -``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/download.mdx b/docs/vocs/docs/pages/cli/reth/download.mdx index 04a7228f212..e170a321a4f 100644 --- a/docs/vocs/docs/pages/cli/reth/download.mdx +++ b/docs/vocs/docs/pages/cli/reth/download.mdx @@ -71,7 +71,7 @@ Database: Specify a snapshot URL or let the command propose a default one. Available snapshot sources: - - https://downloads.merkle.io (default, mainnet archive) + - https://snapshots.merkle.io (default, mainnet archive) - https://publicnode.com/snapshots (full nodes & testnets) If no URL is provided, the latest mainnet archive snapshot diff --git a/docs/vocs/docs/pages/cli/reth/export-era.mdx b/docs/vocs/docs/pages/cli/reth/export-era.mdx new file mode 100644 index 00000000000..165970638ba --- /dev/null +++ b/docs/vocs/docs/pages/cli/reth/export-era.mdx @@ -0,0 +1,162 @@ +# reth export-era + +Exports block to era1 files in a specified directory + +```bash +$ reth export-era --help +``` +```txt +Usage: reth export-era [OPTIONS] + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --datadir.static-files + The absolute path to store static files in. + + --config + The path to the configuration file to use + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + mainnet, sepolia, holesky, hoodi, dev + + [default: mainnet] + +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + + --first-block-number + Optional first block number to export from the db. + It is by default 0. + + --last-block-number + Optional last block number to export from the db. 
+ It is by default 8191. + + --max-blocks-per-file + The maximum number of blocks per file, it can help you to decrease the size of the files. + Must be less than or equal to 8192. + + --path + The directory path where to export era1 files. + The block data are read from the database. + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + [default: terminal] + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + [default: terminal] + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + [default: always] + + Possible values: + - always: Colors on + - auto: Colors on + - never: Colors off + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index c0679868ea3..dd29bbc103b 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -287,6 +287,11 @@ RPC: [default: .ipc] + --ipc.permissions + Set the permissions for the IPC socket file, in octal format. + + If not specified, the permissions will be set by the system's umask. + --authrpc.addr Auth server address to listen on @@ -312,6 +317,11 @@ RPC: [default: _engine_api.ipc] + --disable-auth-server + Disable the auth/engine API server. + + This will prevent the authenticated engine-API server from starting. Use this if you're running a node that doesn't need to serve engine API requests. + --rpc.jwtsecret Hex encoded JWT secret to authenticate the regular RPC server(s), see `--http.api` and `--ws.api`. @@ -489,11 +499,17 @@ TxPool: [default: 7] + --txpool.minimum-priority-fee + Minimum priority fee required for transaction acceptance into the pool. 
Transactions with priority fee below this value will be rejected + --txpool.gas-limit The default enforced gas limit for transactions entering the pool [default: 30000000] + --txpool.max-tx-gas + Maximum gas limit for individual transactions. Transactions exceeding this limit will be rejected by the transaction pool + --blobpool.pricebump Price bump percentage to replace an already existing blob transaction @@ -624,6 +640,9 @@ Debug: compare them against local execution when a bad block is encountered, helping identify discrepancies in state execution. + --ethstats + The URL of the ethstats server to connect to. Example: `nodename:secret@host:port` + Database: --db.log-level Database logging level. Levels higher than "notice" require a debug build @@ -699,6 +718,9 @@ Pruning: --prune.receipts.full Prunes all receipt data + --prune.receipts.pre-merge + Prune receipts before the merge block + --prune.receipts.distance Prune receipts before the `head-N` block number. In other words, keep last N + 1 blocks @@ -752,6 +774,9 @@ Engine: --engine.disable-caching-and-prewarming Disable cross-block caching and parallel prewarming + --engine.parallel-sparse-trie + Enable the parallel sparse trie in the engine + --engine.state-provider-metrics Enable state provider latency metrics. This allows the engine to collect and report stats about how long state provider calls took during execution, but this does introduce slight overhead to state provider calls diff --git a/docs/vocs/docs/pages/cli/reth/p2p.mdx b/docs/vocs/docs/pages/cli/reth/p2p.mdx index a435c916169..151c386ef48 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p.mdx @@ -9,249 +9,16 @@ $ reth p2p --help Usage: reth p2p [OPTIONS] Commands: - header Download block header - body Download block body - rlpx RLPx commands - help Print this message or the help of the given subcommand(s) + header Download block header + body Download block body + rlpx RLPx commands + bootnode Bootnode command + help Print this message or the help of the given subcommand(s) Options: - --config - The path to the configuration file to use. - - --chain - The chain this node is running. - Possible values are either a built-in chain or the path to a chain specification file. - - Built-in chains: - mainnet, sepolia, holesky, hoodi, dev - - [default: mainnet] - - --retries - The number of retries per request - - [default: 5] - -h, --help Print help (see a summary with '-h') -Networking: - -d, --disable-discovery - Disable the discovery service - - --disable-dns-discovery - Disable the DNS discovery - - --disable-discv4-discovery - Disable Discv4 discovery - - --enable-discv5-discovery - Enable Discv5 discovery - - --disable-nat - Disable Nat discovery - - --discovery.addr - The UDP address to use for devp2p peer discovery version 4 - - [default: 0.0.0.0] - - --discovery.port - The UDP port to use for devp2p peer discovery version 4 - - [default: 30303] - - --discovery.v5.addr - The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv4 - - --discovery.v5.addr.ipv6 - The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv6 - - --discovery.v5.port - The UDP IPv4 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv4, or `--discovery.v5.addr` is set - - [default: 9200] - - --discovery.v5.port.ipv6 - The UDP IPv6 port to use for devp2p peer discovery version 5. 
Not used unless `--addr` is IPv6, or `--discovery.addr.ipv6` is set - - [default: 9200] - - --discovery.v5.lookup-interval - The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program - - [default: 20] - - --discovery.v5.bootstrap.lookup-interval - The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap - - [default: 5] - - --discovery.v5.bootstrap.lookup-countdown - The number of times to carry out boost lookup queries at bootstrap - - [default: 200] - - --trusted-peers - Comma separated enode URLs of trusted peers for P2P connections. - - --trusted-peers enode://abcd@192.168.0.1:30303 - - --trusted-only - Connect to or accept from trusted peers only - - --bootnodes - Comma separated enode URLs for P2P discovery bootstrap. - - Will fall back to a network-specific default if not specified. - - --dns-retries - Amount of DNS resolution requests retries to perform when peering - - [default: 0] - - --peers-file - The path to the known peers file. Connected peers are dumped to this file on nodes - shutdown, and read on startup. Cannot be used with `--no-persist-peers`. - - --identity - Custom node identity - - [default: reth/-/] - - --p2p-secret-key - Secret key to use for this node. - - This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. - - --no-persist-peers - Do not persist peers. - - --nat - NAT resolution method (any|none|upnp|publicip|extip:\) - - [default: any] - - --addr - Network listening address - - [default: 0.0.0.0] - - --port - Network listening port - - [default: 30303] - - --max-outbound-peers - Maximum number of outbound requests. default: 100 - - --max-inbound-peers - Maximum number of inbound requests. default: 30 - - --max-tx-reqs - Max concurrent `GetPooledTransactions` requests. - - [default: 130] - - --max-tx-reqs-peer - Max concurrent `GetPooledTransactions` requests per peer. - - [default: 1] - - --max-seen-tx-history - Max number of seen transactions to remember per peer. - - Default is 320 transaction hashes. - - [default: 320] - - --max-pending-imports - Max number of transactions to import concurrently. - - [default: 4096] - - --pooled-tx-response-soft-limit - Experimental, for usage in research. Sets the max accumulated byte size of transactions - to pack in one response. - Spec'd at 2MiB. - - [default: 2097152] - - --pooled-tx-pack-soft-limit - Experimental, for usage in research. Sets the max accumulated byte size of transactions to - request in one request. - - Since `RLPx` protocol version 68, the byte size of a transaction is shared as metadata in a - transaction announcement (see `RLPx` specs). This allows a node to request a specific size - response. - - By default, nodes request only 128 KiB worth of transactions, but should a peer request - more, up to 2 MiB, a node will answer with more than 128 KiB. - - Default is 128 KiB. - - [default: 131072] - - --max-tx-pending-fetch - Max capacity of cache of hashes for transactions pending fetch. - - [default: 25600] - - --net-if.experimental - Name of network interface used to communicate with peers. - - If flag is set, but no value is passed, the default interface for docker `eth0` is tried. - - --tx-propagation-policy - Transaction Propagation Policy - - The policy determines which peers transactions are gossiped to. - - [default: All] - -Datadir: - --datadir - The path to the data dir for all reth files and subdirectories. 
- - Defaults to the OS-specific data directory: - - - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - - Windows: `{FOLDERID_RoamingAppData}/reth/` - - macOS: `$HOME/Library/Application Support/reth/` - - [default: default] - - --datadir.static-files - The absolute path to store static files in. - -Database: - --db.log-level - Database logging level. Levels higher than "notice" require a debug build - - Possible values: - - fatal: Enables logging for critical conditions, i.e. assertion failures - - error: Enables logging for error conditions - - warn: Enables logging for warning conditions - - notice: Enables logging for normal but significant condition - - verbose: Enables logging for verbose informational - - debug: Enables logging for debug-level messages - - trace: Enables logging for trace debug-level messages - - extra: Enables logging for extra debug-level messages - - --db.exclusive - Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume - - [possible values: true, false] - - --db.max-size - Maximum database size (e.g., 4TB, 8MB) - - --db.growth-step - Database growth step (e.g., 4GB, 4KB) - - --db.read-transaction-timeout - Read transaction timeout in seconds, 0 means no timeout - Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx index e5092f274ea..223dec04d25 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx @@ -8,14 +8,219 @@ $ reth p2p body --help ```txt Usage: reth p2p body [OPTIONS] -Arguments: - - The block number or hash - Options: + --retries + The number of retries per request + + [default: 5] + -h, --help Print help (see a summary with '-h') +Networking: + -d, --disable-discovery + Disable the discovery service + + --disable-dns-discovery + Disable the DNS discovery + + --disable-discv4-discovery + Disable Discv4 discovery + + --enable-discv5-discovery + Enable Discv5 discovery + + --disable-nat + Disable Nat discovery + + --discovery.addr + The UDP address to use for devp2p peer discovery version 4 + + [default: 0.0.0.0] + + --discovery.port + The UDP port to use for devp2p peer discovery version 4 + + [default: 30303] + + --discovery.v5.addr + The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv4 + + --discovery.v5.addr.ipv6 + The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv6 + + --discovery.v5.port + The UDP IPv4 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv4, or `--discovery.v5.addr` is set + + [default: 9200] + + --discovery.v5.port.ipv6 + The UDP IPv6 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv6, or `--discovery.addr.ipv6` is set + + [default: 9200] + + --discovery.v5.lookup-interval + The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program + + [default: 20] + + --discovery.v5.bootstrap.lookup-interval + The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap + + [default: 5] + + --discovery.v5.bootstrap.lookup-countdown + The number of times to carry out boost lookup queries at bootstrap + + [default: 200] + + --trusted-peers + Comma separated enode URLs of trusted peers for P2P connections. 
+ + --trusted-peers enode://abcd@192.168.0.1:30303 + + --trusted-only + Connect to or accept from trusted peers only + + --bootnodes + Comma separated enode URLs for P2P discovery bootstrap. + + Will fall back to a network-specific default if not specified. + + --dns-retries + Amount of DNS resolution requests retries to perform when peering + + [default: 0] + + --peers-file + The path to the known peers file. Connected peers are dumped to this file on nodes + shutdown, and read on startup. Cannot be used with `--no-persist-peers`. + + --identity + Custom node identity + + [default: reth/-/] + + --p2p-secret-key + Secret key to use for this node. + + This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. + + --no-persist-peers + Do not persist peers. + + --nat + NAT resolution method (any|none|upnp|publicip|extip:\) + + [default: any] + + --addr + Network listening address + + [default: 0.0.0.0] + + --port + Network listening port + + [default: 30303] + + --max-outbound-peers + Maximum number of outbound requests. default: 100 + + --max-inbound-peers + Maximum number of inbound requests. default: 30 + + --max-tx-reqs + Max concurrent `GetPooledTransactions` requests. + + [default: 130] + + --max-tx-reqs-peer + Max concurrent `GetPooledTransactions` requests per peer. + + [default: 1] + + --max-seen-tx-history + Max number of seen transactions to remember per peer. + + Default is 320 transaction hashes. + + [default: 320] + + --max-pending-imports + Max number of transactions to import concurrently. + + [default: 4096] + + --pooled-tx-response-soft-limit + Experimental, for usage in research. Sets the max accumulated byte size of transactions + to pack in one response. + Spec'd at 2MiB. + + [default: 2097152] + + --pooled-tx-pack-soft-limit + Experimental, for usage in research. Sets the max accumulated byte size of transactions to + request in one request. + + Since `RLPx` protocol version 68, the byte size of a transaction is shared as metadata in a + transaction announcement (see `RLPx` specs). This allows a node to request a specific size + response. + + By default, nodes request only 128 KiB worth of transactions, but should a peer request + more, up to 2 MiB, a node will answer with more than 128 KiB. + + Default is 128 KiB. + + [default: 131072] + + --max-tx-pending-fetch + Max capacity of cache of hashes for transactions pending fetch. + + [default: 25600] + + --net-if.experimental + Name of network interface used to communicate with peers. + + If flag is set, but no value is passed, the default interface for docker `eth0` is tried. + + --tx-propagation-policy + Transaction Propagation Policy + + The policy determines which peers transactions are gossiped to. + + [default: All] + +Datadir: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --datadir.static-files + The absolute path to store static files in. + + --config + The path to the configuration file to use. + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. 
+ + Built-in chains: + mainnet, sepolia, holesky, hoodi, dev + + [default: mainnet] + + + The block number or hash + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx new file mode 100644 index 00000000000..a7edd5b9a53 --- /dev/null +++ b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx @@ -0,0 +1,114 @@ +# reth p2p bootnode + +Bootnode command + +```bash +$ reth p2p bootnode --help +``` +```txt +Usage: reth p2p bootnode [OPTIONS] + +Options: + --addr + Listen address for the bootnode (default: ":30301") + + [default: :30301] + + --gen-key + Generate a new node key and save it to the specified file + + [default: ] + + --node-key + Private key filename for the node + + [default: ] + + --nat + NAT resolution method (any|none|upnp|publicip|extip:\) + + [default: any] + + --v5 + Run a v5 topic discovery bootnode + + -h, --help + Print help (see a summary with '-h') + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + [default: terminal] + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + [default: terminal] + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + [default: always] + + Possible values: + - always: Colors on + - auto: Colors on + - never: Colors off + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) 
+ + -q, --quiet + Silence all log output +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx index 8b1f6b96cd8..1fbaa1b1989 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx @@ -8,14 +8,219 @@ $ reth p2p header --help ```txt Usage: reth p2p header [OPTIONS] -Arguments: - - The header number or hash - Options: + --retries + The number of retries per request + + [default: 5] + -h, --help Print help (see a summary with '-h') +Networking: + -d, --disable-discovery + Disable the discovery service + + --disable-dns-discovery + Disable the DNS discovery + + --disable-discv4-discovery + Disable Discv4 discovery + + --enable-discv5-discovery + Enable Discv5 discovery + + --disable-nat + Disable Nat discovery + + --discovery.addr + The UDP address to use for devp2p peer discovery version 4 + + [default: 0.0.0.0] + + --discovery.port + The UDP port to use for devp2p peer discovery version 4 + + [default: 30303] + + --discovery.v5.addr + The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv4 + + --discovery.v5.addr.ipv6 + The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv6 + + --discovery.v5.port + The UDP IPv4 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv4, or `--discovery.v5.addr` is set + + [default: 9200] + + --discovery.v5.port.ipv6 + The UDP IPv6 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv6, or `--discovery.addr.ipv6` is set + + [default: 9200] + + --discovery.v5.lookup-interval + The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program + + [default: 20] + + --discovery.v5.bootstrap.lookup-interval + The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap + + [default: 5] + + --discovery.v5.bootstrap.lookup-countdown + The number of times to carry out boost lookup queries at bootstrap + + [default: 200] + + --trusted-peers + Comma separated enode URLs of trusted peers for P2P connections. + + --trusted-peers enode://abcd@192.168.0.1:30303 + + --trusted-only + Connect to or accept from trusted peers only + + --bootnodes + Comma separated enode URLs for P2P discovery bootstrap. + + Will fall back to a network-specific default if not specified. + + --dns-retries + Amount of DNS resolution requests retries to perform when peering + + [default: 0] + + --peers-file + The path to the known peers file. Connected peers are dumped to this file on nodes + shutdown, and read on startup. Cannot be used with `--no-persist-peers`. + + --identity + Custom node identity + + [default: reth/-/] + + --p2p-secret-key + Secret key to use for this node. + + This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. + + --no-persist-peers + Do not persist peers. + + --nat + NAT resolution method (any|none|upnp|publicip|extip:\) + + [default: any] + + --addr + Network listening address + + [default: 0.0.0.0] + + --port + Network listening port + + [default: 30303] + + --max-outbound-peers + Maximum number of outbound requests. default: 100 + + --max-inbound-peers + Maximum number of inbound requests. default: 30 + + --max-tx-reqs + Max concurrent `GetPooledTransactions` requests. 
+ + [default: 130] + + --max-tx-reqs-peer + Max concurrent `GetPooledTransactions` requests per peer. + + [default: 1] + + --max-seen-tx-history + Max number of seen transactions to remember per peer. + + Default is 320 transaction hashes. + + [default: 320] + + --max-pending-imports + Max number of transactions to import concurrently. + + [default: 4096] + + --pooled-tx-response-soft-limit + Experimental, for usage in research. Sets the max accumulated byte size of transactions + to pack in one response. + Spec'd at 2MiB. + + [default: 2097152] + + --pooled-tx-pack-soft-limit + Experimental, for usage in research. Sets the max accumulated byte size of transactions to + request in one request. + + Since `RLPx` protocol version 68, the byte size of a transaction is shared as metadata in a + transaction announcement (see `RLPx` specs). This allows a node to request a specific size + response. + + By default, nodes request only 128 KiB worth of transactions, but should a peer request + more, up to 2 MiB, a node will answer with more than 128 KiB. + + Default is 128 KiB. + + [default: 131072] + + --max-tx-pending-fetch + Max capacity of cache of hashes for transactions pending fetch. + + [default: 25600] + + --net-if.experimental + Name of network interface used to communicate with peers. + + If flag is set, but no value is passed, the default interface for docker `eth0` is tried. + + --tx-propagation-policy + Transaction Propagation Policy + + The policy determines which peers transactions are gossiped to. + + [default: All] + +Datadir: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --datadir.static-files + The absolute path to store static files in. + + --config + The path to the configuration file to use. + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + mainnet, sepolia, holesky, hoodi, dev + + [default: mainnet] + + + The header number or hash + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/docs/vocs/docs/pages/cli/reth/debug/build-block.mdx b/docs/vocs/docs/pages/cli/reth/re-execute.mdx similarity index 86% rename from docs/vocs/docs/pages/cli/reth/debug/build-block.mdx rename to docs/vocs/docs/pages/cli/reth/re-execute.mdx index ac8ab6d3214..22883e9d610 100644 --- a/docs/vocs/docs/pages/cli/reth/debug/build-block.mdx +++ b/docs/vocs/docs/pages/cli/reth/re-execute.mdx @@ -1,12 +1,12 @@ -# reth debug build-block +# reth re-execute -Debug block building +Re-execute blocks in parallel to verify historical sync correctness ```bash -$ reth debug build-block --help +$ reth re-execute --help ``` ```txt -Usage: reth debug build-block [OPTIONS] --prev-randao --timestamp --suggested-fee-recipient +Usage: reth re-execute [OPTIONS] Options: -h, --help @@ -67,23 +67,18 @@ Database: --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout - --parent-beacon-block-root + --from + The height to start at + [default: 1] - --prev-randao + --to + The height to end at. Defaults to the latest block + --num-tasks + Number of tasks to run in parallel - --timestamp - - - --suggested-fee-recipient - - - --transactions - Array of transactions. 
NOTE: 4844 transactions must be provided in the same order as they appear in the blobs bundle - - --blobs-bundle-path - Path to the file that contains a corresponding blobs bundle + [default: 10] Logging: --log.stdout.format diff --git a/docs/vocs/docs/pages/guides/history-expiry.mdx b/docs/vocs/docs/pages/guides/history-expiry.mdx new file mode 100644 index 00000000000..1f03b6b4aca --- /dev/null +++ b/docs/vocs/docs/pages/guides/history-expiry.mdx @@ -0,0 +1,80 @@ +--- +description: Usage of tools for importing, exporting and pruning historical blocks +--- + +# History Expiry + +In this chapter, we will learn how to use tools for dealing with historical data: its import, export, and removal. + +We will use the [reth cli](../cli/cli) to import and export historical data. + +## Enabling Pre-merge history expiry + +Opting into pre-merge history expiry will remove all pre-merge transaction/receipt data (static files) for mainnet and sepolia. + +For new and existing nodes: + +Use the flags `--prune.bodies.pre-merge` and `--prune.receipts.pre-merge`. + +See also [Partial history expiry announcement](https://blog.ethereum.org/2025/07/08/partial-history-exp) + +## File format + +The historical data is packaged and distributed in files of special formats with different names, all of which are based on [e2store](https://github.com/status-im/nimbus-eth2/blob/613f4a9a50c9c4bd8568844eaffb3ac15d067e56/docs/e2store.md#introduction). The most important ones are **ERA1**, which covers the block range from genesis until the last pre-merge block, and **ERA**, which covers the block range from the merge onwards. + +See the following specifications for more details: +- [E2store specification](https://github.com/eth-clients/e2store-format-specs) +- [ERA1 specification](https://github.com/eth-clients/e2store-format-specs/blob/main/formats/era1.md) +- [ERA specification](https://github.com/eth-clients/e2store-format-specs/blob/main/formats/era.md) + +The contents of these archives are an ordered sequence of blocks. We're mostly concerned with headers and transactions. For ERA1, there are 8192 blocks per file, except for the last one, i.e. the one containing the final pre-merge block, which may contain fewer. + +## Import + +In this section, we discuss how to get blocks from ERA1 files. + +### Automatic sync + +If enabled, importing blocks from ERA1 files can be done automatically, with no manual steps required. + +#### Enabling the ERA stage + +The import from ERA1 files within the pre-merge block range is included in the [reth node](/cli/reth/node) synchronization pipeline. It is disabled by default. To enable it, pass the `--era.enable` flag when running the [`node`](/cli/reth/node) command. + +The benefit of using this option is a significant increase in synchronization speed for the headers and, especially, the bodies stages of the pipeline within the ERA1 block range. We encourage you to use it! Eventually, it will become enabled by default. + +#### Using the ERA stage + +When enabled, the import from ERA1 files runs as its own separate stage before all others. It is an optional stage that does the work of the headers and bodies stages at a significantly higher speed. The checkpoints of these stages are shifted accordingly by the ERA stage. + +### Manual import + +If you want to import block headers and transactions from ERA1 files without running the synchronization pipeline, you may use the [`import-era`](../cli/reth/import-era) command, as in the sketch below.
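+
+For orientation, here is a minimal sketch of both routes. The directory path and the hosting provider URL below are placeholders, and the source options (`--era.path`, `--era.url`) are described under Options below:
+
+```bash
+# Automatic: enable the ERA stage in the node's synchronization pipeline
+reth node --era.enable
+
+# Manual: import from ERA1 files without running the pipeline,
+# either from a local directory or from a remote hosting provider (placeholder URL)
+reth import-era --chain mainnet --era.path /path/to/era1
+reth import-era --chain mainnet --era.url https://era1.example.org
+```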
+ +### Options + +Both ways of importing ERA1 files have the same options because they use the same underlying subsystems. No options are mandatory. + +#### Sources + +There are two kinds of data sources for the ERA1 import. +* Remote, from an HTTP URL. Use the option `--era.url` with an ERA1 hosting provider URL. +* Local, from a file-system directory. Use the option `--era.path` with a directory containing ERA1 files. + +These two options cannot be used at the same time. If neither option is specified, the remote source is used with a URL derived from the chain ID. Only Mainnet and Sepolia have ERA1 files. If the node is running on a different chain, no source is provided and nothing is imported. + +## Export + +In this section, we discuss how to export block data into ERA1 files. + +### Manual export + +You can manually export block data from your database to ERA1 files using the [`export-era`](../cli/reth/export-era) command. + +The CLI reads block headers, bodies, and receipts from your local database and packages them into the standardized ERA1 format, with up to 8,192 blocks per file. + +#### Set up + +The export command allows you to specify: + +- Block ranges with `--first-block-number` and `--last-block-number` +- The output directory with `--path` for the export destination +- File size limits with `--max-blocks-per-file`, with a maximum of 8,192 blocks per ERA1 file diff --git a/docs/vocs/docs/pages/index.mdx b/docs/vocs/docs/pages/index.mdx index 5e65d0695ce..8778914f4c8 100644 --- a/docs/vocs/docs/pages/index.mdx +++ b/docs/vocs/docs/pages/index.mdx @@ -25,7 +25,7 @@ import { TrustedBy } from "../components/TrustedBy";
Run a Node - Build a Node + Build a Node Why Reth?
@@ -117,7 +117,7 @@ import { TrustedBy } from "../components/TrustedBy";
- +
Customizable
Build custom nodes with tailored transaction handling
@@ -150,7 +150,7 @@ Leading infra companies use Reth for MEV applications, staking, RPC services and ## Built with Reth SDK -Production chains and networks powered by Reth's modular architecture. These nodes are built using existing components without forking, saving several engineering hours while improving maintainability. +Production chains and networks are powered by Reth's modular architecture. These nodes are built using existing components without forking, saving several engineering hours while improving maintainability.
diff --git a/docs/vocs/docs/pages/introduction/why-reth.mdx b/docs/vocs/docs/pages/introduction/why-reth.mdx index f140c0e3128..1b03870a877 100644 --- a/docs/vocs/docs/pages/introduction/why-reth.mdx +++ b/docs/vocs/docs/pages/introduction/why-reth.mdx @@ -46,5 +46,5 @@ Reth isn't just a tool—it's a movement toward better blockchain infrastructure **Ready to build the future?** - [Get Started](/run/ethereum) with running your first Reth node -- [Explore the SDK](/sdk/overview) to build custom blockchain infrastructure +- [Explore the SDK](/sdk) to build custom blockchain infrastructure - [Join the Community](https://github.com/paradigmxyz/reth) and contribute to the future of Ethereum diff --git a/docs/vocs/docs/pages/jsonrpc/admin.mdx b/docs/vocs/docs/pages/jsonrpc/admin.mdx index cf1ef29c05b..481a4f76d76 100644 --- a/docs/vocs/docs/pages/jsonrpc/admin.mdx +++ b/docs/vocs/docs/pages/jsonrpc/admin.mdx @@ -43,7 +43,7 @@ Disconnects from a peer if the connection exists. Returns a `bool` indicating wh ## `admin_addTrustedPeer` -Adds the given peer to a list of trusted peers, which allows the peer to always connect, even if there would be no room for it otherwise. +Adds the given peer to a list of trusted peers, which allows the peer to always connect, even if there is no room for it otherwise. It returns a `bool` indicating whether the peer was added to the list or not. diff --git a/docs/vocs/docs/pages/jsonrpc/debug.mdx b/docs/vocs/docs/pages/jsonrpc/debug.mdx index aa3a47685c6..5b435d7dca7 100644 --- a/docs/vocs/docs/pages/jsonrpc/debug.mdx +++ b/docs/vocs/docs/pages/jsonrpc/debug.mdx @@ -55,7 +55,7 @@ Returns the structured logs created during the execution of EVM between two bloc ## `debug_traceBlock` -The `debug_traceBlock` method will return a full stack trace of all invoked opcodes of all transaction that were included in this block. +The `debug_traceBlock` method will return a full stack trace of all invoked opcodes of all transactions that were included in this block. This expects an RLP-encoded block. @@ -93,7 +93,7 @@ The `debug_traceTransaction` debugging method will attempt to run the transactio ## `debug_traceCall` -The `debug_traceCall` method lets you run an `eth_call` within the context of the given block execution using the final state of parent block as the base. +The `debug_traceCall` method lets you run an `eth_call` within the context of the given block execution using the final state of the parent block as the base. The first argument (just as in `eth_call`) is a transaction request. diff --git a/docs/vocs/docs/pages/jsonrpc/trace.mdx b/docs/vocs/docs/pages/jsonrpc/trace.mdx index 464832db70e..d1ddd3ca55c 100644 --- a/docs/vocs/docs/pages/jsonrpc/trace.mdx +++ b/docs/vocs/docs/pages/jsonrpc/trace.mdx @@ -4,8 +4,6 @@ description: Trace API for inspecting Ethereum state and transactions. # `trace` Namespace -{/* TODO: We should probably document the format of the traces themselves, OE does not do that */} - The `trace` API provides several methods to inspect the Ethereum state, including Parity-style traces. A similar module exists (with other debug functions) with Geth-style traces ([`debug`](/jsonrpc/debug)). @@ -17,6 +15,128 @@ There are two types of methods in this API: - **Ad-hoc tracing APIs** for performing diagnostics on calls or transactions (historical or hypothetical). - **Transaction-trace filtering APIs** for getting full externality traces on any transaction executed by reth. 
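+
+For orientation, here is a minimal `trace_call` request. It assumes a local node with the `trace` namespace enabled on the default HTTP RPC port (8545), and it reuses the placeholder accounts from the examples further down this page; the second parameter lists the requested trace types, and the optional third parameter is the block:
+
+```bash
+# Ad-hoc trace of a hypothetical call against the latest block,
+# requesting both the call trace and the state diff
+curl -s -X POST http://localhost:8545 \
+  -H "Content-Type: application/json" \
+  -d '{
+    "jsonrpc": "2.0",
+    "id": 1,
+    "method": "trace_call",
+    "params": [
+      {"from": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b", "to": "0x6295ee1b4f6dd65047762f924ecd367c17eabf8f", "value": "0x0"},
+      ["trace", "stateDiff"],
+      "latest"
+    ]
+  }'
+```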
+## Trace Format Specification + +The trace API returns different types of trace data depending on the requested trace types. Understanding these formats is crucial for interpreting the results. + +### TraceResults + +The `TraceResults` object is returned by ad-hoc tracing methods (`trace_call`, `trace_callMany`, `trace_rawTransaction`, `trace_replayTransaction`, `trace_replayBlockTransactions`). It contains the following fields: + +| Field | Type | Description | +|-------|------|-------------| +| `output` | `string` | The return value of the traced call, encoded as hex | +| `stateDiff` | `object \| null` | State changes caused by the transaction (only if `stateDiff` trace type requested) | +| `trace` | `array \| null` | Array of transaction traces (only if `trace` trace type requested) | +| `vmTrace` | `object \| null` | Virtual machine execution trace (only if `vmTrace` trace type requested) | + +### LocalizedTransactionTrace + +The `trace_block`, `trace_filter`, `trace_get`, and `trace_transaction` methods return individual transaction traces as `LocalizedTransactionTrace` objects: + +| Field | Type | Description | +|-------|------|-------------| +| `action` | `object` | The action performed by this trace | +| `result` | `object \| null` | The result of the trace execution | +| `error` | `string \| null` | Error message if the trace failed | +| `blockHash` | `string \| null` | Hash of the block containing this trace | +| `blockNumber` | `number \| null` | Number of the block containing this trace | +| `transactionHash` | `string \| null` | Hash of the transaction containing this trace | +| `transactionPosition` | `number \| null` | Position of the transaction in the block | +| `subtraces` | `number` | Number of child traces | +| `traceAddress` | `array` | Position of this trace in the call tree | +| `type` | `string` | Type of action: `"call"`, `"create"`, `"suicide"`, or `"reward"` | + +### Action Types + +#### Call Action (`type: "call"`) + +| Field | Type | Description | +|-------|------|-------------| +| `callType` | `string` | Type of call: `"call"`, `"callcode"`, `"delegatecall"`, or `"staticcall"` | +| `from` | `string` | Address of the caller | +| `to` | `string` | Address of the callee | +| `gas` | `string` | Gas provided for the call | +| `input` | `string` | Input data for the call | +| `value` | `string` | Value transferred in the call | + +#### Create Action (`type: "create"`) + +| Field | Type | Description | +|-------|------|-------------| +| `from` | `string` | Address of the creator | +| `gas` | `string` | Gas provided for contract creation | +| `init` | `string` | Contract initialization code | +| `value` | `string` | Value sent to the new contract | + +#### Suicide Action (`type: "suicide"`) + +| Field | Type | Description | +|-------|------|-------------| +| `address` | `string` | Address of the contract being destroyed | +| `refundAddress` | `string` | Address receiving the remaining balance | +| `balance` | `string` | Balance transferred to refund address | + +#### Reward Action (`type: "reward"`) + +| Field | Type | Description | +|-------|------|-------------| +| `author` | `string` | Address receiving the reward | +| `value` | `string` | Amount of the reward | +| `rewardType` | `string` | Type of reward: `"block"` or `"uncle"` | + +### Result Format + +When a trace executes successfully, the `result` field contains: + +| Field | Type | Description | +|-------|------|-------------| +| `gasUsed` | `string` | Amount of gas consumed by this trace | +| `output` | 
`string` | Return data from the trace execution | +| `address` | `string` | Created contract address (for create actions only) | +| `code` | `string` | Deployed contract code (for create actions only) | + +### State Diff Format + +When the `stateDiff` trace type is requested, the `stateDiff` field contains an object mapping addresses to their state changes: + +```json +{ + "0x123...": { + "balance": { + "*": { + "from": "0x0", + "to": "0x1000" + } + }, + "nonce": { + "*": { + "from": "0x0", + "to": "0x1" + } + }, + "code": { + "*": { + "from": "0x", + "to": "0x608060405234801561001057600080fd5b50..." + } + }, + "storage": { + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563": { + "*": { + "from": "0x0", + "to": "0x1" + } + } + } + } +} +``` + +### VM Trace Format + +When the `vmTrace` trace type is requested, the `vmTrace` field contains detailed virtual machine execution information including opcodes, stack, memory, and storage changes at each step. The exact format depends on the specific VM tracer implementation. + ## Ad-hoc tracing APIs Ad-hoc tracing APIs allow you to perform diagnostics on calls or transactions (historical or hypothetical), including: @@ -71,7 +191,14 @@ The third and optional parameter is a block number, block hash, or a block tag ( "output": "0x", "stateDiff": null, "trace": [{ - "action": { ... }, + "action": { + "callType": "call", + "from": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000000", + "gas": "0x76c0", + "input": "0x", + "value": "0x0" + }, "result": { "gasUsed": "0x0", "output": "0x" @@ -170,9 +297,16 @@ Traces a call to `eth_sendRawTransaction` without making the call, returning the "jsonrpc": "2.0", "result": { "output": "0x", - "stateDiff": null, - "trace": [{ - "action": { ... }, + "stateDiff": null, + "trace": [{ + "action": { + "callType": "call", + "from": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b", + "to": "0x6295ee1b4f6dd65047762f924ecd367c17eabf8f", + "gas": "0x186a0", + "input": "0x", + "value": "0x0" + }, "result": { "gasUsed": "0x0", "output": "0x" @@ -181,7 +315,7 @@ Traces a call to `eth_sendRawTransaction` without making the call, returning the "traceAddress": [], "type": "call" }], - "vmTrace": null + "vmTrace": null } } ``` @@ -206,7 +340,14 @@ Replays all transactions in a block returning the requested traces for each tran "output": "0x", "stateDiff": null, "trace": [{ - "action": { ... }, + "action": { + "callType": "call", + "from": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b", + "to": "0x6295ee1b4f6dd65047762f924ecd367c17eabf8f", + "gas": "0x186a0", + "input": "0x", + "value": "0x0" + }, "result": { "gasUsed": "0x0", "output": "0x" @@ -215,10 +356,9 @@ Replays all transactions in a block returning the requested traces for each tran "traceAddress": [], "type": "call" }], - "transactionHash": "0x...", + "transactionHash": "0x4e70b5d8d5dc43e0e61e4a8f1e6e4e6e4e6e4e6e4e6e4e6e4e6e4e6e4e6e4e6e4", "vmTrace": null - }, - { ... } + } ] } ``` @@ -242,10 +382,17 @@ Replays a transaction, returning the traces. "output": "0x", "stateDiff": null, "trace": [{ - "action": { ... 
}, + "action": { + "callType": "call", + "from": "0x1c39ba39e4735cb65978d4db400ddd70a72dc750", + "to": "0x2bd2326c993dfaef84f696526064ff22eba5b362", + "gas": "0x13e99", + "input": "0x16c72721", + "value": "0x0" + }, "result": { - "gasUsed": "0x0", - "output": "0x" + "gasUsed": "0x183", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001" }, "subtraces": 0, "traceAddress": [], @@ -292,8 +439,7 @@ Returns traces created at given block. "transactionHash": "0x07da28d752aba3b9dd7060005e554719c6205c8a3aea358599fc9b245c52f1f6", "transactionPosition": 0, "type": "call" - }, - ... + } ] } ``` @@ -345,8 +491,7 @@ All properties are optional. "transactionHash": "0x3321a7708b1083130bd78da0d62ead9f6683033231617c9d268e2c7e3fa6c104", "transactionPosition": 3, "type": "call" - }, - ... + } ] } ``` @@ -430,8 +575,7 @@ Returns all traces of given transaction "transactionHash": "0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3", "transactionPosition": 2, "type": "call" - }, - ... + } ] } ``` diff --git a/docs/vocs/docs/pages/overview.mdx b/docs/vocs/docs/pages/overview.mdx index e467dacc03f..33bc607bd45 100644 --- a/docs/vocs/docs/pages/overview.mdx +++ b/docs/vocs/docs/pages/overview.mdx @@ -111,4 +111,4 @@ You can contribute to the docs on [GitHub][gh-docs]. [tg-badge]: https://img.shields.io/endpoint?color=neon&logo=telegram&label=chat&url=https%3A%2F%2Ftg.sumanjay.workers.dev%2Fparadigm%5Freth [tg-url]: https://t.me/paradigm_reth -[gh-docs]: https://github.com/paradigmxyz/reth/tree/main/book +[gh-docs]: https://github.com/paradigmxyz/reth/tree/main/docs diff --git a/docs/vocs/docs/pages/run/ethereum.mdx b/docs/vocs/docs/pages/run/ethereum.mdx index 3c488416ec9..e5663d63041 100644 --- a/docs/vocs/docs/pages/run/ethereum.mdx +++ b/docs/vocs/docs/pages/run/ethereum.mdx @@ -4,7 +4,7 @@ description: How to run Reth on Ethereum mainnet and testnets. # Running Reth on Ethereum Mainnet or testnets -Reth is an [_execution client_](https://ethereum.org/en/developers/docs/nodes-and-clients/#execution-clients). After Ethereum's transition to Proof of Stake (aka the Merge) it became required to run a [_consensus client_](https://ethereum.org/en/developers/docs/nodes-and-clients/#consensus-clients) along your execution client in order to sync into any "post-Merge" network. This is because the Ethereum execution layer now outsources consensus to a separate component, known as the consensus client. +Reth is an [_execution client_](https://ethereum.org/en/developers/docs/nodes-and-clients/#execution-clients). After Ethereum's transition to Proof of Stake (aka the Merge) it became required to run a [_consensus client_](https://ethereum.org/en/developers/docs/nodes-and-clients/#consensus-clients) along with your execution client in order to sync into any "post-Merge" network. This is because the Ethereum execution layer now outsources consensus to a separate component, known as the consensus client. Consensus clients decide what blocks are part of the chain, while execution clients only validate that transactions and blocks are valid in themselves and with respect to the world state. In other words, execution clients execute blocks and transactions and check their validity, while consensus clients determine which valid blocks should be part of the chain. Therefore, running a consensus client in parallel with the execution client is necessary to ensure synchronization and participation in the network. 
@@ -43,7 +43,7 @@ You can change this by adding the `--http`, `--ws` flags, respectively and using For more commands, see the [`reth node` CLI reference](/cli/cli). ::: -The EL \<> CL communication happens over the [Engine API](https://github.com/ethereum/execution-apis/blob/main/src/engine/common), which is by default exposed at `http://localhost:8551`. The connection is authenticated over JWT using a JWT secret which is auto-generated by Reth and placed in a file called `jwt.hex` in the data directory, which on Linux by default is `$HOME/.local/share/reth/` (`/Users/<username>/Library/Application Support/reth/mainnet/jwt.hex` in Mac). +The EL \<> CL communication happens over the [Engine API](https://github.com/ethereum/execution-apis/blob/main/src/engine/common.md), which is by default exposed at `http://localhost:8551`. The connection is authenticated over JWT using a JWT secret which is auto-generated by Reth and placed in a file called `jwt.hex` in the data directory, which on Linux by default is `$HOME/.local/share/reth/` (`/Users/<username>/Library/Application Support/reth/mainnet/jwt.hex` in Mac). You can override this path using the `--authrpc.jwtsecret` option. You MUST use the same JWT secret in BOTH Reth and the chosen Consensus Layer. If you want to override the address or port, you can use the `--authrpc.addr` and `--authrpc.port` options, respectively. @@ -77,7 +77,7 @@ If you don't intend on running validators on your node you can add: --disable-deposit-contract-sync ``` -The `--checkpoint-sync-url` argument value can be replaced with any checkpoint sync endpoint from a [community maintained list](https://eth-clients.github.io/checkpoint-sync-endpoints/#mainnet). +The `--checkpoint-sync-url` argument value can be replaced with any checkpoint sync endpoint from a [community-maintained list](https://eth-clients.github.io/checkpoint-sync-endpoints/#mainnet). Your Reth node should start receiving "fork choice updated" messages, and begin syncing the chain. @@ -88,13 +88,13 @@ In the meantime, consider setting up [observability](/run/monitoring) to monitor {/* TODO: Add more logs to help node operators debug any weird CL to EL messages! */} -[installation]: ./../installation/installation +[installation]: ./../../installation/overview [docs]: https://github.com/paradigmxyz/reth/tree/main/docs -[metrics]: https://github.com/paradigmxyz/reth/blob/main/docs/design/metrics#current-metrics +[metrics]: https://github.com/paradigmxyz/reth/blob/main/docs/design/metrics.md#metrics ## Running without a Consensus Layer -We provide a method for running Reth without a Consensus Layer via the `--debug.tip <block_hash>` parameter. If you provide that to your node, it will simulate sending an `engine_forkchoiceUpdated` message _once_ and will trigger syncing to the provided block hash. This is useful for testing and debugging purposes, but in order to have a node that can keep up with the tip you'll need to run a CL alongside it. At the moment we have no plans of including a Consensus Layer implementation in Reth, and we are open to including light clients other methods of syncing like importing Lighthouse as a library. +We provide a method for running Reth without a Consensus Layer via the `--debug.tip <block_hash>` parameter. If you provide that to your node, it will simulate sending an `engine_forkchoiceUpdated` message _once_ and will trigger syncing to the provided block hash. This is useful for testing and debugging purposes, but in order to have a node that can keep up with the tip you'll need to run a CL alongside it. 
At the moment we have no plans of including a Consensus Layer implementation in Reth, and we are open to including light clients and other methods of syncing like importing Lighthouse as a library. ## Running with Etherscan as Block Source diff --git a/docs/vocs/docs/pages/run/faq/sync-op-mainnet.mdx b/docs/vocs/docs/pages/run/faq/sync-op-mainnet.mdx index e895331288e..58fe9a2babe 100644 --- a/docs/vocs/docs/pages/run/faq/sync-op-mainnet.mdx +++ b/docs/vocs/docs/pages/run/faq/sync-op-mainnet.mdx @@ -11,7 +11,7 @@ To sync OP mainnet, Bedrock state needs to be imported as a starting point. Ther ## Minimal bootstrap (recommended) -**The state snapshot at Bedrock block is required.** It can be exported from [op-geth](https://github.com/testinprod-io/op-erigon/blob/pcw109550/bedrock-db-migration/bedrock-migration#export-state) (**.jsonl**) or downloaded directly from [here](https://mega.nz/file/GdZ1xbAT#a9cBv3AqzsTGXYgX7nZc_3fl--tcBmOAIwIA5ND6kwc). +**The state snapshot at Bedrock block is required.** It can be exported from [op-geth](https://github.com/testinprod-io/op-erigon/blob/pcw109550/bedrock-db-migration/bedrock-migration.md#export-state) (**.jsonl**) or downloaded directly from [here](https://mega.nz/file/GdZ1xbAT#a9cBv3AqzsTGXYgX7nZc_3fl--tcBmOAIwIA5ND6kwc). Import the state snapshot diff --git a/docs/vocs/docs/pages/run/faq/troubleshooting.mdx b/docs/vocs/docs/pages/run/faq/troubleshooting.mdx index 08b9c6fbe5d..1f26cba9dae 100644 --- a/docs/vocs/docs/pages/run/faq/troubleshooting.mdx +++ b/docs/vocs/docs/pages/run/faq/troubleshooting.mdx @@ -58,7 +58,7 @@ Currently, there are two main ways to fix this issue. #### Compact the database It will take around 5-6 hours and require **additional** disk space located on the same or different drive -equal to the [freshly synced node](/installation/overview#hardware-requirements). +equal to the [freshly synced node](/run/system-requirements). 1. Clone Reth ```bash diff --git a/docs/vocs/docs/pages/run/monitoring.mdx b/docs/vocs/docs/pages/run/monitoring.mdx index d09b795dc4b..30ce967bb10 100644 --- a/docs/vocs/docs/pages/run/monitoring.mdx +++ b/docs/vocs/docs/pages/run/monitoring.mdx @@ -57,7 +57,7 @@ cd prometheus-* # Install Grafana sudo apt-get install -y apt-transport-https software-properties-common wget -q -O - https://packages.grafana.com/gpg.key | sudo apt-key add - -echo "deb https://packages.grafana.com/oss/deb stable main" | sudo tee -a /etc/apt/sources.list.d/grafana.list +echo "deb https://packages.grafana.com stable main" | sudo tee -a /etc/apt/sources.list.d/grafana.list sudo apt-get update sudo apt-get install grafana ``` @@ -141,4 +141,4 @@ This will all be very useful to you, whether you're simply running a home node a [installation]: ../installation/installation [release-profile]: https://doc.rust-lang.org/cargo/reference/profiles.html#release [docs]: https://github.com/paradigmxyz/reth/tree/main/docs -[metrics]: https://github.com/paradigmxyz/reth/blob/main/docs/design/metrics#current-metrics +[metrics]: https://reth.rs/run/observability.html diff --git a/docs/vocs/docs/pages/run/opstack.mdx b/docs/vocs/docs/pages/run/opstack.mdx index 86e9ad72438..d472485be60 100644 --- a/docs/vocs/docs/pages/run/opstack.mdx +++ b/docs/vocs/docs/pages/run/opstack.mdx @@ -91,13 +91,13 @@ op-node \ Consider adding the `--l1.trustrpc` flag to improve performance, if the connection to l1 is over localhost. 
[l1-el-spec]: https://github.com/ethereum/execution-specs -[rollup-node-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/rollup-node +[rollup-node-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/rollup-node.md [op-geth-forkdiff]: https://op-geth.optimism.io -[sequencer]: https://github.com/ethereum-optimism/specs/blob/main/specs/background#sequencers +[sequencer]: https://github.com/ethereum-optimism/specs/blob/main/specs/background.md#sequencers [op-stack-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs -[l2-el-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/exec-engine -[deposit-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/deposits -[derivation-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/derivation +[l2-el-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/exec-engine.md +[deposit-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/deposits.md +[derivation-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/derivation.md [superchain-registry]: https://github.com/ethereum-optimism/superchain-registry [op-node-docker]: https://console.cloud.google.com/artifacts/docker/oplabs-tools-artifacts/us/images/op-node [reth]: https://github.com/paradigmxyz/reth diff --git a/docs/vocs/docs/pages/run/system-requirements.mdx b/docs/vocs/docs/pages/run/system-requirements.mdx index 60e30189f6a..9db3294f68e 100644 --- a/docs/vocs/docs/pages/run/system-requirements.mdx +++ b/docs/vocs/docs/pages/run/system-requirements.mdx @@ -55,7 +55,7 @@ TLC (Triple-Level Cell) NVMe drives, on the other hand, use three bits of data p Most of the time during syncing is spent executing transactions, which is a single-threaded operation due to potential state dependencies of a transaction on previous ones. -As a result, the number of cores matters less, but in general higher clock speeds are better. More cores are better for parallelizable [stages](https://github.com/paradigmxyz/reth/blob/main/docs/crates/stages) (like sender recovery or bodies downloading), but these stages are not the primary bottleneck for syncing. +As a result, the number of cores matters less, but in general higher clock speeds are better. More cores are better for parallelizable [stages](https://github.com/paradigmxyz/reth/blob/main/docs/crates/stages.md) (like sender recovery or bodies downloading), but these stages are not the primary bottleneck for syncing. ## Memory diff --git a/docs/vocs/docs/pages/sdk/overview.mdx b/docs/vocs/docs/pages/sdk.mdx similarity index 100% rename from docs/vocs/docs/pages/sdk/overview.mdx rename to docs/vocs/docs/pages/sdk.mdx diff --git a/docs/vocs/docs/pages/sdk/examples/standalone-components.mdx b/docs/vocs/docs/pages/sdk/examples/standalone-components.mdx index 3c16e1cf123..8b77913f539 100644 --- a/docs/vocs/docs/pages/sdk/examples/standalone-components.mdx +++ b/docs/vocs/docs/pages/sdk/examples/standalone-components.mdx @@ -4,6 +4,87 @@ This guide demonstrates how to use Reth components independently without running ## Direct Database Access +Reth uses MDBX as its primary database backend, storing blockchain data in a structured format. You can access this database directly from external processes for read-only operations, which is useful for analytics, indexing, or building custom tools. 
+ +### Understanding the Database Architecture + +Reth's storage architecture consists of two main components: + +1. **MDBX Database**: Primary storage for blockchain state, headers, bodies, receipts, and indices +2. **Static Files**: Immutable historical data (headers, bodies, receipts, transactions) stored in compressed files for better performance + +Both components must be accessed together for complete data access. + +### Database Location + +The database is stored in the node's data directory: +- **Default location**: `$HOME/.local/share/reth/mainnet/db` (Linux/macOS) or `%APPDATA%\reth\mainnet\db` (Windows) +- **Custom location**: Set with `--datadir` flag when running reth +- **Static files**: Located in the `<datadir>/static_files` subdirectory + +### Opening the Database from External Processes + +When accessing the database while a node is running, you **must** open it in read-only mode to prevent corruption and conflicts. + +#### Using the High-Level API + +The safest way to access the database is through Reth's provider factory: + +```rust +use reth_ethereum::node::EthereumNode; +use reth_ethereum::chainspec::MAINNET; + +// Open with automatic configuration +let factory = EthereumNode::provider_factory_builder() + .open_read_only(MAINNET.clone(), "path/to/datadir")?; + +// Get a provider for queries +let provider = factory.provider()?; +let latest_block = provider.last_block_number()?; +``` + +### Performance Implications + +External reads while the node is syncing or processing blocks: + +- **I/O Competition**: May compete with the node for disk I/O +- **Cache Pollution**: Can evict hot data from OS page cache +- **CPU Impact**: Complex queries can impact node performance + +### Important Considerations + +1. **Read-Only Access Only**: Never open the database in write mode while the regular reth process is running. + +2. **Consistency**: When reading from an external process: + - Data may be slightly behind the latest processed block (if it hasn't been written to disk yet) + - Use transactions for consistent views across multiple reads + - Be aware of potential reorgs affecting recent blocks + +3. **Performance**: + - MDBX uses memory-mapped files for efficient access + - Multiple readers don't block each other + - Consider caching frequently accessed data + +### Disabling long-lived read transactions + +By default, long-lived read transactions are terminated after a few minutes, because a long-running read transaction can cause the database free list to grow while changes are being made (i.e. while the reth node is running). +This safety mechanism can be disabled as follows: + +```rust +let factory = EthereumNode::provider_factory_builder() + .open_read_only(MAINNET.clone(), ReadOnlyConfig::from_datadir("datadir").disable_long_read_transaction_safety())?; +``` + +### Real-time Block Access Configuration + +As a performance optimization, Reth buffers new blocks in memory before persisting them to disk. If your external process needs immediate access to the latest blocks, configure the node to persist blocks immediately: + +- `--engine.persistence-threshold 0` - Persists new canonical blocks to disk immediately +- `--engine.memory-block-buffer-target 0` - Disables in-memory block buffering + +Use both flags together to ensure external processes can read new blocks without delay. + +As soon as the reth process has persisted the block data, the external reader can read it from the database. 
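Tying the pieces above together, here is a rough sketch of an external read-only process that polls for newly persisted blocks. It reuses the `provider_factory_builder` API from the snippets above; the datadir path is a placeholder and the `BlockNumReader` import path is an assumption about where the provider traits are re-exported.

```rust
use std::{thread, time::Duration};

use reth_ethereum::{chainspec::MAINNET, node::EthereumNode, provider::BlockNumReader};

fn main() -> eyre::Result<()> {
    // Open the running node's database in read-only mode (placeholder path).
    let factory = EthereumNode::provider_factory_builder()
        .open_read_only(MAINNET.clone(), "/path/to/datadir")?;

    let mut last_seen = 0;
    loop {
        // A fresh provider per iteration means a fresh, short-lived MDBX read
        // transaction, so the long-read-transaction safety described above
        // never needs to be disabled for this polling pattern.
        let provider = factory.provider()?;
        let tip = provider.last_block_number()?;
        drop(provider);

        if tip > last_seen {
            println!("new persisted tip: {tip}");
            last_seen = tip;
        }
        thread::sleep(Duration::from_secs(1));
    }
}
```

Combined with `--engine.persistence-threshold 0` and `--engine.memory-block-buffer-target 0` on the node side, such a poller sees new blocks as soon as they are persisted.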
## Next Steps diff --git a/docs/vocs/docs/pages/sdk/node-components/evm.mdx b/docs/vocs/docs/pages/sdk/node-components/evm.mdx index 6047f69bd73..1460f8938f4 100644 --- a/docs/vocs/docs/pages/sdk/node-components/evm.mdx +++ b/docs/vocs/docs/pages/sdk/node-components/evm.mdx @@ -1,6 +1,6 @@ # EVM Component -The EVM (Ethereum Virtual Machine) component handles transaction execution and state transitionss. It's responsible for processing transactions and updating the blockchain state. +The EVM (Ethereum Virtual Machine) component handles transaction execution and state transitions. It's responsible for processing transactions and updating the blockchain state. ## Overview @@ -42,4 +42,4 @@ Block builders construct new blocks for proposal: - Learn about [RPC](/sdk/node-components/rpc) server integration - Explore [Transaction Pool](/sdk/node-components/pool) interaction -- Review [Consensus](/sdk/node-components/consensus) validation \ No newline at end of file +- Review [Consensus](/sdk/node-components/consensus) validation diff --git a/docs/vocs/links-report.json b/docs/vocs/links-report.json deleted file mode 100644 index 830568362a2..00000000000 --- a/docs/vocs/links-report.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "timestamp": "2025-06-23T11:20:27.303Z", - "totalFiles": 106, - "totalLinks": 150, - "brokenLinks": [ - { - "file": "docs/pages/index.mdx", - "link": "/introduction/benchmarks", - "line": 110, - "reason": "Absolute path not found: /introduction/benchmarks" - } - ], - "summary": { - "brokenCount": 1, - "validCount": 149 - } -} \ No newline at end of file diff --git a/docs/vocs/package.json b/docs/vocs/package.json index f8d43111c51..035fc13b699 100644 --- a/docs/vocs/package.json +++ b/docs/vocs/package.json @@ -13,14 +13,14 @@ "inject-cargo-docs": "bun scripts/inject-cargo-docs.ts" }, "dependencies": { - "react": "latest", - "react-dom": "latest", - "vocs": "latest" + "react": "^19.1.0", + "react-dom": "^19.1.0", + "vocs": "^1.0.13" }, "devDependencies": { - "@types/node": "latest", - "@types/react": "latest", + "@types/node": "^24.0.14", + "@types/react": "^19.1.8", "glob": "^11.0.3", - "typescript": "latest" + "typescript": "^5.8.3" } } \ No newline at end of file diff --git a/docs/vocs/redirects.config.ts b/docs/vocs/redirects.config.ts index 6d30c882a14..82a911b6bfc 100644 --- a/docs/vocs/redirects.config.ts +++ b/docs/vocs/redirects.config.ts @@ -17,6 +17,8 @@ export const redirects: Record = { '/run/pruning': '/run/faq/pruning', '/run/ports': '/run/faq/ports', '/run/troubleshooting': '/run/faq/troubleshooting', + // SDK + '/sdk/overview': '/sdk', // Exex '/developers/exex': '/exex/overview', '/developers/exex/how-it-works': '/exex/how-it-works', diff --git a/docs/vocs/scripts/inject-cargo-docs.ts b/docs/vocs/scripts/inject-cargo-docs.ts index 1f8fee260d9..74857cb03e9 100644 --- a/docs/vocs/scripts/inject-cargo-docs.ts +++ b/docs/vocs/scripts/inject-cargo-docs.ts @@ -1,5 +1,4 @@ import { promises as fs } from 'fs'; -import { join, relative } from 'path'; import { glob } from 'glob'; const CARGO_DOCS_PATH = '../../target/doc'; @@ -41,6 +40,22 @@ async function injectCargoDocs() { for (const file of htmlFiles) { let content = await fs.readFile(file, 'utf-8'); + // Extract the current crate name and module path from the file path + // Remove the base path to get the relative path within the docs + const relativePath = file.startsWith('./') ? 
file.slice(2) : file; + const docsRelativePath = relativePath.replace(/^docs\/dist\/docs\//, ''); + const pathParts = docsRelativePath.split('/'); + const fileName = pathParts[pathParts.length - 1]; + + // Determine if this is the root index + const isRootIndex = pathParts.length === 1 && fileName === 'index.html'; + + // Extract crate name - it's the first directory in the docs-relative path + const crateName = isRootIndex ? null : pathParts[0]; + + // Build the current module path (everything between crate and filename) + const modulePath = pathParts.slice(1, -1).join('/'); + // Fix static file references content = content // CSS and JS in static.files @@ -56,8 +71,37 @@ async function injectCargoDocs() { // Fix crate navigation links .replace(/href="\.\/([^/]+)\/index\.html"/g, `href="${BASE_PATH}/$1/index.html"`) .replace(/href="\.\.\/([^/]+)\/index\.html"/g, `href="${BASE_PATH}/$1/index.html"`) - // Fix simple crate links (without ./ or ../) - .replace(/href="([^/:"]+)\/index\.html"/g, `href="${BASE_PATH}/$1/index.html"`) + // Fix module links within the same crate (relative paths without ./ or ../) + // These need to include the current crate name in the path + .replace(/href="([^/:"\.](?:[^/:"]*)?)\/index\.html"/g, (match, moduleName) => { + // Skip if it's already an absolute path or contains a protocol + if (moduleName.startsWith('/') || moduleName.includes('://')) { + return match; + } + // For the root index page, these are crate links, not module links + if (isRootIndex) { + return `href="${BASE_PATH}/${moduleName}/index.html"`; + } + // For module links within a crate, we need to build the full path + // If we're in a nested module, we need to go up to the crate root then down to the target + const fullPath = modulePath ? `${crateName}/${modulePath}/${moduleName}` : `${crateName}/${moduleName}`; + return `href="${BASE_PATH}/${fullPath}/index.html"`; + }) + + // Also fix other relative links (structs, enums, traits) that don't have index.html + .replace(/href="([^/:"\.#][^/:"#]*\.html)"/g, (match, pageName) => { + // Skip if it's already an absolute path or contains a protocol + if (pageName.startsWith('/') || pageName.includes('://')) { + return match; + } + // Skip for root index page as it shouldn't have such links + if (isRootIndex) { + return match; + } + // For other doc pages in nested modules, build the full path + const fullPath = modulePath ? 
`${crateName}/${modulePath}/${pageName}` : `${crateName}/${pageName}`; + return `href="${BASE_PATH}/${fullPath}"`; + }) // Fix root index.html links .replace(/href="\.\/index\.html"/g, `href="${BASE_PATH}/index.html"`) @@ -70,17 +114,39 @@ async function injectCargoDocs() { .replace(/data-static-root-path="\.\.\/static\.files\/"/g, `data-static-root-path="${BASE_PATH}/static.files/"`) // Fix search index paths - .replace(/data-search-index-js="([^"]+)"/g, `data-search-index-js="${BASE_PATH}/static.files/$1"`) + .replace(/data-search-index-js="[^"]+"/g, `data-search-index-js="${BASE_PATH}/search-index.js"`) .replace(/data-search-js="([^"]+)"/g, `data-search-js="${BASE_PATH}/static.files/$1"`) .replace(/data-settings-js="([^"]+)"/g, `data-settings-js="${BASE_PATH}/static.files/$1"`) // Fix logo paths .replace(/src="\.\/static\.files\/rust-logo/g, `src="${BASE_PATH}/static.files/rust-logo`) - .replace(/src="\.\.\/static\.files\/rust-logo/g, `src="${BASE_PATH}/static.files/rust-logo`); + .replace(/src="\.\.\/static\.files\/rust-logo/g, `src="${BASE_PATH}/static.files/rust-logo`) + + // Fix search functionality by ensuring correct load order + // Add the rustdoc-vars initialization before other scripts + .replace(/`); await fs.writeFile(file, content, 'utf-8'); } + // Find the actual search JS filename from the HTML files + let actualSearchJsFile = ''; + for (const htmlFile of htmlFiles) { + const htmlContent = await fs.readFile(htmlFile, 'utf-8'); + const searchMatch = htmlContent.match(/data-search-js="[^"]*\/([^"]+)"/); + if (searchMatch && searchMatch[1]) { + actualSearchJsFile = searchMatch[1]; + console.log(`Found search JS file: ${actualSearchJsFile} in ${htmlFile}`); + break; + } + } + + if (!actualSearchJsFile) { + console.error('Could not detect search JS filename from HTML files'); + process.exit(1); + } + // Also fix paths in JavaScript files const jsFiles = await glob(`${VOCS_DIST_PATH}/**/*.js`); @@ -94,6 +160,48 @@ async function injectCargoDocs() { .replace(/"\.\/([^/]+)\/index\.html"/g, `"${BASE_PATH}/$1/index.html"`) .replace(/"\.\.\/([^/]+)\/index\.html"/g, `"${BASE_PATH}/$1/index.html"`); + // Fix the search form submission issue that causes page reload + // Instead of submitting a form, just ensure the search functionality is loaded + if (file.includes('main-') && file.endsWith('.js')) { + content = content.replace( + /function sendSearchForm\(\)\{document\.getElementsByClassName\("search-form"\)\[0\]\.submit\(\)\}/g, + 'function sendSearchForm(){/* Fixed: No form submission needed - search loads via script */}' + ); + + // Also fix the root path references in the search functionality + content = content.replace( + /getVar\("root-path"\)/g, + `"${BASE_PATH}/"` + ); + + // Fix static-root-path to avoid double paths + content = content.replace( + /getVar\("static-root-path"\)/g, + `"${BASE_PATH}/static.files/"` + ); + + // Fix the search-js variable to return just the filename + // Use the detected search filename + content = content.replace( + /getVar\("search-js"\)/g, + `"${actualSearchJsFile}"` + ); + + // Fix the search index loading path + content = content.replace( + /resourcePath\("search-index",".js"\)/g, + `"${BASE_PATH}/search-index.js"` + ); + } + + // Fix paths in storage.js which contains the web components + if (file.includes('storage-') && file.endsWith('.js')) { + content = content.replace( + /getVar\("root-path"\)/g, + `"${BASE_PATH}/"` + ); + } + await fs.writeFile(file, content, 'utf-8'); } diff --git a/docs/vocs/sidebar.ts b/docs/vocs/sidebar.ts index 
65829d8e48c..e51af1c260c 100644 --- a/docs/vocs/sidebar.ts +++ b/docs/vocs/sidebar.ts @@ -136,7 +136,7 @@ export const sidebar: SidebarItem[] = [ items: [ { text: "Overview", - link: "/sdk/overview" + link: "/sdk" }, { text: "Typesystem", @@ -313,6 +313,10 @@ export const sidebar: SidebarItem[] = [ text: "reth import-era", link: "/cli/reth/import-era" }, + { + text: "reth export-era", + link: "/cli/reth/export-era" + }, { text: "reth dump-genesis", link: "/cli/reth/dump-genesis" diff --git a/docs/vocs/vocs.config.ts b/docs/vocs/vocs.config.ts index 1f1b76f6a70..0df7a4ceb86 100644 --- a/docs/vocs/vocs.config.ts +++ b/docs/vocs/vocs.config.ts @@ -1,3 +1,4 @@ +import React from 'react' import { defineConfig } from 'vocs' import { sidebar } from './sidebar' import { basePath } from './redirects.config' @@ -11,11 +12,13 @@ export default defineConfig({ basePath, topNav: [ { text: 'Run', link: '/run/ethereum' }, - { text: 'SDK', link: '/sdk/overview' }, - { text: 'Rustdocs', link: '/docs' }, + { text: 'SDK', link: '/sdk' }, + { + element: React.createElement('a', { href: '/docs', target: '_self' }, 'Rustdocs') + }, { text: 'GitHub', link: 'https://github.com/paradigmxyz/reth' }, { - text: 'v1.5.0', + text: 'v1.6.0', items: [ { text: 'Releases', @@ -65,6 +68,6 @@ export default defineConfig({ } }, editLink: { - pattern: "https://github.com/paradigmxyz/reth/edit/main/book/vocs/docs/pages/:path", + pattern: "https://github.com/paradigmxyz/reth/edit/main/docs/vocs/docs/pages/:path", } }) diff --git a/etc/README.md b/etc/README.md index 4f4ce7f20e4..6b6cff73e3c 100644 --- a/etc/README.md +++ b/etc/README.md @@ -13,7 +13,7 @@ up to date. ### Docker Compose To run Reth, Grafana or Prometheus with Docker Compose, refer to -the [docker docs](/book/installation/docker.md#using-docker-compose). +the [docker docs](https://reth.rs/installation/docker#using-docker-compose). ### Grafana @@ -75,4 +75,4 @@ If you are running Reth and Grafana outside of docker, and wish to import new Gr 1. Delete the old dashboard If you are running Reth and Grafana using docker, after having pulled the updated dashboards from `main`, restart the -Grafana service. This will update all dashboards. \ No newline at end of file +Grafana service. This will update all dashboards. 
diff --git a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs index 9bbb198ae12..56755b1e730 100644 --- a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs +++ b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs @@ -103,7 +103,7 @@ where .body() .transactions() .filter(|tx| tx.is_eip4844()) - .map(|tx| (tx.clone(), tx.blob_versioned_hashes().unwrap().len())) + .map(|tx| (tx.clone(), tx.blob_count().unwrap_or(0) as usize)) .collect(); let mut all_blobs_available = true; diff --git a/examples/custom-engine-types/Cargo.toml b/examples/custom-engine-types/Cargo.toml index d0a0543b5d3..50bd58620e3 100644 --- a/examples/custom-engine-types/Cargo.toml +++ b/examples/custom-engine-types/Cargo.toml @@ -9,7 +9,6 @@ license.workspace = true reth-payload-builder.workspace = true reth-basic-payload-builder.workspace = true reth-ethereum-payload-builder.workspace = true -reth-engine-local.workspace = true reth-ethereum = { workspace = true, features = ["test-utils", "node", "node-api", "pool"] } reth-tracing.workspace = true reth-trie-db.workspace = true diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index ae42090d214..06da2f3263e 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -29,7 +29,6 @@ use alloy_rpc_types::{ Withdrawal, }; use reth_basic_payload_builder::{BuildArguments, BuildOutcome, PayloadBuilder, PayloadConfig}; -use reth_engine_local::payload::UnsupportedLocalAttributes; use reth_ethereum::{ chainspec::{Chain, ChainSpec, ChainSpecProvider}, node::{ @@ -42,7 +41,7 @@ use reth_ethereum::{ builder::{ components::{BasicPayloadServiceBuilder, ComponentsBuilder, PayloadBuilderBuilder}, rpc::{EngineValidatorBuilder, RpcAddOns}, - BuilderContext, Node, NodeAdapter, NodeBuilder, NodeComponentsBuilder, + BuilderContext, Node, NodeAdapter, NodeBuilder, }, core::{args::RpcServerArgs, node_config::NodeConfig}, node::{ @@ -52,11 +51,11 @@ use reth_ethereum::{ EthEvmConfig, EthereumEthApiBuilder, }, pool::{PoolTransaction, TransactionPool}, - primitives::{RecoveredBlock, SealedBlock}, + primitives::{Block, RecoveredBlock, SealedBlock}, provider::{EthStorage, StateProviderFactory}, rpc::types::engine::ExecutionPayload, tasks::TaskManager, - Block, EthPrimitives, TransactionSigned, + EthPrimitives, TransactionSigned, }; use reth_ethereum_payload_builder::{EthereumBuilderConfig, EthereumExecutionPayloadValidator}; use reth_payload_builder::{EthBuiltPayload, EthPayloadBuilderAttributes, PayloadBuilderError}; @@ -76,9 +75,6 @@ pub struct CustomPayloadAttributes { pub custom: u64, } -// TODO(mattsse): remove this tmp workaround -impl UnsupportedLocalAttributes for CustomPayloadAttributes {} - /// Custom error type used in payload attributes validation #[derive(Debug, Error)] pub enum CustomError { @@ -195,9 +191,8 @@ impl CustomEngineValidator { } } -impl PayloadValidator for CustomEngineValidator { - type Block = Block; - type ExecutionData = ExecutionData; +impl PayloadValidator for CustomEngineValidator { + type Block = reth_ethereum::Block; fn ensure_well_formed_payload( &self, @@ -206,16 +201,22 @@ impl PayloadValidator for CustomEngineValidator { let sealed_block = self.inner.ensure_well_formed_payload(payload)?; sealed_block.try_recover().map_err(|e| NewPayloadError::Other(e.into())) } + + fn validate_payload_attributes_against_header( + &self, + _attr: &CustomPayloadAttributes, + _header: &::Header, + ) -> Result<(), 
InvalidPayloadAttributesError> { + // skip default timestamp validation + Ok(()) + } } -impl EngineValidator for CustomEngineValidator -where - T: PayloadTypes, -{ +impl EngineValidator for CustomEngineValidator { fn validate_version_specific_fields( &self, version: EngineApiMessageVersion, - payload_or_attrs: PayloadOrAttributes<'_, Self::ExecutionData, T::PayloadAttributes>, + payload_or_attrs: PayloadOrAttributes<'_, ExecutionData, CustomPayloadAttributes>, ) -> Result<(), EngineObjectValidationError> { validate_version_specific_fields(self.chain_spec(), version, payload_or_attrs) } @@ -223,12 +224,12 @@ where fn ensure_well_formed_attributes( &self, version: EngineApiMessageVersion, - attributes: &T::PayloadAttributes, + attributes: &CustomPayloadAttributes, ) -> Result<(), EngineObjectValidationError> { validate_version_specific_fields( self.chain_spec(), version, - PayloadOrAttributes::::PayloadAttributes( + PayloadOrAttributes::::PayloadAttributes( attributes, ), )?; @@ -242,15 +243,6 @@ where Ok(()) } - - fn validate_payload_attributes_against_header( - &self, - _attr: &::PayloadAttributes, - _header: &::Header, - ) -> Result<(), InvalidPayloadAttributesError> { - // skip default timestamp validation - Ok(()) - } } /// Custom engine validator builder @@ -296,14 +288,7 @@ pub type MyNodeAddOns = RpcAddOns Node for MyCustomNode where - N: FullNodeTypes< - Types: NodeTypes< - Payload = CustomEngineTypes, - ChainSpec = ChainSpec, - Primitives = EthPrimitives, - Storage = EthStorage, - >, - >, + N: FullNodeTypes, { type ComponentsBuilder = ComponentsBuilder< N, @@ -313,9 +298,7 @@ where EthereumExecutorBuilder, EthereumConsensusBuilder, >; - type AddOns = MyNodeAddOns< - NodeAdapter>::Components>, - >; + type AddOns = MyNodeAddOns>; fn components_builder(&self) -> Self::ComponentsBuilder { ComponentsBuilder::default() diff --git a/examples/custom-node/Cargo.toml b/examples/custom-node/Cargo.toml index d190fef9f85..54a68d98abe 100644 --- a/examples/custom-node/Cargo.toml +++ b/examples/custom-node/Cargo.toml @@ -13,10 +13,11 @@ reth-network-peers.workspace = true reth-node-builder.workspace = true reth-optimism-forks.workspace = true reth-db-api.workspace = true -reth-op = { workspace = true, features = ["node", "pool"] } +reth-op = { workspace = true, features = ["node", "pool", "rpc"] } reth-payload-builder.workspace = true reth-primitives-traits.workspace = true reth-rpc-api.workspace = true +reth-engine-primitives.workspace = true reth-rpc-engine-api.workspace = true reth-ethereum = { workspace = true, features = ["node-api", "network", "evm", "pool", "trie", "storage-api"] } @@ -33,9 +34,12 @@ alloy-op-evm.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true alloy-serde.workspace = true +alloy-network.workspace = true alloy-rpc-types-engine.workspace = true +alloy-rpc-types-eth.workspace = true op-alloy-consensus.workspace = true op-alloy-rpc-types-engine.workspace = true +op-alloy-rpc-types.workspace = true op-revm.workspace = true # misc @@ -65,6 +69,8 @@ arbitrary = [ "reth-ethereum/arbitrary", "alloy-rpc-types-engine/arbitrary", "reth-db-api/arbitrary", + "alloy-rpc-types-eth/arbitrary", + "op-alloy-rpc-types/arbitrary", "reth-primitives-traits/arbitrary", ] default = [] diff --git a/examples/custom-node/src/engine.rs b/examples/custom-node/src/engine.rs index e3bc6019d7b..4c8bff3a1fd 100644 --- a/examples/custom-node/src/engine.rs +++ b/examples/custom-node/src/engine.rs @@ -1,25 +1,27 @@ use crate::{ chainspec::CustomChainSpec, 
primitives::{CustomHeader, CustomNodePrimitives, CustomTransaction}, + CustomNode, }; use op_alloy_rpc_types_engine::{OpExecutionData, OpExecutionPayload}; use reth_chain_state::ExecutedBlockWithTrieUpdates; +use reth_engine_primitives::EngineValidator; use reth_ethereum::{ node::api::{ validate_version_specific_fields, AddOnsContext, BuiltPayload, EngineApiMessageVersion, - EngineObjectValidationError, EngineValidator, ExecutionPayload, FullNodeComponents, - InvalidPayloadAttributesError, NewPayloadError, NodePrimitives, NodeTypes, - PayloadAttributes, PayloadBuilderAttributes, PayloadOrAttributes, PayloadTypes, - PayloadValidator, + EngineObjectValidationError, ExecutionPayload, FullNodeComponents, NewPayloadError, + NodePrimitives, PayloadAttributes, PayloadBuilderAttributes, PayloadOrAttributes, + PayloadTypes, PayloadValidator, }, primitives::{RecoveredBlock, SealedBlock}, storage::StateProviderFactory, trie::{KeccakKeyHasher, KeyHasher}, }; -use reth_node_builder::rpc::EngineValidatorBuilder; +use reth_node_builder::{rpc::EngineValidatorBuilder, InvalidPayloadAttributesError}; use reth_op::{ node::{ - engine::OpEngineValidator, OpBuiltPayload, OpPayloadAttributes, OpPayloadBuilderAttributes, + engine::OpEngineValidator, OpBuiltPayload, OpEngineTypes, OpPayloadAttributes, + OpPayloadBuilderAttributes, }, OpTransactionSigned, }; @@ -176,9 +178,9 @@ impl From impl PayloadTypes for CustomPayloadTypes { type ExecutionData = CustomExecutionData; - type BuiltPayload = CustomBuiltPayload; - type PayloadAttributes = CustomPayloadAttributes; - type PayloadBuilderAttributes = CustomPayloadBuilderAttributes; + type BuiltPayload = OpBuiltPayload; + type PayloadAttributes = OpPayloadAttributes; + type PayloadBuilderAttributes = OpPayloadBuilderAttributes; fn block_to_payload( block: SealedBlock< @@ -215,18 +217,20 @@ where } } -impl

<P> PayloadValidator for CustomEngineValidator<P>
+impl<P> PayloadValidator<CustomPayloadTypes> for CustomEngineValidator<P>
where P: StateProviderFactory + Send + Sync + Unpin + 'static, { type Block = crate::primitives::block::Block; - type ExecutionData = CustomExecutionData; fn ensure_well_formed_payload( &self, payload: CustomExecutionData, ) -> Result<RecoveredBlock<Self::Block>, NewPayloadError> { - let sealed_block = self.inner.ensure_well_formed_payload(payload.inner)?; + let sealed_block = PayloadValidator::<OpEngineTypes>::ensure_well_formed_payload( + &self.inner, + payload.inner, + )?; let (block, senders) = sealed_block.split_sealed(); let (header, body) = block.split_sealed_header_body(); let header = CustomHeader { inner: header.into_header(), extension: payload.extension }; @@ -235,20 +239,25 @@ where Ok(block.with_senders(senders)) } + + fn validate_payload_attributes_against_header( + &self, + _attr: &OpPayloadAttributes, + _header: &<Self::Block as Block>::Header, + ) -> Result<(), InvalidPayloadAttributesError> { + // skip default timestamp validation + Ok(()) + } } -impl<T, P> EngineValidator<T> for CustomEngineValidator<P>
+impl<P> EngineValidator<CustomPayloadTypes> for CustomEngineValidator<P>

where P: StateProviderFactory + Send + Sync + Unpin + 'static, - T: PayloadTypes< - PayloadAttributes = CustomPayloadAttributes, - ExecutionData = CustomExecutionData, - >, { fn validate_version_specific_fields( &self, version: EngineApiMessageVersion, - payload_or_attrs: PayloadOrAttributes<'_, Self::ExecutionData, T::PayloadAttributes>, + payload_or_attrs: PayloadOrAttributes<'_, CustomExecutionData, OpPayloadAttributes>, ) -> Result<(), EngineObjectValidationError> { validate_version_specific_fields(self.chain_spec(), version, payload_or_attrs) } @@ -256,34 +265,23 @@ where fn ensure_well_formed_attributes( &self, version: EngineApiMessageVersion, - attributes: &T::PayloadAttributes, + attributes: &OpPayloadAttributes, ) -> Result<(), EngineObjectValidationError> { validate_version_specific_fields( self.chain_spec(), version, - PayloadOrAttributes::::PayloadAttributes( - attributes, - ), + PayloadOrAttributes::::PayloadAttributes(attributes), )?; // custom validation logic - ensure that the custom field is not zero - if attributes.extension == 0 { - return Err(EngineObjectValidationError::invalid_params( - CustomError::CustomFieldIsNotZero, - )) - } + // if attributes.extension == 0 { + // return Err(EngineObjectValidationError::invalid_params( + // CustomError::CustomFieldIsNotZero, + // )) + // } Ok(()) } - - fn validate_payload_attributes_against_header( - &self, - _attr: &::PayloadAttributes, - _header: &::Header, - ) -> Result<(), InvalidPayloadAttributesError> { - // skip default timestamp validation - Ok(()) - } } /// Custom error type used in payload attributes validation @@ -300,13 +298,7 @@ pub struct CustomEngineValidatorBuilder; impl EngineValidatorBuilder for CustomEngineValidatorBuilder where - N: FullNodeComponents< - Types: NodeTypes< - Payload = CustomPayloadTypes, - ChainSpec = CustomChainSpec, - Primitives = CustomNodePrimitives, - >, - >, + N: FullNodeComponents, { type Validator = CustomEngineValidator; diff --git a/examples/custom-node/src/engine_api.rs b/examples/custom-node/src/engine_api.rs index bc92ffb8a99..7e5d1455f0e 100644 --- a/examples/custom-node/src/engine_api.rs +++ b/examples/custom-node/src/engine_api.rs @@ -1,21 +1,19 @@ use crate::{ - chainspec::CustomChainSpec, - engine::{ - CustomBuiltPayload, CustomExecutionData, CustomPayloadAttributes, CustomPayloadTypes, - }, + engine::{CustomExecutionData, CustomPayloadTypes}, primitives::CustomNodePrimitives, + CustomNode, }; use alloy_rpc_types_engine::{ ExecutionPayloadV3, ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, }; use async_trait::async_trait; use jsonrpsee::{core::RpcResult, proc_macros::rpc, RpcModule}; +use op_alloy_rpc_types_engine::OpPayloadAttributes; use reth_ethereum::node::api::{ AddOnsContext, BeaconConsensusEngineHandle, EngineApiMessageVersion, FullNodeComponents, - NodeTypes, }; use reth_node_builder::rpc::EngineApiBuilder; -use reth_op::node::OpStorage; +use reth_op::node::OpBuiltPayload; use reth_payload_builder::PayloadStore; use reth_rpc_api::IntoEngineApiRpcModule; use reth_rpc_engine_api::EngineApiError; @@ -30,9 +28,9 @@ pub struct CustomExecutionPayloadEnvelope { extension: u64, } -impl From for CustomExecutionPayloadEnvelope { - fn from(value: CustomBuiltPayload) -> Self { - let sealed_block = value.0.into_sealed_block(); +impl From> for CustomExecutionPayloadEnvelope { + fn from(value: OpBuiltPayload) -> Self { + let sealed_block = value.into_sealed_block(); let hash = sealed_block.hash(); let extension = sealed_block.header().extension; let block = 
sealed_block.into_block(); @@ -53,7 +51,7 @@ pub trait CustomEngineApi { async fn fork_choice_updated( &self, fork_choice_state: ForkchoiceState, - payload_attributes: Option, + payload_attributes: Option, ) -> RpcResult; #[method(name = "getPayload")] @@ -93,7 +91,7 @@ impl CustomEngineApiServer for CustomEngineApi { async fn fork_choice_updated( &self, fork_choice_state: ForkchoiceState, - payload_attributes: Option, + payload_attributes: Option, ) -> RpcResult { Ok(self .inner @@ -132,14 +130,7 @@ pub struct CustomEngineApiBuilder {} impl EngineApiBuilder for CustomEngineApiBuilder where - N: FullNodeComponents< - Types: NodeTypes< - Payload = CustomPayloadTypes, - ChainSpec = CustomChainSpec, - Primitives = CustomNodePrimitives, - Storage = OpStorage, - >, - >, + N: FullNodeComponents, { type EngineApi = CustomEngineApi; diff --git a/examples/custom-node/src/evm/alloy.rs b/examples/custom-node/src/evm/alloy.rs index 67a9f90fdfa..6071a2c6dd8 100644 --- a/examples/custom-node/src/evm/alloy.rs +++ b/examples/custom-node/src/evm/alloy.rs @@ -70,10 +70,6 @@ where self.inner.transact_system_call(caller, contract, data) } - fn db_mut(&mut self) -> &mut Self::DB { - self.inner.db_mut() - } - fn finish(self) -> (Self::DB, EvmEnv) { self.inner.finish() } @@ -82,20 +78,12 @@ where self.inner.set_inspector_enabled(enabled) } - fn precompiles(&self) -> &Self::Precompiles { - self.inner.precompiles() - } - - fn precompiles_mut(&mut self) -> &mut Self::Precompiles { - self.inner.precompiles_mut() - } - - fn inspector(&self) -> &Self::Inspector { - self.inner.inspector() + fn components(&self) -> (&Self::DB, &Self::Inspector, &Self::Precompiles) { + self.inner.components() } - fn inspector_mut(&mut self) -> &mut Self::Inspector { - self.inner.inspector_mut() + fn components_mut(&mut self) -> (&mut Self::DB, &mut Self::Inspector, &mut Self::Precompiles) { + self.inner.components_mut() } } diff --git a/examples/custom-node/src/lib.rs b/examples/custom-node/src/lib.rs index a4511e204e8..45dbde46628 100644 --- a/examples/custom-node/src/lib.rs +++ b/examples/custom-node/src/lib.rs @@ -8,18 +8,26 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] use crate::{ - evm::CustomExecutorBuilder, pool::CustomPooledTransaction, primitives::CustomTransaction, + engine::{CustomEngineValidatorBuilder, CustomPayloadTypes}, + engine_api::CustomEngineApiBuilder, + evm::CustomExecutorBuilder, + pool::CustomPooledTransaction, + primitives::CustomTransaction, + rpc::CustomRpcTypes, }; use chainspec::CustomChainSpec; use primitives::CustomNodePrimitives; use reth_ethereum::node::api::{FullNodeTypes, NodeTypes}; use reth_node_builder::{ components::{BasicPayloadServiceBuilder, ComponentsBuilder}, - Node, + Node, NodeAdapter, }; -use reth_op::node::{ - node::{OpConsensusBuilder, OpNetworkBuilder, OpPayloadBuilder, OpPoolBuilder}, - txpool, OpNode, OpPayloadTypes, +use reth_op::{ + node::{ + node::{OpConsensusBuilder, OpNetworkBuilder, OpPayloadBuilder, OpPoolBuilder}, + txpool, OpAddOns, OpNode, + }, + rpc::OpEthApiBuilder, }; pub mod chainspec; @@ -28,16 +36,19 @@ pub mod engine_api; pub mod evm; pub mod pool; pub mod primitives; +pub mod rpc; #[derive(Debug, Clone)] -pub struct CustomNode {} +pub struct CustomNode { + inner: OpNode, +} impl NodeTypes for CustomNode { type Primitives = CustomNodePrimitives; type ChainSpec = CustomChainSpec; type StateCommitment = ::StateCommitment; type Storage = ::Storage; - type Payload = OpPayloadTypes; + type Payload = CustomPayloadTypes; } impl Node for CustomNode @@ -53,7 
+64,12 @@ where OpConsensusBuilder, >; - type AddOns = (); + type AddOns = OpAddOns< + NodeAdapter<N>, + OpEthApiBuilder<CustomRpcTypes>, + CustomEngineValidatorBuilder, + CustomEngineApiBuilder, + >; fn components_builder(&self) -> Self::ComponentsBuilder { ComponentsBuilder::default() @@ -65,5 +81,7 @@ where .consensus(OpConsensusBuilder::default()) } - fn add_ons(&self) -> Self::AddOns {} + fn add_ons(&self) -> Self::AddOns { + self.inner.add_ons_builder().build() + } } diff --git a/examples/custom-node/src/pool.rs b/examples/custom-node/src/pool.rs index 09f0b667c79..c24e4d38e75 100644 --- a/examples/custom-node/src/pool.rs +++ b/examples/custom-node/src/pool.rs @@ -1,15 +1,29 @@ use crate::primitives::{CustomTransaction, CustomTransactionEnvelope}; -use alloy_consensus::error::ValueError; -use op_alloy_consensus::OpPooledTransaction; -use reth_ethereum::primitives::Extended; +use alloy_consensus::{ + crypto::RecoveryError, error::ValueError, transaction::SignerRecoverable, TransactionEnvelope, +}; +use alloy_primitives::{Address, Sealed, B256}; +use op_alloy_consensus::{OpPooledTransaction, OpTransaction, TxDeposit}; +use reth_ethereum::primitives::{ + serde_bincode_compat::RlpBincode, InMemorySize, SignedTransaction, +}; -pub type CustomPooledTransaction = Extended<OpPooledTransaction, CustomTransactionEnvelope>; +#[derive(Clone, Debug, TransactionEnvelope)] +#[envelope(tx_type_name = CustomPooledTxType)] +pub enum CustomPooledTransaction { + /// A regular Optimism transaction as defined by [`OpPooledTransaction`]. + #[envelope(flatten)] + Op(OpPooledTransaction), + /// A [`CustomTransactionEnvelope`] tagged with type 42. + #[envelope(ty = 42)] + Payment(CustomTransactionEnvelope), +} impl From<CustomPooledTransaction> for CustomTransaction { fn from(tx: CustomPooledTransaction) -> Self { match tx { - CustomPooledTransaction::BuiltIn(tx) => Self::Op(tx.into()), - CustomPooledTransaction::Other(tx) => Self::Payment(tx), + CustomPooledTransaction::Op(tx) => Self::Op(tx.into()), + CustomPooledTransaction::Payment(tx) => Self::Payment(tx), } } } @@ -19,10 +33,62 @@ impl TryFrom<CustomTransaction> for CustomPooledTransaction { fn try_from(tx: CustomTransaction) -> Result<Self, Self::Error> { match tx { - CustomTransaction::Op(op) => Ok(Self::BuiltIn( + CustomTransaction::Op(op) => Ok(Self::Op( OpPooledTransaction::try_from(op).map_err(|op| op.map(CustomTransaction::Op))?, )), - CustomTransaction::Payment(payment) => Ok(Self::Other(payment)), + CustomTransaction::Payment(payment) => Ok(Self::Payment(payment)), + } + } +} + +impl RlpBincode for CustomPooledTransaction {} + +impl OpTransaction for CustomPooledTransaction { + fn is_deposit(&self) -> bool { + match self { + CustomPooledTransaction::Op(_) => false, + CustomPooledTransaction::Payment(payment) => payment.is_deposit(), + } + } + + fn as_deposit(&self) -> Option<&Sealed<TxDeposit>> { + match self { + CustomPooledTransaction::Op(_) => None, + CustomPooledTransaction::Payment(payment) => payment.as_deposit(), + } + } +} + +impl SignerRecoverable for CustomPooledTransaction { + fn recover_signer(&self) -> Result<Address, RecoveryError> { + match self { + CustomPooledTransaction::Op(tx) => SignerRecoverable::recover_signer(tx), + CustomPooledTransaction::Payment(tx) => SignerRecoverable::recover_signer(tx), + } + } + + fn recover_signer_unchecked(&self) -> Result<Address, RecoveryError> { + match self { + CustomPooledTransaction::Op(tx) => SignerRecoverable::recover_signer_unchecked(tx), + CustomPooledTransaction::Payment(tx) => SignerRecoverable::recover_signer_unchecked(tx), + } + } +} + +impl SignedTransaction for CustomPooledTransaction { + fn tx_hash(&self) -> &B256 { + match self { + 
CustomPooledTransaction::Op(tx) => SignedTransaction::tx_hash(tx), + CustomPooledTransaction::Payment(tx) => SignedTransaction::tx_hash(tx), + } + } +} + +impl InMemorySize for CustomPooledTransaction { + fn size(&self) -> usize { + match self { + CustomPooledTransaction::Op(tx) => InMemorySize::size(tx), + CustomPooledTransaction::Payment(tx) => InMemorySize::size(tx), } } } diff --git a/examples/custom-node/src/primitives/tx.rs b/examples/custom-node/src/primitives/tx.rs index 48348f6839a..682d1a67552 100644 --- a/examples/custom-node/src/primitives/tx.rs +++ b/examples/custom-node/src/primitives/tx.rs @@ -5,13 +5,13 @@ use alloy_consensus::{ RecoveryError, }, transaction::SignerRecoverable, - SignableTransaction, Signed, Transaction, TransactionEnvelope, + Signed, Transaction, TransactionEnvelope, }; use alloy_eips::{ eip2718::{Eip2718Result, IsTyped2718}, Decodable2718, Encodable2718, Typed2718, }; -use alloy_primitives::{bytes::Buf, keccak256, Sealed, Signature, TxHash, B256}; +use alloy_primitives::{bytes::Buf, Sealed, Signature, TxHash, B256}; use alloy_rlp::{BufMut, Decodable, Encodable, Result as RlpResult}; use op_alloy_consensus::{OpTxEnvelope, TxDeposit}; use reth_codecs::{ @@ -128,15 +128,6 @@ impl SignedTransaction for CustomTransactionEnvelope { fn tx_hash(&self) -> &TxHash { self.inner.hash() } - - fn recover_signer_unchecked_with_buf( - &self, - buf: &mut Vec, - ) -> Result { - self.inner.tx().encode_for_signing(buf); - let signature_hash = keccak256(buf); - recover_signer_unchecked(self.inner.signature(), signature_hash) - } } impl Typed2718 for CustomTransactionEnvelope { @@ -300,20 +291,6 @@ impl SignerRecoverable for CustomTransaction { } impl SignedTransaction for CustomTransaction { - fn recover_signer_unchecked_with_buf( - &self, - buf: &mut Vec, - ) -> Result { - match self { - CustomTransaction::Op(tx) => { - SignedTransaction::recover_signer_unchecked_with_buf(tx, buf) - } - CustomTransaction::Payment(tx) => { - SignedTransaction::recover_signer_unchecked_with_buf(tx, buf) - } - } - } - fn tx_hash(&self) -> &B256 { match self { CustomTransaction::Op(tx) => SignedTransaction::tx_hash(tx), diff --git a/examples/custom-node/src/rpc.rs b/examples/custom-node/src/rpc.rs new file mode 100644 index 00000000000..8259297367d --- /dev/null +++ b/examples/custom-node/src/rpc.rs @@ -0,0 +1,53 @@ +use crate::{ + evm::CustomTxEnv, + primitives::{CustomHeader, CustomTransaction}, +}; +use alloy_consensus::error::ValueError; +use alloy_network::TxSigner; +use op_alloy_consensus::OpTxEnvelope; +use op_alloy_rpc_types::{OpTransactionReceipt, OpTransactionRequest}; +use reth_op::rpc::RpcTypes; +use reth_rpc_api::eth::{ + transaction::TryIntoTxEnv, EthTxEnvError, SignTxRequestError, SignableTxRequest, TryIntoSimTx, +}; +use revm::context::{BlockEnv, CfgEnv}; + +#[derive(Debug, Clone, Copy, Default)] +#[non_exhaustive] +pub struct CustomRpcTypes; + +impl RpcTypes for CustomRpcTypes { + type Header = alloy_rpc_types_eth::Header; + type Receipt = OpTransactionReceipt; + type TransactionRequest = OpTransactionRequest; + type TransactionResponse = op_alloy_rpc_types::Transaction; +} + +impl TryIntoSimTx for OpTransactionRequest { + fn try_into_sim_tx(self) -> Result> { + Ok(CustomTransaction::Op(self.try_into_sim_tx()?)) + } +} + +impl TryIntoTxEnv for OpTransactionRequest { + type Err = EthTxEnvError; + + fn try_into_tx_env( + self, + cfg_env: &CfgEnv, + block_env: &BlockEnv, + ) -> Result { + Ok(CustomTxEnv::Op(self.try_into_tx_env(cfg_env, block_env)?)) + } +} + +impl 
SignableTxRequest<CustomTransaction> for OpTransactionRequest {
+    async fn try_build_and_sign(
+        self,
+        signer: impl TxSigner + Send,
+    ) -> Result<CustomTransaction, SignTxRequestError> {
+        Ok(CustomTransaction::Op(
+            SignableTxRequest::<OpTxEnvelope>::try_build_and_sign(self, signer).await?,
+        ))
+    }
+}
diff --git a/examples/exex-hello-world/src/main.rs b/examples/exex-hello-world/src/main.rs
index 0f9e904881a..4253d8185e4 100644
--- a/examples/exex-hello-world/src/main.rs
+++ b/examples/exex-hello-world/src/main.rs
@@ -9,19 +9,27 @@ use clap::Parser;
 use futures::TryStreamExt;
 use reth_ethereum::{
+    chainspec::EthereumHardforks,
     exex::{ExExContext, ExExEvent, ExExNotification},
-    node::{api::FullNodeComponents, EthereumNode},
-    rpc::eth::EthApiFor,
+    node::{
+        api::{FullNodeComponents, NodeTypes},
+        builder::rpc::RpcHandle,
+        EthereumNode,
+    },
+    rpc::api::eth::helpers::FullEthApi,
 };
 use reth_tracing::tracing::info;
 use tokio::sync::oneshot;
 
+/// Additional CLI arguments
 #[derive(Parser)]
 struct ExExArgs {
+    /// Whether to launch an op-reth node
     #[arg(long)]
     optimism: bool,
 }
 
+/// A basic subscription loop over new blocks.
 async fn my_exex<Node: FullNodeComponents>(mut ctx: ExExContext<Node>) -> eyre::Result<()> {
     while let Some(notification) = ctx.notifications.try_next().await? {
         match &notification {
@@ -44,22 +52,44 @@ async fn my_exex<Node: FullNodeComponents>(mut ctx: ExExContext<Node>) -> eyre::
     Ok(())
 }
 
-/// This is an example of how to access the `EthApi` inside an ExEx. It receives the `EthApi` once
-/// the node is launched fully.
-async fn ethapi_exex<Node>(
+/// This is an example of how to access the [`RpcHandle`] inside an ExEx. It receives the
+/// [`RpcHandle`] once the node is fully launched.
+///
+/// This function supports both the OP Stack Eth API and the Ethereum Eth API.
+///
+/// The received handle gives access to the `EthApi`, which has full access to all eth api
+/// functionality ([`FullEthApi`]), and also to additional eth related rpc method handlers,
+/// such as the eth filter.
+async fn ethapi_exex<Node, EthApi>(
     mut ctx: ExExContext<Node>,
-    ethapi_rx: oneshot::Receiver<EthApiFor<Node>>,
+    rpc_handle: oneshot::Receiver<RpcHandle<Node, EthApi>>,
 ) -> eyre::Result<()>
 where
-    Node: FullNodeComponents,
+    Node: FullNodeComponents<Types: NodeTypes<ChainSpec: EthereumHardforks>>,
+    EthApi: FullEthApi,
 {
     // Wait for the ethapi to be sent from the main function
-    let _ethapi = ethapi_rx.await?;
-    info!("Received ethapi inside exex");
+    let rpc_handle = rpc_handle.await?;
+    info!("Received rpc handle inside exex");
+
+    // Obtain the ethapi from the rpc handle
+    let ethapi = rpc_handle.eth_api();
+
+    // EthFilter type that provides all eth_getLogs related logic
+    let _eth_filter = rpc_handle.eth_handlers().filter.clone();
+    // EthPubSub type that provides all eth_subscribe logic
+    let _eth_pubsub = rpc_handle.eth_handlers().pubsub.clone();
+    // The TraceApi type that provides all the trace_ handlers
+    let _trace_api = rpc_handle.trace_api();
+    // The DebugApi type that provides all the debug_ handlers
+    let _debug_api = rpc_handle.debug_api();
 
     while let Some(notification) = ctx.notifications.try_next().await? {
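+        // Process notifications as they arrive; committed chains are handled below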
         if let Some(committed_chain) = notification.committed_chain() {
             ctx.events.send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?;
+
+            // We can use the eth api to interact with the node
+            let _rpc_block = ethapi.rpc_block(committed_chain.tip().hash().into(), true).await?;
         }
     }
@@ -71,30 +101,42 @@ fn main() -> eyre::Result<()> {
     if args.optimism {
         reth_op::cli::Cli::parse_args().run(|builder, _| {
+            let (rpc_handle_tx, rpc_handle_rx) = oneshot::channel();
             Box::pin(async move {
                 let handle = builder
                     .node(reth_op::node::OpNode::default())
                     .install_exex("my-exex", async move |ctx| Ok(my_exex(ctx)))
+                    .install_exex("ethapi-exex", async move |ctx| {
+                        Ok(ethapi_exex(ctx, rpc_handle_rx))
+                    })
                     .launch()
                     .await?;
 
+                // Retrieve the rpc handle from the node and send it to the exex
+                rpc_handle_tx
+                    .send(handle.node.add_ons_handle.clone())
+                    .expect("Failed to send rpc handle to ExEx");
+
                 handle.wait_for_node_exit().await
             })
         })
     } else {
         reth_ethereum::cli::Cli::parse_args().run(|builder, _| {
             Box::pin(async move {
-                let (ethapi_tx, ethapi_rx) = oneshot::channel();
+                let (rpc_handle_tx, rpc_handle_rx) = oneshot::channel();
                 let handle = builder
                     .node(EthereumNode::default())
                     .install_exex("my-exex", async move |ctx| Ok(my_exex(ctx)))
-                    .install_exex("ethapi-exex", async move |ctx| Ok(ethapi_exex(ctx, ethapi_rx)))
+                    .install_exex("ethapi-exex", async move |ctx| {
+                        Ok(ethapi_exex(ctx, rpc_handle_rx))
+                    })
                     .launch()
                     .await?;
 
-                // Retrieve the ethapi from the node and send it to the exex
-                let ethapi = handle.node.add_ons_handle.eth_api();
-                ethapi_tx.send(ethapi.clone()).expect("Failed to send ethapi to ExEx");
+                // Retrieve the rpc handle from the node and send it to the exex
+                rpc_handle_tx
+                    .send(handle.node.add_ons_handle.clone())
+                    .expect("Failed to send rpc handle to ExEx");
 
                 handle.wait_for_node_exit().await
             })
diff --git a/examples/node-custom-rpc/src/main.rs b/examples/node-custom-rpc/src/main.rs
index 9aba7c9922a..8504949d9d9 100644
--- a/examples/node-custom-rpc/src/main.rs
+++ b/examples/node-custom-rpc/src/main.rs
@@ -32,7 +32,9 @@ fn main() {
     Cli::parse()
         .run(|builder, args| async move {
             let handle = builder
+                // configure default ethereum node
                 .node(EthereumNode::default())
+                // extend the rpc modules with our custom `TxpoolExt` endpoints
                 .extend_rpc_modules(move |ctx| {
                     if !args.enable_ext {
                         return Ok(())
@@ -50,6 +52,7 @@ fn main() {
                     Ok(())
                 })
+                // launch the node with custom rpc
                 .launch()
                 .await?;
diff --git a/examples/rpc-db/src/myrpc_ext.rs b/examples/rpc-db/src/myrpc_ext.rs
index d183ae818bd..68681ad587e 100644
--- a/examples/rpc-db/src/myrpc_ext.rs
+++ b/examples/rpc-db/src/myrpc_ext.rs
@@ -4,7 +4,7 @@ use reth_ethereum::{provider::BlockReaderIdExt, rpc::eth::EthResult, Block};
 // Rpc related imports
 use jsonrpsee::proc_macros::rpc;
 
-/// trait interface for a custom rpc namespace: `MyRpc`
+/// trait interface for a custom rpc namespace: `myrpcExt`
 ///
 /// This defines an additional namespace where all methods are configured as trait functions.
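 ///
 /// A minimal registration sketch (hypothetical wiring; `module` is assumed to be a
 /// jsonrpsee `RpcModule`, and `into_rpc` is generated by the `#[rpc(server)]` macro):
 ///
 /// ```ignore
 /// let rpc = MyRpcExt { provider };
 /// module.merge(rpc.into_rpc())?;
 /// ```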
 #[rpc(server, namespace = "myrpcExt")]
@@ -14,7 +14,7 @@ pub trait MyRpcExtApi {
     fn custom_method(&self) -> EthResult<Option<Block>>;
 }
 
-/// The type that implements `myRpc` rpc namespace trait
+/// The type that implements `myrpcExt` rpc namespace trait
 pub struct MyRpcExt<Provider> {
     pub provider: Provider,
 }
diff --git a/examples/txpool-tracing/Cargo.toml b/examples/txpool-tracing/Cargo.toml
index 57c93485ccf..df72dd193f9 100644
--- a/examples/txpool-tracing/Cargo.toml
+++ b/examples/txpool-tracing/Cargo.toml
@@ -6,8 +6,12 @@ edition.workspace = true
 license.workspace = true
 
 [dependencies]
-reth-ethereum = { workspace = true, features = ["node", "pool", "cli"] }
+reth-ethereum = { workspace = true, features = ["node", "pool", "cli", "rpc"] }
+
+alloy-primitives.workspace = true
 alloy-rpc-types-trace.workspace = true
+alloy-network.workspace = true
+
 clap = { workspace = true, features = ["derive"] }
 futures-util.workspace = true
-alloy-primitives.workspace = true
+eyre.workspace = true
diff --git a/examples/txpool-tracing/src/main.rs b/examples/txpool-tracing/src/main.rs
index 655b8889f6d..a1b61422cb9 100644
--- a/examples/txpool-tracing/src/main.rs
+++ b/examples/txpool-tracing/src/main.rs
@@ -21,6 +21,8 @@ use reth_ethereum::{
     rpc::eth::primitives::TransactionRequest,
 };
 
+mod submit;
+
 fn main() {
     Cli::parse()
         .run(|builder, args| async move {
diff --git a/examples/txpool-tracing/src/submit.rs b/examples/txpool-tracing/src/submit.rs
new file mode 100644
index 00000000000..b59cefe2f21
--- /dev/null
+++ b/examples/txpool-tracing/src/submit.rs
@@ -0,0 +1,124 @@
+//! Transaction submission functionality for the txpool tracing example
+#![allow(unused)]
+#![allow(clippy::too_many_arguments)]
+
+use alloy_network::{Ethereum, EthereumWallet, NetworkWallet, TransactionBuilder};
+use alloy_primitives::{Address, TxHash, U256};
+use futures_util::StreamExt;
+use reth_ethereum::{
+    node::api::{FullNodeComponents, NodeTypes},
+    pool::{
+        AddedTransactionOutcome, PoolTransaction, TransactionEvent, TransactionOrigin,
+        TransactionPool,
+    },
+    primitives::SignerRecoverable,
+    rpc::eth::primitives::TransactionRequest,
+    EthPrimitives, TransactionSigned,
+};
+
+/// Submit a transaction to the transaction pool
+///
+/// This function demonstrates how to create, sign, and submit a transaction
+/// to the reth transaction pool.
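+///
+/// A minimal usage sketch with hypothetical nonce, fee, and recipient values
+/// (`node`, `wallet`, and `to` are assumed to be in scope):
+///
+/// ```ignore
+/// let hash = submit_transaction(
+///     &node, &wallet, to, vec![], 0, 1, 21_000, 1_000_000, 20_000_000,
+/// )
+/// .await?;
+/// println!("submitted tx: {hash}");
+/// ```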
+pub async fn submit_transaction<FC>(
+    node: &FC,
+    wallet: &EthereumWallet,
+    to: Address,
+    data: Vec<u8>,
+    nonce: u64,
+    chain_id: u64,
+    gas_limit: u64,
+    max_priority_fee_per_gas: u128,
+    max_fee_per_gas: u128,
+) -> eyre::Result<TxHash>
+where
+    // This enforces `EthPrimitives` types for this node, which unlocks the proper conversions
+    // when inserting the signed transaction into the pool
+    FC: FullNodeComponents<Types: NodeTypes<Primitives = EthPrimitives>>,
+{
+    // Create the transaction request
+    let request = TransactionRequest::default()
+        .with_to(to)
+        .with_input(data)
+        .with_nonce(nonce)
+        .with_chain_id(chain_id)
+        .with_gas_limit(gas_limit)
+        .with_max_priority_fee_per_gas(max_priority_fee_per_gas)
+        .with_max_fee_per_gas(max_fee_per_gas);
+
+    // Sign the transaction
+    let transaction: TransactionSigned =
+        NetworkWallet::<Ethereum>::sign_request(wallet, request).await?.into();
+    // Get the transaction hash before submitting
+    let tx_hash = *transaction.hash();
+
+    // Recover the transaction
+    let transaction = transaction.try_into_recovered()?;
+
+    let mut tx_events = node
+        .pool()
+        .add_consensus_transaction_and_subscribe(transaction, TransactionOrigin::Local)
+        .await
+        .map_err(|e| eyre::eyre!("Pool error: {e}"))?;
+
+    // Wait for the transaction to be added to the pool
+    while let Some(event) = tx_events.next().await {
+        match event {
+            TransactionEvent::Mined(_) => {
+                println!("Transaction was mined: {:?}", tx_events.hash());
+                break;
+            }
+            TransactionEvent::Pending => {
+                println!("Transaction added to pending pool: {:?}", tx_events.hash());
+                break;
+            }
+            TransactionEvent::Discarded => {
+                return Err(eyre::eyre!("Transaction discarded: {:?}", tx_events.hash()));
+            }
+            _ => {
+                // Continue waiting until the transaction is pending, mined, or discarded
+            }
+        }
+    }
+
+    Ok(tx_hash)
+}
+
+/// Helper function to submit a simple ETH transfer transaction
+///
+/// This will first populate a tx request, sign it, and then submit it to the pool in the
+/// required format.
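+///
+/// A minimal usage sketch with hypothetical values, transferring one wei
+/// (`node`, `wallet`, and `to` are assumed to be in scope):
+///
+/// ```ignore
+/// let outcome = submit_eth_transfer(
+///     &node, &wallet, to, U256::from(1), 0, 1, 21_000, 1_000_000, 20_000_000,
+/// )
+/// .await?;
+/// ```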
+pub async fn submit_eth_transfer<FC>(
+    node: &FC,
+    wallet: &EthereumWallet,
+    to: Address,
+    value: U256,
+    nonce: u64,
+    chain_id: u64,
+    gas_limit: u64,
+    max_priority_fee_per_gas: u128,
+    max_fee_per_gas: u128,
+) -> eyre::Result<AddedTransactionOutcome>
+where
+    FC: FullNodeComponents<Types: NodeTypes<Primitives = EthPrimitives>>,
+{
+    // Create the transaction request for ETH transfer
+    let request = TransactionRequest::default()
+        .with_to(to)
+        .with_value(value)
+        .with_nonce(nonce)
+        .with_chain_id(chain_id)
+        .with_gas_limit(gas_limit)
+        .with_max_priority_fee_per_gas(max_priority_fee_per_gas)
+        .with_max_fee_per_gas(max_fee_per_gas);
+
+    // Sign the transaction
+    let transaction: TransactionSigned =
+        NetworkWallet::<Ethereum>::sign_request(wallet, request).await?.into();
+    // Recover the transaction
+    let transaction = transaction.try_into_recovered()?;
+
+    // Submit the transaction to the pool
+    node.pool()
+        .add_consensus_transaction(transaction, TransactionOrigin::Local)
+        .await
+        .map_err(|e| eyre::eyre!("Pool error: {e}"))
+}
diff --git a/fork.yaml b/fork.yaml
index 4c171211256..1b5a96c4178 100644
--- a/fork.yaml
+++ b/fork.yaml
@@ -4,7 +4,7 @@ footer: |
 base:
   name: reth
   url: https://github.com/paradigmxyz/reth
-  hash: f67629fe918fcb90697b08e1d2b4d9dfafbfef49
+  hash: 9d1af5a09cc7794a767858eb3219a24b7e52fc16
 fork:
   name: scroll-reth
   url: https://github.com/scroll-tech/reth
diff --git a/testing/testing-utils/Cargo.toml b/testing/testing-utils/Cargo.toml
index eb4cb4e4449..06e73631ef8 100644
--- a/testing/testing-utils/Cargo.toml
+++ b/testing/testing-utils/Cargo.toml
@@ -23,7 +23,3 @@ alloy-eips.workspace = true
 rand.workspace = true
 secp256k1 = { workspace = true, features = ["rand"] }
 rand_08.workspace = true
-
-[dev-dependencies]
-alloy-eips.workspace = true
-reth-primitives-traits.workspace = true
diff --git a/typos.toml b/typos.toml
new file mode 100644
index 00000000000..25f54392661
--- /dev/null
+++ b/typos.toml
@@ -0,0 +1,39 @@
+[files]
+extend-exclude = [
+    ".git",
+    "target",
+    "crates/storage/libmdbx-rs/mdbx-sys/libmdbx",
+    "Cargo.toml",
+    "Cargo.lock",
+    "testing/ef-tests",
+]
+
+[default]
+extend-ignore-re = [
+    # Hex strings of various lengths
+    "(?i)0x[0-9a-f]{8}",  # 8 hex chars
+    "(?i)0x[0-9a-f]{40}", # 40 hex chars
+    "(?i)0x[0-9a-f]{64}", # 64 hex chars
+    "(?i)[0-9a-f]{8}",    # 8 hex chars without 0x
+    "(?i)[0-9a-f]{40}",   # 40 hex chars without 0x
+    "(?i)[0-9a-f]{64}",   # 64 hex chars without 0x
+    # Ordinals in identifiers
+    "[0-9]+nd",
+    "[0-9]+th",
+    "[0-9]+st",
+    "[0-9]+rd",
+]
+
+[default.extend-words]
+# These are valid identifiers/terms that should be allowed
+crate = "crate"
+ser = "ser"
+ratatui = "ratatui"
+seeked = "seeked" # Past tense of seek, used in trie iterator
+Seeked = "Seeked" # Type name in trie iterator
+Whe = "Whe" # Part of base64 encoded signature
+hel = "hel" # Part of hostname bootnode-hetzner-hel
+ONL = "ONL" # Part of base64 encoded ENR
+Iy = "Iy" # Part of base64 encoded ENR
+flate = "flate" # zlib-flate is a valid tool name
+Pn = "Pn" # Part of UPnP (Universal Plug and Play)
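+
+# A quick local check, assuming the `typos` CLI (the `typos-cli` crate) is
+# installed; it picks this file up automatically from the repository root:
+#
+#   cargo install typos-cli
+#   typos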