diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 488e6c90cf7..5a1d1df7261 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -20,11 +20,11 @@ crates/fs-util/ @onbjerg @emhane crates/metrics/ @onbjerg crates/net/ @emhane @mattsse @Rjected crates/net/downloaders/ @onbjerg @rkrasiuk @emhane -crates/node/ @mattsse @Rjected @onbjerg +crates/node/ @mattsse @Rjected @onbjerg @emhane @klkvr crates/optimism/ @mattsse @Rjected @fgimenez @emhane crates/payload/ @mattsse @Rjected -crates/primitives/ @Rjected -crates/primitives-traits/ @Rjected @joshieDo +crates/primitives/ @Rjected @emhane @mattsse @klkvr +crates/primitives-traits/ @Rjected @joshieDo @emhane @mattsse @klkvr crates/prune/ @shekhirin @joshieDo crates/revm/ @mattsse @rakita crates/rpc/ @mattsse @Rjected @emhane diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh index 0e704857edb..11e5b5e00b9 100755 --- a/.github/assets/check_wasm.sh +++ b/.github/assets/check_wasm.sh @@ -47,6 +47,7 @@ exclude_crates=( reth-optimism-node reth-optimism-payload-builder reth-optimism-rpc + reth-optimism-primitives reth-rpc reth-rpc-api reth-rpc-api-testing-util @@ -68,6 +69,7 @@ exclude_crates=( reth-static-file # tokio reth-transaction-pool # c-kzg reth-trie-parallel # tokio + reth-testing-utils ) # Array to hold the results diff --git a/.github/assets/kurtosis_network_params.yaml b/.github/assets/kurtosis_network_params.yaml index 9c104de4950..e8cc1b51dc8 100644 --- a/.github/assets/kurtosis_network_params.yaml +++ b/.github/assets/kurtosis_network_params.yaml @@ -2,8 +2,6 @@ participants: - el_type: geth cl_type: lighthouse - el_type: reth - el_extra_params: - - --engine.experimental el_image: "ghcr.io/paradigmxyz/reth:kurtosis-ci" cl_type: teku additional_services: diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml index 24b3fc84838..0d8ba91a00c 100644 --- a/.github/workflows/hive.yml +++ b/.github/workflows/hive.yml @@ -5,8 +5,8 @@ name: hive on: workflow_dispatch: schedule: - # every day - - cron: "0 0 * * *" + # run every 12 hours + - cron: "0 */12 * * *" env: CARGO_TERM_COLOR: always diff --git a/.github/workflows/kurtosis.yml b/.github/workflows/kurtosis.yml index 74d26dbd3ee..3e1b7432111 100644 --- a/.github/workflows/kurtosis.yml +++ b/.github/workflows/kurtosis.yml @@ -5,8 +5,8 @@ name: kurtosis on: workflow_dispatch: schedule: - # every day - - cron: "0 1 * * *" + # run every 12 hours + - cron: "0 */12 * * *" env: CARGO_TERM_COLOR: always diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 44b66b4178e..118dcf777fb 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -71,7 +71,7 @@ jobs: - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true - - uses: dcarbone/install-jq-action@v2 + - uses: dcarbone/install-jq-action@v3 - name: Run Wasm checks run: .github/assets/check_wasm.sh diff --git a/Cargo.lock b/Cargo.lock index f9556c0d165..60321029b73 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -182,9 +182,9 @@ dependencies = [ [[package]] name = "alloy-eip7702" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69fb9fd842fdf10a524bbf2c4de6942ad869c1c8c3d128a1b09e67ed5f7cedbd" +checksum = "5f6cee6a35793f3db8a5ffe60e86c695f321d081a567211245f503e8c498fce8" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -418,7 +418,7 @@ checksum = "2b09cae092c27b6f1bde952653a22708691802e57bfef4a2973b80bea21efd3f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] 
[[package]] @@ -643,7 +643,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -659,7 +659,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "syn-solidity", "tiny-keccak", ] @@ -675,7 +675,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "syn-solidity", ] @@ -776,9 +776,9 @@ dependencies = [ [[package]] name = "alloy-trie" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40d8e28db02c006f7abb20f345ffb3cc99c465e36f676ba262534e654ae76042" +checksum = "b6b2e366c0debf0af77766c23694a3f863b02633050e71e096e257ffbd395e50" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -881,7 +881,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -1057,9 +1057,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.17" +version = "0.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cb8f1d480b0ea3783ab015936d2a55c87e219676f0c0b7dec61494043f21857" +checksum = "df895a515f70646414f4b45c0b79082783b80552b373a68283012928df56f522" dependencies = [ "brotli", "flate2", @@ -1104,7 +1104,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -1115,7 +1115,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -1153,7 +1153,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -1164,9 +1164,9 @@ checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "backon" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4fa97bb310c33c811334143cf64c5bb2b7b3c06e453db6b095d7061eff8f113" +checksum = "ba5289ec98f68f28dd809fd601059e6aa908bb8f6108620930828283d4ee23d7" dependencies = [ "fastrand 2.2.0", "tokio", @@ -1259,7 +1259,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -1454,7 +1454,7 @@ checksum = "240f4126219a83519bad05c9a40bfc0303921eeb571fc2d7e44c17ffac99d3f1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "synstructure", ] @@ -1538,9 +1538,9 @@ dependencies = [ [[package]] name = "bstr" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40723b8fb387abc38f4f4a37c09073622e41dd12327033091ef8950659e6dc0c" +checksum = "1a68f1f47cdf0ec8ee4b941b2eee2a80cb796db73118c0dd09ac63fbe405be22" dependencies = [ "memchr", "regex-automata 0.4.9", @@ -1561,9 +1561,9 @@ checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "bytemuck" -version = "1.19.0" +version = "1.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8334215b81e418a0a7bdb8ef0849474f40bb10c8b71f1c4ed315cff49f32494d" +checksum = "8b37c88a63ffd85d15b406896cc343916d7cf57838a847b3a6f2ca5d39a5695a" dependencies = [ "bytemuck_derive", ] @@ -1576,7 +1576,7 @@ checksum = "bcfcc3cd946cb52f0bbfdbbcfa2f4e24f75ebb6c0e1002f7c25904fada18b9ec" dependencies = [ "proc-macro2", "quote", - 
"syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -1664,9 +1664,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aeb932158bd710538c73702db6945cb68a8fb08c519e6e12706b94263b36db8" +checksum = "fd9de9f2205d5ef3fd67e685b0df337994ddd4495e2a28d185500d0e1edfea47" dependencies = [ "jobserver", "libc", @@ -1765,9 +1765,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.20" +version = "4.5.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8" +checksum = "fb3b4b9e5a7c7514dfa52869339ee98b3156b0bfb4e8a77c4ff4babb64b1604f" dependencies = [ "clap_builder", "clap_derive", @@ -1775,9 +1775,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.20" +version = "4.5.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54" +checksum = "b17a95aa67cc7b5ebd32aa5370189aa0d79069ef1c64ce893bd30fb24bff20ec" dependencies = [ "anstream", "anstyle", @@ -1794,14 +1794,14 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] name = "clap_lex" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" +checksum = "afb84c814227b90d6895e01398aee0d8033c00e7466aca416fb6a8e0eb19d8a7" [[package]] name = "coins-bip32" @@ -1872,14 +1872,14 @@ dependencies = [ [[package]] name = "comfy-table" -version = "7.1.1" +version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b34115915337defe99b2aff5c2ce6771e5fbc4079f4b506301f5cf394c8452f7" +checksum = "24f165e7b643266ea80cb858aed492ad9280e3e05ce24d4a99d7d7b889b6a4d9" dependencies = [ - "crossterm 0.27.0", + "crossterm", "strum", "strum_macros", - "unicode-width", + "unicode-width 0.2.0", ] [[package]] @@ -1928,9 +1928,9 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.13.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0121754e84117e65f9d90648ee6aa4882a6e63110307ab73967a4c5e7e69e586" +checksum = "487981fa1af147182687064d0a2c336586d337a606595ced9ffb0c685c250c73" dependencies = [ "cfg-if", "cpufeatures", @@ -1985,6 +1985,16 @@ dependencies = [ "libc", ] +[[package]] +name = "core-foundation" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b55271e5c8c478ad3f38ad24ef34923091e0548492a266d19b3c0b4d82574c63" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "core-foundation-sys" version = "0.8.7" @@ -2011,9 +2021,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ca741a962e1b0bff6d724a1a0958b686406e853bb14061f218562e1896f95e6" +checksum = "16b80225097f2e5ae4e7179dd2266824648f3e2f49d9134d584b76389d31c4c3" dependencies = [ "libc", ] @@ -2120,19 +2130,6 @@ version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" -[[package]] -name = "crossterm" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f476fe445d41c9e991fd07515a6f463074b782242ccf4a5b7b1d1012e70824df" 
-dependencies = [ - "bitflags 2.6.0", - "crossterm_winapi", - "libc", - "parking_lot", - "winapi", -] - [[package]] name = "crossterm" version = "0.28.1" @@ -2251,7 +2248,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2275,7 +2272,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2286,7 +2283,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2408,7 +2405,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2419,7 +2416,7 @@ checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2440,7 +2437,7 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "unicode-xid", ] @@ -2554,7 +2551,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2622,7 +2619,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2705,7 +2702,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2716,7 +2713,7 @@ checksum = "2f9ed6b3789237c8a0c1c505af1c7eb2c560df6186f01b098c3a1064ea532f38" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2773,7 +2770,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2786,6 +2783,7 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" name = "example-beacon-api-sidecar-fetcher" version = "0.1.0" dependencies = [ + "alloy-consensus", "alloy-primitives", "alloy-rpc-types-beacon", "clap", @@ -3213,9 +3211,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.34" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0" +checksum = "c936bfdafb507ebbf50b8074c54fa31c5be9a1e7e5f467dd659697041407d07c" dependencies = [ "crc32fast", "miniz_oxide", @@ -3334,7 +3332,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -3513,9 +3511,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" +checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e" dependencies = [ "atomic-waker", "bytes", @@ -3778,9 +3776,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbbff0a806a4728c99295b254c8838933b5b082d75e3cb70c8dab21fdfbcfa9a" +checksum = "97818827ef4f364230e16705d4706e2897df2bb60617d6ca15d598025a3c481f" dependencies = [ "bytes", "futures-channel", @@ -3809,7 +3807,7 @@ dependencies = [ "hyper-util", "log", "rustls", - 
"rustls-native-certs 0.8.0", + "rustls-native-certs 0.8.1", "rustls-pki-types", "tokio", "tokio-rustls", @@ -3860,7 +3858,7 @@ dependencies = [ "quote", "serde", "serde_json", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -4010,7 +4008,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -4071,13 +4069,13 @@ dependencies = [ [[package]] name = "impl-trait-for-tuples" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" +checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.89", ] [[package]] @@ -4199,7 +4197,7 @@ dependencies = [ "pretty_assertions", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -4213,9 +4211,9 @@ dependencies = [ [[package]] name = "interprocess" -version = "2.2.1" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2f4e4a06d42fab3e85ab1b419ad32b09eab58b901d40c57935ff92db3287a13" +checksum = "894148491d817cb36b6f778017b8ac46b17408d522dd90f539d677ea938362eb" dependencies = [ "doctest-file", "futures-core", @@ -4300,9 +4298,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.11" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" +checksum = "540654e97a3f4470a492cd30ff187bc95d89557a903a2bbf112e2fae98104ef2" [[package]] name = "jni" @@ -4447,7 +4445,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -4607,9 +4605,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.162" +version = "0.2.164" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18d287de67fe55fd7e1581fe933d965a5a9477b38e949cfa9f8574ef01506398" +checksum = "433bfe06b8c75da9b2e3fbea6e5329ff87748f0b144ef75306e674c3f6f7c13f" [[package]] name = "libloading" @@ -4629,9 +4627,9 @@ checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" [[package]] name = "libp2p-identity" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55cca1eb2bc1fd29f099f3daaab7effd01e1a54b7c577d0ed082521034d912e8" +checksum = "257b5621d159b32282eac446bed6670c39c7dc68a200a992d8f056afa0066f6d" dependencies = [ "asn1_der", "bs58", @@ -4740,9 +4738,9 @@ checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "litemap" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "643cb0b8d4fcc284004d5fd0d67ccf61dfffadb7f75e1e71bc420f4688a3a704" +checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" [[package]] name = "lock_api" @@ -4865,7 +4863,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -4992,9 +4990,9 @@ dependencies = [ [[package]] name = "mockall" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4c28b3fb6d753d28c20e826cd46ee611fda1cf3cde03a443a974043247c065a" +checksum = "39a6bfcc6c8c7eed5ee98b9c3e33adc726054389233e201c95dab2d41a3839d2" dependencies = [ "cfg-if", "downcast", @@ -5006,14 
+5004,14 @@ dependencies = [ [[package]] name = "mockall_derive" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "341014e7f530314e9a1fdbc7400b244efea7122662c96bfa248c31da5bfb2020" +checksum = "25ca3004c2efe9011bd4e461bd8256445052b9615405b4f7ea43fc8ca5c20898" dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -5261,7 +5259,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -5314,9 +5312,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "op-alloy-consensus" -version = "0.6.5" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff54d1d790eca1f3aedbd666162e9c42eceff90b9f9d24b352ed9c2df1e901a" +checksum = "fce158d886815d419222daa67fcdf949a34f7950653a4498ebeb4963331f70ed" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5327,14 +5325,14 @@ dependencies = [ "derive_more 1.0.0", "serde", "serde_with", - "spin", + "thiserror 2.0.3", ] [[package]] name = "op-alloy-genesis" -version = "0.6.5" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae84fd64fbc53b3e958ea5a96d7f5633e4a111092e41c51672c2d91835c09efb" +checksum = "2734e9a65efb90fe4520303f984c124766b7d2f2e5dd51cbe54d6269c85a3c91" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5342,13 +5340,14 @@ dependencies = [ "alloy-sol-types", "serde", "serde_repr", + "thiserror 2.0.3", ] [[package]] name = "op-alloy-network" -version = "0.6.5" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71e777450ee3e9c5177e00865e9b4496472b623c50f146fc907b667c6b4ab37" +checksum = "87e4aef8ed017004a176ab1de49df419f59c0fb4a6ce3b693a10fe099fe1afe7" dependencies = [ "alloy-consensus", "alloy-network", @@ -5361,29 +5360,32 @@ dependencies = [ [[package]] name = "op-alloy-protocol" -version = "0.6.5" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e854d2d4958d0a213731560172e8455536329ee9574473ff79fa953da91eb6a" +checksum = "6c68a3e2770890da3ad2fd20d7fe0c8e15672707577b4168a60e388c8eceaca0" dependencies = [ + "alloc-no-stdlib", "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rlp", "alloy-serde", "async-trait", - "derive_more 1.0.0", + "brotli", + "miniz_oxide", "op-alloy-consensus", "op-alloy-genesis", "serde", + "thiserror 2.0.3", "tracing", "unsigned-varint", ] [[package]] name = "op-alloy-rpc-types" -version = "0.6.5" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "981b7f8ab11fe85ba3c1723702f000429b8d0c16b5883c93d577895f262cbac6" +checksum = "060ebeaea8c772e396215f69bb86d231ec8b7f36aca0dd6ce367ceaa9a8c33e6" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5400,9 +5402,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.6.5" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a227b16c9c5df68b112c8db9d268ebf46b3e26c744b4d59d4949575cd603a292" +checksum = "864dbd5511ef4ef00b6c2c980739259b25b24048007b7751ca0069b30b1e3fee" dependencies = [ "alloy-eips", "alloy-primitives", @@ -5414,11 +5416,12 @@ dependencies = [ "op-alloy-protocol", "serde", "snap", + "thiserror 2.0.3", ] [[package]] name = "op-reth" -version = "1.1.1" +version = "1.1.2" dependencies = [ "clap", "reth-cli-util", @@ -5493,9 +5496,9 @@ 
dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.6.12" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" +checksum = "8be4817d39f3272f69c59fe05d0535ae6456c2dc2fa1ba02910296c7e0a5c590" dependencies = [ "arbitrary", "arrayvec", @@ -5504,19 +5507,20 @@ dependencies = [ "bytes", "impl-trait-for-tuples", "parity-scale-codec-derive", + "rustversion", "serde", ] [[package]] name = "parity-scale-codec-derive" -version = "3.6.12" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" +checksum = "8781a75c6205af67215f382092b6e0a4ff3734798523e69073d4bcd294ec767b" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.89", ] [[package]] @@ -5631,7 +5635,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -5660,7 +5664,7 @@ checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -5748,9 +5752,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" +checksum = "280dc24453071f1b63954171985a0b0d30058d287960968b9b2aca264c8d4ee6" [[package]] name = "poseidon-bn254" @@ -5841,7 +5845,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" dependencies = [ "proc-macro2", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -5892,14 +5896,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] name = "proc-macro2" -version = "1.0.89" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e" +checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" dependencies = [ "unicode-ident", ] @@ -5990,7 +5994,7 @@ checksum = "6ff7ff745a347b87471d859a377a9a404361e7efc2a971d73424a6d183c0fc77" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -6189,7 +6193,7 @@ dependencies = [ "bitflags 2.6.0", "cassowary", "compact_str", - "crossterm 0.28.1", + "crossterm", "instability", "itertools 0.13.0", "lru", @@ -6198,7 +6202,7 @@ dependencies = [ "strum_macros", "unicode-segmentation", "unicode-truncate", - "unicode-width", + "unicode-width 0.1.14", ] [[package]] @@ -6341,13 +6345,13 @@ dependencies = [ "pin-project-lite", "quinn", "rustls", - "rustls-native-certs 0.8.0", + "rustls-native-certs 0.8.1", "rustls-pemfile", "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", - "sync_wrapper 1.0.1", + "sync_wrapper 1.0.2", "tokio", "tokio-rustls", "tokio-util", @@ -6373,7 +6377,7 @@ dependencies = [ [[package]] name = "reth" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6446,7 +6450,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6474,7 +6478,7 @@ dependencies = [ [[package]] name = "reth-beacon-consensus" -version = "1.1.1" +version = 
"1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6488,6 +6492,7 @@ dependencies = [ "reth-blockchain-tree", "reth-blockchain-tree-api", "reth-chainspec", + "reth-codecs", "reth-config", "reth-consensus", "reth-db", @@ -6528,7 +6533,7 @@ dependencies = [ [[package]] name = "reth-bench" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -6564,7 +6569,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6602,7 +6607,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree-api" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -6615,7 +6620,7 @@ dependencies = [ [[package]] name = "reth-chain-state" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6644,7 +6649,7 @@ dependencies = [ [[package]] name = "reth-chainspec" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-chains", "alloy-consensus", @@ -6665,7 +6670,7 @@ dependencies = [ [[package]] name = "reth-cli" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-genesis", "clap", @@ -6678,7 +6683,7 @@ dependencies = [ [[package]] name = "reth-cli-commands" -version = "1.1.1" +version = "1.1.2" dependencies = [ "ahash", "alloy-consensus", @@ -6689,7 +6694,7 @@ dependencies = [ "backon", "clap", "comfy-table", - "crossterm 0.28.1", + "crossterm", "eyre", "fdlimit", "futures", @@ -6720,6 +6725,7 @@ dependencies = [ "reth-network", "reth-network-p2p", "reth-network-peers", + "reth-node-api", "reth-node-builder", "reth-node-core", "reth-node-events", @@ -6745,7 +6751,7 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "1.1.1" +version = "1.1.2" dependencies = [ "reth-tasks", "tokio", @@ -6754,7 +6760,7 @@ dependencies = [ [[package]] name = "reth-cli-util" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -6772,7 +6778,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6794,18 +6800,18 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "1.1.1" +version = "1.1.2" dependencies = [ "convert_case", "proc-macro2", "quote", "similar-asserts", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] name = "reth-config" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "eyre", @@ -6821,7 +6827,7 @@ dependencies = [ [[package]] name = "reth-consensus" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6834,7 +6840,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6850,7 +6856,7 @@ dependencies = [ [[package]] name = "reth-consensus-debug-client" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6873,7 +6879,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -6914,7 +6920,7 @@ dependencies = [ [[package]] name = "reth-db-api" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -6942,7 +6948,7 @@ dependencies = [ [[package]] name = "reth-db-common" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -6972,7 +6978,7 @@ dependencies = [ 
[[package]] name = "reth-db-models" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -6989,7 +6995,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7016,7 +7022,7 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7040,7 +7046,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-chains", "alloy-primitives", @@ -7068,7 +7074,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7107,7 +7113,7 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7127,10 +7133,12 @@ dependencies = [ "reth-db", "reth-engine-local", "reth-network-peers", + "reth-node-api", "reth-node-builder", "reth-payload-builder", "reth-payload-builder-primitives", "reth-payload-primitives", + "reth-primitives", "reth-provider", "reth-rpc-layer", "reth-stages-types", @@ -7145,7 +7153,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "1.1.1" +version = "1.1.2" dependencies = [ "aes", "alloy-primitives", @@ -7175,7 +7183,7 @@ dependencies = [ [[package]] name = "reth-engine-local" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -7206,7 +7214,7 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -7224,7 +7232,7 @@ dependencies = [ [[package]] name = "reth-engine-service" -version = "1.1.1" +version = "1.1.2" dependencies = [ "futures", "pin-project", @@ -7253,7 +7261,7 @@ dependencies = [ [[package]] name = "reth-engine-tree" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7261,9 +7269,10 @@ dependencies = [ "alloy-rlp", "alloy-rpc-types-engine", "assert_matches", + "criterion", + "crossbeam-channel", "futures", "metrics", - "pin-project", "reth-beacon-consensus", "reth-blockchain-tree", "reth-blockchain-tree-api", @@ -7283,6 +7292,7 @@ dependencies = [ "reth-payload-primitives", "reth-payload-validator", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-prune", "reth-prune-types", @@ -7298,13 +7308,12 @@ dependencies = [ "revm-primitives", "thiserror 1.0.69", "tokio", - "tokio-stream", "tracing", ] [[package]] name = "reth-engine-util" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7335,7 +7344,7 @@ dependencies = [ [[package]] name = "reth-errors" -version = "1.1.1" +version = "1.1.2" dependencies = [ "reth-blockchain-tree-api", "reth-consensus", @@ -7347,7 +7356,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-chains", "alloy-eips", @@ -7384,7 +7393,7 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7408,7 +7417,7 @@ dependencies = [ [[package]] name = "reth-ethereum-cli" -version = "1.1.1" +version = "1.1.2" dependencies = [ "clap", "eyre", @@ -7419,7 +7428,7 @@ dependencies = [ [[package]] name = "reth-ethereum-consensus" -version = "1.1.1" +version = "1.1.2" 
dependencies = [ "alloy-consensus", "alloy-eips", @@ -7434,7 +7443,7 @@ dependencies = [ [[package]] name = "reth-ethereum-engine-primitives" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7453,7 +7462,7 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7473,7 +7482,7 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7499,7 +7508,7 @@ dependencies = [ [[package]] name = "reth-etl" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "rayon", @@ -7509,7 +7518,7 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7537,7 +7546,7 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7561,7 +7570,7 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7576,7 +7585,7 @@ dependencies = [ [[package]] name = "reth-execution-types" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7594,7 +7603,7 @@ dependencies = [ [[package]] name = "reth-exex" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7637,7 +7646,7 @@ dependencies = [ [[package]] name = "reth-exex-test-utils" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "eyre", @@ -7670,7 +7679,7 @@ dependencies = [ [[package]] name = "reth-exex-types" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7680,13 +7689,14 @@ dependencies = [ "reth-chain-state", "reth-execution-types", "reth-primitives", + "reth-primitives-traits", "serde", "serde_with", ] [[package]] name = "reth-fs-util" -version = "1.1.1" +version = "1.1.2" dependencies = [ "serde", "serde_json", @@ -7695,7 +7705,7 @@ dependencies = [ [[package]] name = "reth-invalid-block-hooks" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7720,7 +7730,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "1.1.1" +version = "1.1.2" dependencies = [ "async-trait", "bytes", @@ -7742,7 +7752,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "1.1.1" +version = "1.1.2" dependencies = [ "bitflags 2.6.0", "byteorder", @@ -7763,7 +7773,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "1.1.1" +version = "1.1.2" dependencies = [ "bindgen", "cc", @@ -7771,7 +7781,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "1.1.1" +version = "1.1.2" dependencies = [ "futures", "metrics", @@ -7782,14 +7792,14 @@ dependencies = [ [[package]] name = "reth-net-banlist" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", ] [[package]] name = "reth-net-nat" -version = "1.1.1" +version = "1.1.2" dependencies = [ "futures-util", "if-addrs", @@ -7803,7 +7813,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7832,6 +7842,7 @@ dependencies = [ "reth-ecies", "reth-eth-wire", "reth-eth-wire-types", + "reth-ethereum-forks", "reth-fs-util", "reth-metrics", "reth-net-banlist", @@ -7865,7 +7876,7 @@ 
dependencies = [ [[package]] name = "reth-network-api" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "alloy-rpc-types-admin", @@ -7887,7 +7898,7 @@ dependencies = [ [[package]] name = "reth-network-p2p" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7909,7 +7920,7 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7925,7 +7936,7 @@ dependencies = [ [[package]] name = "reth-network-types" -version = "1.1.1" +version = "1.1.2" dependencies = [ "humantime-serde", "reth-ethereum-forks", @@ -7938,7 +7949,7 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "1.1.1" +version = "1.1.2" dependencies = [ "anyhow", "bincode", @@ -7956,7 +7967,7 @@ dependencies = [ [[package]] name = "reth-node-api" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -7977,7 +7988,7 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8042,7 +8053,7 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8092,7 +8103,7 @@ dependencies = [ [[package]] name = "reth-node-ethereum" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-contract", @@ -8137,7 +8148,7 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8150,7 +8161,7 @@ dependencies = [ "reth-engine-primitives", "reth-network-api", "reth-primitives-traits", - "reth-prune", + "reth-prune-types", "reth-stages", "reth-static-file-types", "reth-storage-api", @@ -8160,21 +8171,18 @@ dependencies = [ [[package]] name = "reth-node-metrics" -version = "1.1.1" +version = "1.1.2" dependencies = [ "eyre", "http", - "jsonrpsee", + "jsonrpsee-server", "metrics", "metrics-exporter-prometheus", "metrics-process", "metrics-util", "procfs 0.16.0", "reqwest", - "reth-db-api", "reth-metrics", - "reth-primitives-traits", - "reth-provider", "reth-tasks", "socket2", "tikv-jemalloc-ctl", @@ -8186,7 +8194,7 @@ dependencies = [ [[package]] name = "reth-node-types" -version = "1.1.1" +version = "1.1.2" dependencies = [ "reth-chainspec", "reth-db-api", @@ -8197,7 +8205,7 @@ dependencies = [ [[package]] name = "reth-optimism-chainspec" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8217,7 +8225,7 @@ dependencies = [ [[package]] name = "reth-optimism-cli" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8267,7 +8275,7 @@ dependencies = [ [[package]] name = "reth-optimism-consensus" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8284,7 +8292,7 @@ dependencies = [ [[package]] name = "reth-optimism-evm" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8301,6 +8309,7 @@ dependencies = [ "reth-optimism-chainspec", "reth-optimism-consensus", "reth-optimism-forks", + "reth-optimism-primitives", "reth-primitives", "reth-prune-types", "reth-revm", @@ -8311,7 +8320,7 @@ dependencies = [ [[package]] name = "reth-optimism-forks" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-chains", "alloy-primitives", @@ -8322,7 +8331,7 @@ dependencies = [ 
[[package]] name = "reth-optimism-node" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8355,12 +8364,14 @@ dependencies = [ "reth-optimism-forks", "reth-optimism-node", "reth-optimism-payload-builder", + "reth-optimism-primitives", "reth-optimism-rpc", "reth-payload-builder", "reth-payload-util", "reth-primitives", "reth-provider", "reth-revm", + "reth-rpc-server-types", "reth-tracing", "reth-transaction-pool", "reth-trie-db", @@ -8372,7 +8383,7 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8409,24 +8420,27 @@ dependencies = [ [[package]] name = "reth-optimism-primitives" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rlp", + "arbitrary", "bytes", "derive_more 1.0.0", "op-alloy-consensus", "reth-codecs", + "reth-node-types", "reth-primitives", "reth-primitives-traits", "rstest", + "serde", ] [[package]] name = "reth-optimism-rpc" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8470,7 +8484,7 @@ dependencies = [ [[package]] name = "reth-optimism-storage" -version = "1.1.1" +version = "1.1.2" dependencies = [ "reth-codecs", "reth-db-api", @@ -8481,7 +8495,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8503,7 +8517,7 @@ dependencies = [ [[package]] name = "reth-payload-builder-primitives" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-rpc-types-engine", "async-trait", @@ -8516,7 +8530,7 @@ dependencies = [ [[package]] name = "reth-payload-primitives" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8534,7 +8548,7 @@ dependencies = [ [[package]] name = "reth-payload-util" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8543,7 +8557,7 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-rpc-types", "reth-chainspec", @@ -8553,7 +8567,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8600,7 +8614,7 @@ dependencies = [ [[package]] name = "reth-primitives-traits" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8629,7 +8643,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8662,6 +8676,7 @@ dependencies = [ "reth-node-types", "reth-optimism-primitives", "reth-primitives", + "reth-primitives-traits", "reth-prune-types", "reth-scroll-primitives", "reth-stages-types", @@ -8679,8 +8694,9 @@ dependencies = [ [[package]] name = "reth-prune" -version = "1.1.1" +version = "1.1.2" dependencies = [ + "alloy-eips", "alloy-primitives", "assert_matches", "itertools 0.13.0", @@ -8709,7 +8725,7 @@ dependencies = [ [[package]] name = "reth-prune-types" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "arbitrary", @@ -8729,7 +8745,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8747,7 +8763,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "1.1.1" +version = "1.1.2" dependencies = [ 
"alloy-consensus", "alloy-dyn-abi", @@ -8819,7 +8835,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -8843,7 +8859,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8862,7 +8878,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8913,7 +8929,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8951,7 +8967,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-api" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -8993,7 +9009,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-types" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9014,6 +9030,7 @@ dependencies = [ "reth-execution-types", "reth-metrics", "reth-primitives", + "reth-primitives-traits", "reth-revm", "reth-rpc-server-types", "reth-rpc-types-compat", @@ -9035,7 +9052,7 @@ dependencies = [ [[package]] name = "reth-rpc-layer" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-rpc-types-engine", "http", @@ -9052,7 +9069,7 @@ dependencies = [ [[package]] name = "reth-rpc-server-types" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9067,7 +9084,7 @@ dependencies = [ [[package]] name = "reth-rpc-types-compat" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9085,7 +9102,7 @@ dependencies = [ [[package]] name = "reth-scroll-execution" -version = "1.1.1" +version = "1.1.2" dependencies = [ "auto_impl", "reth-revm", @@ -9096,7 +9113,7 @@ dependencies = [ [[package]] name = "reth-scroll-primitives" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "arbitrary", @@ -9110,7 +9127,7 @@ dependencies = [ [[package]] name = "reth-scroll-revm" -version = "1.1.1" +version = "1.1.2" dependencies = [ "reth-scroll-primitives", "revm", @@ -9119,7 +9136,7 @@ dependencies = [ [[package]] name = "reth-scroll-storage" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "eyre", @@ -9133,9 +9150,10 @@ dependencies = [ [[package]] name = "reth-stages" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rlp", "assert_matches", @@ -9184,7 +9202,7 @@ dependencies = [ [[package]] name = "reth-stages-api" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "aquamarine", @@ -9212,7 +9230,7 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "arbitrary", @@ -9229,14 +9247,16 @@ dependencies = [ [[package]] name = "reth-static-file" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "assert_matches", "parking_lot", "rayon", + "reth-codecs", "reth-db", "reth-db-api", + "reth-primitives-traits", "reth-provider", "reth-prune-types", "reth-stages", @@ -9251,7 +9271,7 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "clap", @@ -9262,7 +9282,7 @@ dependencies = [ [[package]] name = "reth-storage-api" -version = "1.1.1" +version = "1.1.2" dependencies 
= [ "alloy-consensus", "alloy-eips", @@ -9275,6 +9295,7 @@ dependencies = [ "reth-db-models", "reth-execution-types", "reth-primitives", + "reth-primitives-traits", "reth-prune-types", "reth-stages-types", "reth-storage-errors", @@ -9283,7 +9304,7 @@ dependencies = [ [[package]] name = "reth-storage-errors" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9295,7 +9316,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "1.1.1" +version = "1.1.2" dependencies = [ "auto_impl", "dyn-clone", @@ -9312,7 +9333,7 @@ dependencies = [ [[package]] name = "reth-testing-utils" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9325,7 +9346,7 @@ dependencies = [ [[package]] name = "reth-tokio-util" -version = "1.1.1" +version = "1.1.2" dependencies = [ "tokio", "tokio-stream", @@ -9334,7 +9355,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "1.1.1" +version = "1.1.2" dependencies = [ "clap", "eyre", @@ -9348,7 +9369,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9395,7 +9416,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9425,7 +9446,7 @@ dependencies = [ [[package]] name = "reth-trie-common" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -9450,7 +9471,7 @@ dependencies = [ [[package]] name = "reth-trie-db" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9480,7 +9501,7 @@ dependencies = [ [[package]] name = "reth-trie-parallel" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -9507,7 +9528,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -9516,7 +9537,9 @@ dependencies = [ "itertools 0.13.0", "pretty_assertions", "proptest", + "proptest-arbitrary-interop", "rand 0.8.5", + "reth-primitives-traits", "reth-testing-utils", "reth-tracing", "reth-trie", @@ -9707,6 +9730,7 @@ checksum = "8f4b84ba6e838ceb47b41de5194a60244fac43d9fe03b71dbe8c5a201081d6d1" dependencies = [ "bytemuck", "byteorder", + "serde", ] [[package]] @@ -9750,7 +9774,7 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.1", - "syn 2.0.87", + "syn 2.0.89", "unicode-ident", ] @@ -9832,9 +9856,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.40" +version = "0.38.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99e4ea3e1cdc4b559b8e5650f9c8e5998e3e5c1343b4eaf034565f32318d63c0" +checksum = "d7f649912bc1495e167a6edee79151c84b1bad49748cb4f1f1167f459f6224f6" dependencies = [ "bitflags 2.6.0", "errno", @@ -9845,9 +9869,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.16" +version = "0.23.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eee87ff5d9b36712a58574e12e9f0ea80f915a5b0ac518d322b24a465617925e" +checksum = "9c9cc1d47e243d655ace55ed38201c19ae02c148ae56412ab8750e8f0166ab7f" dependencies = [ "log", "once_cell", @@ -9868,20 +9892,19 @@ dependencies = [ "rustls-pemfile", "rustls-pki-types", "schannel", - "security-framework", + "security-framework 2.11.1", ] [[package]] name = "rustls-native-certs" -version = "0.8.0" +version = "0.8.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcaf18a4f2be7326cd874a5fa579fae794320a0f388d365dca7e480e55f83f8a" +checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" dependencies = [ "openssl-probe", - "rustls-pemfile", "rustls-pki-types", "schannel", - "security-framework", + "security-framework 3.0.1", ] [[package]] @@ -9908,7 +9931,7 @@ version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "afbb878bdfdf63a336a5e63561b1835e7a8c91524f51621db870169eac84b490" dependencies = [ - "core-foundation", + "core-foundation 0.9.4", "core-foundation-sys", "jni", "log", @@ -9917,7 +9940,7 @@ dependencies = [ "rustls-native-certs 0.7.3", "rustls-platform-verifier-android", "rustls-webpki", - "security-framework", + "security-framework 2.11.1", "security-framework-sys", "webpki-roots", "winapi", @@ -9981,18 +10004,18 @@ dependencies = [ [[package]] name = "scc" -version = "2.2.4" +version = "2.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8d25269dd3a12467afe2e510f69fb0b46b698e5afb296b59f2145259deaf8e8" +checksum = "66b202022bb57c049555430e11fc22fea12909276a80a4c3d368da36ac1d88ed" dependencies = [ "sdd", ] [[package]] name = "schannel" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01227be5826fa0690321a2ba6c5cd57a19cf3f6a09e76973b58e61de6ab9d1c1" +checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" dependencies = [ "windows-sys 0.59.0", ] @@ -10067,13 +10090,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ "bitflags 2.6.0", - "core-foundation", + "core-foundation 0.9.4", "core-foundation-sys", "libc", "num-bigint", "security-framework-sys", ] +[[package]] +name = "security-framework" +version = "3.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1415a607e92bec364ea2cf9264646dcce0f91e6d65281bd6f2819cca3bf39c8" +dependencies = [ + "bitflags 2.6.0", + "core-foundation 0.10.0", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + [[package]] name = "security-framework-sys" version = "2.12.1" @@ -10104,9 +10140,9 @@ dependencies = [ [[package]] name = "semver-parser" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" +checksum = "9900206b54a3527fdc7b8a938bffd94a568bac4f4aa8113b209df75a09c0dec2" dependencies = [ "pest", ] @@ -10140,14 +10176,14 @@ checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] name = "serde_json" -version = "1.0.132" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" +checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" dependencies = [ "indexmap 2.6.0", "itoa", @@ -10175,7 +10211,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10226,7 +10262,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10249,7 +10285,7 @@ checksum = 
"5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10480,9 +10516,6 @@ name = "spin" version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" -dependencies = [ - "lock_api", -] [[package]] name = "spki" @@ -10543,7 +10576,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10601,9 +10634,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.87" +version = "2.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25aa4ce346d03a6dcd68dd8b4010bcb74e54e62c90c573f394c46eae99aba32d" +checksum = "44d46482f1c1c87acd84dea20c1bf5ebff4c757009ed6bf19cfd36fb10e92c4e" dependencies = [ "proc-macro2", "quote", @@ -10619,7 +10652,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10630,9 +10663,9 @@ checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] name = "sync_wrapper" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" dependencies = [ "futures-core", ] @@ -10645,7 +10678,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10722,7 +10755,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10770,7 +10803,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10781,7 +10814,7 @@ checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10958,7 +10991,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -11086,9 +11119,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8437150ab6bbc8c5f0f519e3d5ed4aa883a83dd4cdd3d1b21f9482936046cb97" +checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" dependencies = [ "async-compression", "base64 0.22.1", @@ -11159,7 +11192,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -11408,9 +11441,9 @@ checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" +checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" [[package]] name = "unicode-normalization" @@ -11435,7 +11468,7 @@ checksum = "b3644627a5af5fa321c95b9b235a72fd24cd29c648c2c379431e6628655627bf" dependencies = [ "itertools 0.13.0", "unicode-segmentation", - "unicode-width", + 
"unicode-width 0.1.14", ] [[package]] @@ -11444,6 +11477,12 @@ version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" +[[package]] +name = "unicode-width" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" + [[package]] name = "unicode-xid" version = "0.2.6" @@ -11474,9 +11513,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.3" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d157f1b96d14500ffdc1f10ba712e780825526c03d9a49b4d0324b0d9113ada" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ "form_urlencoded", "idna 1.0.3", @@ -11551,7 +11590,7 @@ checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -11622,7 +11661,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "wasm-bindgen-shared", ] @@ -11656,7 +11695,7 @@ checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -11682,9 +11721,9 @@ dependencies = [ [[package]] name = "wasmtimer" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb4f099acbc1043cc752b91615b24b02d7f6fcd975bd781fed9f50b3c3e15bf7" +checksum = "0048ad49a55b9deb3953841fa1fc5858f0efbcb7a18868c899a360269fac1b23" dependencies = [ "futures", "js-sys", @@ -11716,9 +11755,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.26.6" +version = "0.26.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "841c67bff177718f1d4dfefde8d8f0e78f9b6589319ba88312f567fc5841a958" +checksum = "5d642ff16b7e79272ae451b7322067cdc17cadf68c23264be9d94a32319efe7e" dependencies = [ "rustls-pki-types", ] @@ -11822,7 +11861,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -11833,7 +11872,7 @@ checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -11844,7 +11883,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -11855,7 +11894,7 @@ checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -12112,9 +12151,9 @@ checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" [[package]] name = "yoke" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c5b1314b079b0930c31e3af543d8ee1757b1951ae1e1565ec704403a7240ca5" +checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" dependencies = [ "serde", "stable_deref_trait", @@ -12124,13 +12163,13 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.7.4" +version = "0.7.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" +checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "synstructure", ] @@ -12152,27 +12191,27 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] name = "zerofrom" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ec111ce797d0e0784a1116d0ddcdbea84322cd79e5d5ad173daeba4f93ab55" +checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" dependencies = [ "zerofrom-derive", ] [[package]] name = "zerofrom-derive" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" +checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "synstructure", ] @@ -12193,7 +12232,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -12215,7 +12254,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 4b803229252..74762824cbe 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "1.1.1" +version = "1.1.2" edition = "2021" rust-version = "1.82" license = "MIT OR Apache-2.0" @@ -483,10 +483,10 @@ alloy-transport-ws = { version = "0.6.4", default-features = false } poseidon-bn254 = { git = "https://github.com/scroll-tech/poseidon-bn254", rev = "526a64a", features = ["bn254"] } # op -op-alloy-rpc-types = "0.6.5" -op-alloy-rpc-types-engine = "0.6.5" -op-alloy-network = "0.6.5" -op-alloy-consensus = "0.6.5" +op-alloy-rpc-types = "0.6.7" +op-alloy-rpc-types-engine = "0.6.7" +op-alloy-network = "0.6.7" +op-alloy-consensus = "0.6.7" # misc aquamarine = "0.6" @@ -540,6 +540,7 @@ tracing = "0.1.0" tracing-appender = "0.2" url = "2.3" zstd = "0.13" +byteorder = "1" # metrics metrics = "0.24.0" @@ -567,6 +568,7 @@ hyper = "1.3" hyper-util = "0.1.5" pin-project = "1.0.12" reqwest = { version = "0.12", default-features = false } +tracing-futures = "0.2" tower = "0.4" tower-http = "0.6" @@ -578,6 +580,7 @@ if-addrs = "0.13" # rpc jsonrpsee = "0.24" jsonrpsee-core = "0.24" +jsonrpsee-server = "0.24" jsonrpsee-http-client = "0.24" jsonrpsee-types = "0.24" @@ -648,7 +651,7 @@ tracy-client = "0.17.3" #alloy-transport-ipc = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } #alloy-transport-ws = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } +#op-alloy-consensus = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } +#op-alloy-network = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } #op-alloy-rpc-types = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } #op-alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } -#op-alloy-network = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } -#op-alloy-consensus = { git = 
"https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } diff --git a/HARDFORK-CHECKLIST.md b/HARDFORK-CHECKLIST.md index 80ebfc20c98..17c639f0d5e 100644 --- a/HARDFORK-CHECKLIST.md +++ b/HARDFORK-CHECKLIST.md @@ -17,5 +17,5 @@ ### Updates to the engine API - Add new endpoints to the `EngineApi` trait and implement endpoints. -- Update the `ExceuctionPayload` + `ExecutionPayloadSidecar` to `Block` conversion if there are any additional parameters. -- Update version specific validation checks in the `EngineValidator` trait. \ No newline at end of file +- Update the `ExecutionPayload` + `ExecutionPayloadSidecar` to `Block` conversion if there are any additional parameters. +- Update version specific validation checks in the `EngineValidator` trait. diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index adb2c83b1b2..aa89b4112c3 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -201,7 +201,7 @@ impl> Command { let encoded_length = pooled.encode_2718_len(); // insert the blob into the store - blob_store.insert(transaction.hash, sidecar)?; + blob_store.insert(transaction.hash(), sidecar)?; encoded_length } diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index da928645b9f..efe4a2f7c22 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -4,7 +4,7 @@ use crate::{args::NetworkArgs, utils::get_single_header}; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockNumber, B256}; use clap::Parser; -use futures::{stream::select as stream_select, StreamExt}; +use futures::StreamExt; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; @@ -19,14 +19,13 @@ use reth_downloaders::{ headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; use reth_exex::ExExManagerHandle; -use reth_network::{BlockDownloaderProvider, NetworkEventListenerProvider, NetworkHandle}; +use reth_network::{BlockDownloaderProvider, NetworkHandle}; use reth_network_api::NetworkInfo; use reth_network_p2p::{headers::client::HeadersClient, EthBlockClient}; use reth_node_api::NodeTypesWithDBAdapter; use reth_node_ethereum::EthExecutorProvider; use reth_provider::{ - providers::ProviderNodeTypes, BlockExecutionWriter, ChainSpecProvider, ProviderFactory, - StageCheckpointReader, + providers::ProviderNodeTypes, ChainSpecProvider, ProviderFactory, StageCheckpointReader, }; use reth_prune::PruneModes; use reth_stages::{ @@ -59,7 +58,7 @@ pub struct Command { } impl> Command { - fn build_pipeline, Client>( + fn build_pipeline + CliNodeTypes, Client>( &self, config: &Config, client: Client, @@ -73,7 +72,7 @@ impl> Command { { // building network downloaders using the fetch client let header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers) - .build(client.clone(), Arc::clone(&consensus)) + .build(client.clone(), consensus.clone().as_header_validator()) .into_task_with(task_executor); let body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies) @@ -207,17 +206,12 @@ impl> Command { return Ok(()) } - let pipeline_events = pipeline.events(); - let events = stream_select( - network.event_listener().map(Into::into), - pipeline_events.map(Into::into), - ); ctx.task_executor.spawn_critical( "events task", reth_node_events::node::handle_events( Some(Box::new(network)), latest_block_number, - 
events, + pipeline.events().map(Into::into), ), ); @@ -235,11 +229,7 @@ impl> Command { trace!(target: "reth::cli", from = next_block, to = target_block, tip = ?target_block_hash, ?result, "Pipeline finished"); // Unwind the pipeline without committing. - { - provider_factory - .provider_rw()? - .take_block_and_execution_range(next_block..=target_block)?; - } + provider_factory.provider_rw()?.unwind_trie_state_range(next_block..=target_block)?; // Update latest block current_max_block = target_block; diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index db4cd952e8d..bb8a6a2c4a1 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -21,7 +21,7 @@ use reth_node_ethereum::EthExecutorProvider; use reth_provider::{ providers::ProviderNodeTypes, writer::UnifiedStorageWriter, BlockNumReader, BlockWriter, ChainSpecProvider, DatabaseProviderFactory, HeaderProvider, LatestStateProviderRef, - OriginalValuesKnown, ProviderError, ProviderFactory, StateWriter, + OriginalValuesKnown, ProviderError, ProviderFactory, StateWriter, StorageLocation, }; use reth_revm::database::StateProviderDatabase; use reth_stages::{ @@ -148,7 +148,7 @@ impl> Command { .map_err(|block| eyre::eyre!("Error sealing block with senders: {block:?}"))?; trace!(target: "reth::cli", block_number, "Executing block"); - provider_rw.insert_block(sealed_block.clone())?; + provider_rw.insert_block(sealed_block.clone(), StorageLocation::Database)?; td += sealed_block.difficulty; let mut executor = executor_provider.batch_executor(StateProviderDatabase::new( diff --git a/bin/reth/src/lib.rs b/bin/reth/src/lib.rs index 6b71f48de12..53c592063ec 100644 --- a/bin/reth/src/lib.rs +++ b/bin/reth/src/lib.rs @@ -15,7 +15,7 @@ //! - `min-error-logs`: Disables all logs below `error` level. //! - `min-warn-logs`: Disables all logs below `warn` level. //! - `min-info-logs`: Disables all logs below `info` level. This can speed up the node, since fewer -//! calls to the logging component is made. +//! calls to the logging component are made. //! - `min-debug-logs`: Disables all logs below `debug` level. //! - `min-trace-logs`: Disables all logs below `trace` level. diff --git a/book/developers/profiling.md b/book/developers/profiling.md index f1fdf520eb2..956bc563303 100644 --- a/book/developers/profiling.md +++ b/book/developers/profiling.md @@ -25,7 +25,7 @@ In this tutorial, we will be reviewing: [Jemalloc](https://jemalloc.net/) is a general-purpose allocator that is used [across the industry in production](https://engineering.fb.com/2011/01/03/core-data/scalable-memory-allocation-using-jemalloc/), well known for its performance benefits, predictability, and profiling capabilities. We've seen significant performance benefits in reth when using jemalloc, but will be primarily focusing on its profiling capabilities. -Jemalloc also provides tools for analyzing and visualizing its the allocation profiles it generates, notably `jeprof`. +Jemalloc also provides tools for analyzing and visualizing the allocation profiles it generates, notably `jeprof`.
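As a quick sketch of what that analysis looks like (the binary path and heap-dump filename below are placeholders rather than output of a real run; building with profiling enabled and producing the dumps is covered in the next sections):

```bash
# Placeholder paths: substitute your own reth binary and heap dump.
# Plain-text summary of the heaviest allocation sites:
jeprof --text ./reth jeprof.out.12345.0.i0.heap

# Call-graph visualization of the same profile, rendered as an SVG:
jeprof --svg ./reth jeprof.out.12345.0.i0.heap > reth-heap.svg
```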
#### Enabling jemalloc in reth diff --git a/book/run/config.md b/book/run/config.md index 10fd40ca763..bb28d855de8 100644 --- a/book/run/config.md +++ b/book/run/config.md @@ -36,7 +36,7 @@ The defaults shipped with Reth try to be relatively reasonable, but may not be o ### `headers` -The headers section controls both the behavior of the header stage, which download historical headers, as well as the primary downloader that fetches headers over P2P. +The headers section controls both the behavior of the header stage, which downloads historical headers, and the primary downloader that fetches headers over P2P. ```toml [stages.headers] @@ -65,7 +65,7 @@ commit_threshold = 10000 ### `bodies` -The bodies section controls both the behavior of the bodies stage, which download historical block bodies, as well as the primary downloader that fetches block bodies over P2P. +The bodies section controls both the behavior of the bodies stage, which downloads historical block bodies, and the primary downloader that fetches block bodies over P2P. ```toml [stages.bodies] @@ -102,7 +102,7 @@ The sender recovery stage recovers the address of transaction senders using tran ```toml [stages.sender_recovery] -# The amount of transactions to recover senders for before +# The number of transactions to recover senders for before # writing the results to disk. # # Lower thresholds correspond to more frequent disk I/O (writes), diff --git a/book/run/private-testnet.md b/book/run/private-testnet.md index 3a987e52c73..28253ca9f01 100644 --- a/book/run/private-testnet.md +++ b/book/run/private-testnet.md @@ -6,7 +6,7 @@ This guide uses [Kurtosis' ethereum-package](https://github.com/ethpandaops/ethe * Go [here](https://docs.kurtosis.com/install/) to install Kurtosis * Go [here](https://docs.docker.com/get-docker/) to install Docker -The [`ethereum-package`](https://github.com/ethpandaops/ethereum-package) is a [package](https://docs.kurtosis.com/advanced-concepts/packages) for a general purpose Ethereum testnet definition used for instantiating private testnets at any scale over Docker or Kubernetes, locally or in the cloud. This guide will go through how to spin up a local private testnet with Reth various CL clients locally. Specifically, you will instantiate a 2-node network over Docker with Reth/Lighthouse and Reth/Teku client combinations. +The [`ethereum-package`](https://github.com/ethpandaops/ethereum-package) is a [package](https://docs.kurtosis.com/advanced-concepts/packages) for a general purpose Ethereum testnet definition used for instantiating private testnets at any scale over Docker or Kubernetes, locally or in the cloud. This guide will go through how to spin up a local private testnet with Reth and various CL clients. Specifically, you will instantiate a 2-node network over Docker with Reth/Lighthouse and Reth/Teku client combinations. To see all possible configurations and flags you can use, including metrics and observability tools (e.g. Grafana, Prometheus, etc), go [here](https://github.com/ethpandaops/ethereum-package#configuration). diff --git a/book/run/sync-op-mainnet.md b/book/run/sync-op-mainnet.md index 2a862314a1d..0e2090acbcb 100644 --- a/book/run/sync-op-mainnet.md +++ b/book/run/sync-op-mainnet.md @@ -1,6 +1,6 @@ # Sync OP Mainnet -To sync OP mainnet, bedrock state needs to be imported as a starting point. +To sync OP mainnet, Bedrock state needs to be imported as a starting point.
There are currently two ways: * Minimal bootstrap **(recommended)**: only state snapshot at Bedrock block is imported without any OVM historical data. * Full bootstrap **(not recommended)**: state, blocks and receipts are imported. *Not recommended for now: [storage consistency issue](https://github.com/paradigmxyz/reth/pull/11099) tldr: sudden crash may break the node diff --git a/book/run/transactions.md b/book/run/transactions.md index 61327b57300..edb3a24d76f 100644 --- a/book/run/transactions.md +++ b/book/run/transactions.md @@ -38,7 +38,7 @@ Alongside the `accessList` parameter and legacy parameters (except `gasPrice`), The base fee is burned, while the priority fee is paid to the miner who includes the transaction, incentivizing miners to include transactions with higher priority fees per gas. -## EIP-4844 Transaction +## EIP-4844 Transactions [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844) transactions (type `0x3`) were introduced in Ethereum's Dencun fork. This provides a temporary but significant scaling relief for rollups by allowing them to initially scale to 0.375 MB per slot, with a separate fee market allowing fees to be very low while usage of this system is limited. diff --git a/book/run/troubleshooting.md b/book/run/troubleshooting.md index 7368b6631ab..cab39cb1165 100644 --- a/book/run/troubleshooting.md +++ b/book/run/troubleshooting.md @@ -8,7 +8,7 @@ This page tries to answer how to deal with the most popular issues. If you're: 1. Running behind the tip -2. Have slow canonical commit time according to the `Canonical Commit Latency time` chart on [Grafana dashboard](./observability.md#prometheus--grafana) (more than 2-3 seconds) +2. Having a slow canonical commit time according to the `Canonical Commit Latency Time` chart on [Grafana dashboard](./observability.md#prometheus--grafana) (more than 2-3 seconds) 3. Seeing warnings in your logs such as ```console 2023-11-08T15:17:24.789731Z WARN providers::db: Transaction insertion took too long block_number=18528075 tx_num=2150227643 hash=0xb7de1d6620efbdd3aa8547c47a0ff09a7fd3e48ba3fd2c53ce94c6683ed66e7c elapsed=6.793759034s @@ -48,7 +48,7 @@ equal to the [freshly synced node](../installation/installation.md#hardware-requ mv reth_compact.dat $(reth db path)/mdbx.dat ``` 7. Start Reth -8. Confirm that the values on the `Freelist` chart is near zero and the values on the `Canonical Commit Latency time` chart +8. Confirm that the values on the `Freelist` chart are near zero and the values on the `Canonical Commit Latency Time` chart are less than 1 second. 9. Delete original database ```bash diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 1a8a390e99d..d2ff0f5c844 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1,6 +1,7 @@ //!
Implementation of [`BlockchainTree`] use crate::{ + externals::TreeNodeTypes, metrics::{MakeCanonicalAction, MakeCanonicalDurationsRecorder, TreeMetrics}, state::{SidechainId, TreeState}, AppendableChain, BlockIndices, BlockchainTreeConfig, ExecutionData, TreeExternals, @@ -21,10 +22,10 @@ use reth_primitives::{ SealedHeader, StaticFileSegment, }; use reth_provider::{ - providers::ProviderNodeTypes, BlockExecutionWriter, BlockNumReader, BlockWriter, - CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, - ChainSpecProvider, ChainSplit, ChainSplitTarget, DBProvider, DisplayBlocksChain, - HeaderProvider, ProviderError, StaticFileProviderFactory, + BlockExecutionWriter, BlockNumReader, BlockWriter, CanonStateNotification, + CanonStateNotificationSender, CanonStateNotifications, ChainSpecProvider, ChainSplit, + ChainSplitTarget, DBProvider, DisplayBlocksChain, HeaderProvider, ProviderError, + StaticFileProviderFactory, StorageLocation, }; use reth_stages_api::{MetricEvent, MetricEventsSender}; use reth_storage_errors::provider::{ProviderResult, RootMismatch}; @@ -93,7 +94,7 @@ impl BlockchainTree { impl BlockchainTree where - N: ProviderNodeTypes, + N: TreeNodeTypes, E: BlockExecutorProvider, { /// Builds the blockchain tree for the node. @@ -1332,7 +1333,7 @@ where info!(target: "blockchain_tree", "REORG: revert canonical from database by unwinding chain blocks {:?}", revert_range); // read block and execution result from database. and remove traces of block from tables. let blocks_and_execution = provider_rw - .take_block_and_execution_range(revert_range) + .take_block_and_execution_above(revert_until, StorageLocation::Database) .map_err(|e| CanonicalError::CanonicalRevert(e.to_string()))?; provider_rw.commit()?; @@ -1386,16 +1387,18 @@ mod tests { use reth_db_api::transaction::DbTxMut; use reth_evm::test_utils::MockExecutorProvider; use reth_evm_ethereum::execute::EthExecutorProvider; + use reth_node_types::FullNodePrimitives; use reth_primitives::{ proofs::{calculate_receipt_root, calculate_transaction_root}, Account, BlockBody, Transaction, TransactionSigned, TransactionSignedEcRecovered, }; use reth_provider::{ + providers::ProviderNodeTypes, test_utils::{ blocks::BlockchainTestData, create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB, }, - ProviderFactory, + ProviderFactory, StorageLocation, }; use reth_revm::primitives::AccountInfo; use reth_stages_api::StageCheckpoint; @@ -1420,7 +1423,12 @@ mod tests { TreeExternals::new(provider_factory, consensus, executor_factory) } - fn setup_genesis(factory: &ProviderFactory, mut genesis: SealedBlock) { + fn setup_genesis< + N: ProviderNodeTypes>, + >( + factory: &ProviderFactory, + mut genesis: SealedBlock, + ) { // insert genesis to db. 
genesis.header.set_block_number(10); @@ -1551,6 +1559,7 @@ mod tests { SealedBlock::new(chain_spec.sealed_genesis_header(), Default::default()) .try_seal_with_senders() .unwrap(), + StorageLocation::Database, ) .unwrap(); let account = Account { balance: initial_signer_balance, ..Default::default() }; @@ -1561,7 +1570,7 @@ mod tests { let single_tx_cost = U256::from(INITIAL_BASE_FEE * MIN_TRANSACTION_GAS); let mock_tx = |nonce: u64| -> TransactionSignedEcRecovered { - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Eip1559(TxEip1559 { chain_id: chain_spec.chain.id(), nonce, diff --git a/crates/blockchain-tree/src/externals.rs b/crates/blockchain-tree/src/externals.rs index 4e22fcb78b6..2b9dae9a3df 100644 --- a/crates/blockchain-tree/src/externals.rs +++ b/crates/blockchain-tree/src/externals.rs @@ -2,10 +2,10 @@ use alloy_primitives::{BlockHash, BlockNumber}; use reth_consensus::Consensus; -use reth_db::{static_file::HeaderMask, tables}; +use reth_db::{static_file::BlockHashMask, tables}; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; -use reth_node_types::NodeTypesWithDB; -use reth_primitives::StaticFileSegment; +use reth_node_types::{FullNodePrimitives, NodeTypesWithDB}; +use reth_primitives::{BlockBody, StaticFileSegment}; use reth_provider::{ providers::ProviderNodeTypes, ChainStateBlockReader, ChainStateBlockWriter, ProviderFactory, StaticFileProviderFactory, StatsReader, @@ -13,6 +13,16 @@ use reth_provider::{ use reth_storage_errors::provider::ProviderResult; use std::{collections::BTreeMap, sync::Arc}; +/// A helper trait with requirements for [`ProviderNodeTypes`] to be used within [`TreeExternals`]. +pub trait TreeNodeTypes: + ProviderNodeTypes> +{ +} +impl TreeNodeTypes for T where + T: ProviderNodeTypes> +{ +} + /// A container for external components. /// /// This is a simple container for external components used throughout the blockchain tree @@ -75,7 +85,7 @@ impl TreeExternals { hashes.extend(range.clone().zip(static_file_provider.fetch_range_with_predicate( StaticFileSegment::Headers, range, - |cursor, number| cursor.get_one::>(number.into()), + |cursor, number| cursor.get_one::(number.into()), |_| true, )?)); } diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index 8e6cceccdd1..f997e0a062d 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -1,5 +1,7 @@ //! Wrapper around `BlockchainTree` that allows for it to be shared. 
+use crate::externals::TreeNodeTypes; + use super::BlockchainTree; use alloy_eips::BlockNumHash; use alloy_primitives::{BlockHash, BlockNumber}; @@ -13,8 +15,8 @@ use reth_evm::execute::BlockExecutorProvider; use reth_node_types::NodeTypesWithDB; use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ - providers::ProviderNodeTypes, BlockchainTreePendingStateProvider, CanonStateSubscriptions, - FullExecutionDataProvider, ProviderError, + providers::ProviderNodeTypes, BlockchainTreePendingStateProvider, CanonStateNotifications, + CanonStateSubscriptions, FullExecutionDataProvider, ProviderError, }; use reth_storage_errors::provider::ProviderResult; use std::{collections::BTreeMap, sync::Arc}; @@ -36,7 +38,7 @@ impl ShareableBlockchainTree { impl BlockchainTreeEngine for ShareableBlockchainTree where - N: ProviderNodeTypes, + N: TreeNodeTypes, E: BlockExecutorProvider, { fn buffer_block(&self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { @@ -107,7 +109,7 @@ where impl BlockchainTreeViewer for ShareableBlockchainTree where - N: ProviderNodeTypes, + N: TreeNodeTypes, E: BlockExecutorProvider, { fn header_by_hash(&self, hash: BlockHash) -> Option { @@ -170,7 +172,7 @@ where impl BlockchainTreePendingStateProvider for ShareableBlockchainTree where - N: ProviderNodeTypes, + N: TreeNodeTypes, E: BlockExecutorProvider, { fn find_pending_state_provider( @@ -188,7 +190,7 @@ where N: ProviderNodeTypes, E: Send + Sync, { - fn subscribe_to_canonical_state(&self) -> reth_provider::CanonStateNotifications { + fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { trace!(target: "blockchain_tree", "Registered subscriber for canonical state"); self.tree.read().subscribe_canon_state() } diff --git a/crates/blockchain-tree/src/state.rs b/crates/blockchain-tree/src/state.rs index ca8af6f9b58..a8e43240f4f 100644 --- a/crates/blockchain-tree/src/state.rs +++ b/crates/blockchain-tree/src/state.rs @@ -184,7 +184,7 @@ mod tests { let mut tree_state = TreeState::new(0, vec![], 5); // Create a chain with two blocks - let block = SealedBlockWithSenders::default(); + let block: SealedBlockWithSenders = Default::default(); let block1_hash = B256::random(); let block2_hash = B256::random(); @@ -254,8 +254,8 @@ mod tests { let block1_hash = B256::random(); let block2_hash = B256::random(); - let mut block1 = SealedBlockWithSenders::default(); - let mut block2 = SealedBlockWithSenders::default(); + let mut block1: SealedBlockWithSenders = Default::default(); + let mut block2: SealedBlockWithSenders = Default::default(); block1.block.header.set_hash(block1_hash); block1.block.header.set_block_number(9); @@ -296,8 +296,8 @@ mod tests { let block1_hash = B256::random(); let block2_hash = B256::random(); - let mut block1 = SealedBlockWithSenders::default(); - let mut block2 = SealedBlockWithSenders::default(); + let mut block1: SealedBlockWithSenders = Default::default(); + let mut block2: SealedBlockWithSenders = Default::default(); block1.block.header.set_hash(block1_hash); block1.block.header.set_block_number(9); @@ -336,7 +336,7 @@ mod tests { // Create a block with a random hash and add it to the buffer let block_hash = B256::random(); - let mut block = SealedBlockWithSenders::default(); + let mut block: SealedBlockWithSenders = Default::default(); block.block.header.set_hash(block_hash); // Add the block to the buffered blocks in the TreeState @@ -363,8 +363,8 @@ mod tests { let ancestor_hash = B256::random(); let descendant_hash = 
B256::random(); - let mut ancestor_block = SealedBlockWithSenders::default(); - let mut descendant_block = SealedBlockWithSenders::default(); + let mut ancestor_block: SealedBlockWithSenders = Default::default(); + let mut descendant_block: SealedBlockWithSenders = Default::default(); ancestor_block.block.header.set_hash(ancestor_hash); descendant_block.block.header.set_hash(descendant_hash); @@ -397,7 +397,7 @@ mod tests { let receipt1 = Receipt::default(); let receipt2 = Receipt::default(); - let mut block = SealedBlockWithSenders::default(); + let mut block: SealedBlockWithSenders = Default::default(); block.block.header.set_hash(block_hash); let receipts = vec![receipt1, receipt2]; diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 47443b36c67..24f394a761f 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -12,7 +12,7 @@ use reth_chainspec::ChainInfo; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_metrics::{metrics::Gauge, Metrics}; use reth_primitives::{ - BlockWithSenders, Receipt, Receipts, SealedBlock, SealedBlockWithSenders, SealedHeader, + BlockWithSenders, NodePrimitives, Receipts, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, }; use reth_storage_api::StateProviderBox; @@ -50,22 +50,22 @@ pub(crate) struct InMemoryStateMetrics { /// This holds, because only lookup by number functions need to acquire the numbers lock first to /// get the block hash. #[derive(Debug, Default)] -pub(crate) struct InMemoryState { +pub(crate) struct InMemoryState { /// All canonical blocks that are not on disk yet. - blocks: RwLock>>, + blocks: RwLock>>>, /// Mapping of block numbers to block hashes. numbers: RwLock>, /// The pending block that has not yet been made canonical. - pending: watch::Sender>, + pending: watch::Sender>>, /// Metrics for the in-memory state. metrics: InMemoryStateMetrics, } -impl InMemoryState { +impl InMemoryState { pub(crate) fn new( - blocks: HashMap>, + blocks: HashMap>>, numbers: BTreeMap, - pending: Option, + pending: Option>, ) -> Self { let (pending, _) = watch::channel(pending); let this = Self { @@ -95,12 +95,12 @@ impl InMemoryState { } /// Returns the state for a given block hash. - pub(crate) fn state_by_hash(&self, hash: B256) -> Option> { + pub(crate) fn state_by_hash(&self, hash: B256) -> Option>> { self.blocks.read().get(&hash).cloned() } /// Returns the state for a given block number. - pub(crate) fn state_by_number(&self, number: u64) -> Option> { + pub(crate) fn state_by_number(&self, number: u64) -> Option>> { let hash = self.hash_by_number(number)?; self.state_by_hash(hash) } @@ -111,14 +111,14 @@ impl InMemoryState { } /// Returns the current chain head state. - pub(crate) fn head_state(&self) -> Option> { + pub(crate) fn head_state(&self) -> Option>> { let hash = *self.numbers.read().last_key_value()?.1; self.state_by_hash(hash) } /// Returns the pending state corresponding to the current head plus one, /// from the payload received in newPayload that does not have a FCU yet. - pub(crate) fn pending_state(&self) -> Option { + pub(crate) fn pending_state(&self) -> Option> { self.pending.borrow().clone() } @@ -131,17 +131,17 @@ impl InMemoryState { /// Inner type to provide in memory state. It includes a chain tracker to be /// advanced internally by the tree. 
#[derive(Debug)] -pub(crate) struct CanonicalInMemoryStateInner { +pub(crate) struct CanonicalInMemoryStateInner { /// Tracks certain chain information, such as the canonical head, safe head, and finalized /// head. pub(crate) chain_info_tracker: ChainInfoTracker, /// Tracks blocks at the tip of the chain that have not been persisted to disk yet. - pub(crate) in_memory_state: InMemoryState, + pub(crate) in_memory_state: InMemoryState, /// A broadcast stream that emits events when the canonical chain is updated. - pub(crate) canon_state_notification_sender: CanonStateNotificationSender, + pub(crate) canon_state_notification_sender: CanonStateNotificationSender, } -impl CanonicalInMemoryStateInner { +impl CanonicalInMemoryStateInner { /// Clears all entries in the in memory state. fn clear(&self) { { @@ -162,17 +162,17 @@ impl CanonicalInMemoryStateInner { /// all canonical blocks not on disk yet and keeps track of the block range that /// is in memory. #[derive(Debug, Clone)] -pub struct CanonicalInMemoryState { - pub(crate) inner: Arc, +pub struct CanonicalInMemoryState { + pub(crate) inner: Arc>, } -impl CanonicalInMemoryState { +impl CanonicalInMemoryState { /// Create a new in-memory state with the given blocks, numbers, pending state, and optional /// finalized header. pub fn new( - blocks: HashMap>, + blocks: HashMap>>, numbers: BTreeMap, - pending: Option, + pending: Option>, finalized: Option, safe: Option, ) -> Self { @@ -236,7 +236,7 @@ impl CanonicalInMemoryState { /// Updates the pending block with the given block. /// /// Note: This assumes that the parent block of the pending block is canonical. - pub fn set_pending_block(&self, pending: ExecutedBlock) { + pub fn set_pending_block(&self, pending: ExecutedBlock) { // fetch the state of the pending block's parent block let parent = self.state_by_hash(pending.block().parent_hash); let pending = BlockState::with_parent(pending, parent); @@ -252,7 +252,7 @@ impl CanonicalInMemoryState { /// them to their parent blocks. fn update_blocks(&self, new_blocks: I, reorged: I) where - I: IntoIterator, + I: IntoIterator>, { { // acquire locks, starting with the numbers lock @@ -288,7 +288,7 @@ impl CanonicalInMemoryState { } /// Update the in memory state with the given chain update. - pub fn update_chain(&self, new_chain: NewCanonicalChain) { + pub fn update_chain(&self, new_chain: NewCanonicalChain) { match new_chain { NewCanonicalChain::Commit { new } => { self.update_blocks(new, vec![]); @@ -359,22 +359,22 @@ impl CanonicalInMemoryState { } /// Returns in memory state corresponding the given hash. - pub fn state_by_hash(&self, hash: B256) -> Option> { + pub fn state_by_hash(&self, hash: B256) -> Option>> { self.inner.in_memory_state.state_by_hash(hash) } /// Returns in memory state corresponding the block number. - pub fn state_by_number(&self, number: u64) -> Option> { + pub fn state_by_number(&self, number: u64) -> Option>> { self.inner.in_memory_state.state_by_number(number) } /// Returns the in memory head state. - pub fn head_state(&self) -> Option> { + pub fn head_state(&self) -> Option>> { self.inner.in_memory_state.head_state() } /// Returns the in memory pending state. - pub fn pending_state(&self) -> Option { + pub fn pending_state(&self) -> Option> { self.inner.in_memory_state.pending_state() } @@ -479,14 +479,14 @@ impl CanonicalInMemoryState { /// Returns a tuple with the `SealedBlock` corresponding to the pending /// state and a vector of its `Receipt`s. 
- pub fn pending_block_and_receipts(&self) -> Option<(SealedBlock, Vec)> { + pub fn pending_block_and_receipts(&self) -> Option<(SealedBlock, Vec)> { self.pending_state().map(|block_state| { (block_state.block_ref().block().clone(), block_state.executed_block_receipts()) }) } /// Subscribe to new blocks events. - pub fn subscribe_canon_state(&self) -> CanonStateNotifications { + pub fn subscribe_canon_state(&self) -> CanonStateNotifications { self.inner.canon_state_notification_sender.subscribe() } @@ -501,7 +501,7 @@ impl CanonicalInMemoryState { } /// Attempts to send a new [`CanonStateNotification`] to all active Receiver handles. - pub fn notify_canon_state(&self, event: CanonStateNotification) { + pub fn notify_canon_state(&self, event: CanonStateNotification) { self.inner.canon_state_notification_sender.send(event).ok(); } @@ -513,7 +513,7 @@ impl CanonicalInMemoryState { &self, hash: B256, historical: StateProviderBox, - ) -> MemoryOverlayStateProvider { + ) -> MemoryOverlayStateProvider { let in_memory = if let Some(state) = self.state_by_hash(hash) { state.chain().map(|block_state| block_state.block()).collect() } else { @@ -527,7 +527,7 @@ impl CanonicalInMemoryState { /// oldest (highest to lowest). /// /// This iterator contains a snapshot of the in-memory state at the time of the call. - pub fn canonical_chain(&self) -> impl Iterator> { + pub fn canonical_chain(&self) -> impl Iterator>> { self.inner.in_memory_state.head_state().into_iter().flat_map(|head| head.iter()) } @@ -577,22 +577,22 @@ impl CanonicalInMemoryState { /// State after applying the given block, this block is part of the canonical chain that partially /// stored in memory and can be traced back to a canonical block on disk. #[derive(Debug, PartialEq, Eq, Clone)] -pub struct BlockState { +pub struct BlockState { /// The executed block that determines the state after this block has been executed. - block: ExecutedBlock, + block: ExecutedBlock, /// The block's parent block if it exists. - parent: Option>, + parent: Option>>, } #[allow(dead_code)] -impl BlockState { +impl BlockState { /// [`BlockState`] constructor. - pub const fn new(block: ExecutedBlock) -> Self { + pub const fn new(block: ExecutedBlock) -> Self { Self { block, parent: None } } /// [`BlockState`] constructor with parent. - pub const fn with_parent(block: ExecutedBlock, parent: Option>) -> Self { + pub const fn with_parent(block: ExecutedBlock, parent: Option>) -> Self { Self { block, parent } } @@ -606,12 +606,12 @@ impl BlockState { } /// Returns the executed block that determines the state. - pub fn block(&self) -> ExecutedBlock { + pub fn block(&self) -> ExecutedBlock { self.block.clone() } /// Returns a reference to the executed block that determines the state. - pub const fn block_ref(&self) -> &ExecutedBlock { + pub const fn block_ref(&self) -> &ExecutedBlock { &self.block } @@ -619,7 +619,7 @@ impl BlockState { pub fn block_with_senders(&self) -> BlockWithSenders { let block = self.block.block().clone(); let senders = self.block.senders().clone(); - BlockWithSenders { block: block.unseal(), senders } + BlockWithSenders::new_unchecked(block.unseal(), senders) } /// Returns the sealed block with senders for the state. @@ -646,7 +646,7 @@ impl BlockState { } /// Returns the `Receipts` of executed block that determines the state. 
- pub fn receipts(&self) -> &Receipts { + pub fn receipts(&self) -> &Receipts { &self.block.execution_outcome().receipts } @@ -654,7 +654,7 @@ impl BlockState { /// We assume that the `Receipts` in the executed block `ExecutionOutcome` /// has only one element corresponding to the executed block associated to /// the state. - pub fn executed_block_receipts(&self) -> Vec { + pub fn executed_block_receipts(&self) -> Vec { let receipts = self.receipts(); debug_assert!( @@ -713,7 +713,7 @@ impl BlockState { /// /// This merges the state of all blocks that are part of the chain that the this block is /// the head of. This includes all blocks that connect back to the canonical block on disk. - pub fn state_provider(&self, historical: StateProviderBox) -> MemoryOverlayStateProvider { + pub fn state_provider(&self, historical: StateProviderBox) -> MemoryOverlayStateProvider { let in_memory = self.chain().map(|block_state| block_state.block()).collect(); MemoryOverlayStateProvider::new(historical, in_memory) @@ -771,25 +771,25 @@ impl BlockState { /// Represents an executed block stored in-memory. #[derive(Clone, Debug, PartialEq, Eq, Default)] -pub struct ExecutedBlock { +pub struct ExecutedBlock { /// Sealed block the rest of fields refer to. pub block: Arc, /// Block's senders. pub senders: Arc>, /// Block's execution outcome. - pub execution_output: Arc, + pub execution_output: Arc>, /// Block's hashed state. pub hashed_state: Arc, /// Trie updates that result of applying the block. pub trie: Arc, } -impl ExecutedBlock { +impl ExecutedBlock { /// [`ExecutedBlock`] constructor. pub const fn new( block: Arc, senders: Arc>, - execution_output: Arc, + execution_output: Arc>, hashed_state: Arc, trie: Arc, ) -> Self { @@ -814,7 +814,7 @@ impl ExecutedBlock { } /// Returns a reference to the block's execution outcome - pub fn execution_outcome(&self) -> &ExecutionOutcome { + pub fn execution_outcome(&self) -> &ExecutionOutcome { &self.execution_output } @@ -831,23 +831,23 @@ impl ExecutedBlock { /// Non-empty chain of blocks. #[derive(Debug)] -pub enum NewCanonicalChain { +pub enum NewCanonicalChain { /// A simple append to the current canonical head Commit { /// all blocks that lead back to the canonical head - new: Vec, + new: Vec>, }, /// A reorged chain consists of two chains that trace back to a shared ancestor block at which /// point they diverge. Reorg { /// All blocks of the _new_ chain - new: Vec, + new: Vec>, /// All blocks of the _old_ chain - old: Vec, + old: Vec>, }, } -impl NewCanonicalChain { +impl NewCanonicalChain { /// Returns the length of the new chain. 
pub fn new_block_count(&self) -> usize { match self { @@ -864,7 +864,7 @@ impl NewCanonicalChain { } /// Converts the new chain into a notification that will be emitted to listeners - pub fn to_chain_notification(&self) -> CanonStateNotification { + pub fn to_chain_notification(&self) -> CanonStateNotification { match self { Self::Commit { new } => { let new = Arc::new(new.iter().fold(Chain::default(), |mut chain, exec| { @@ -917,7 +917,7 @@ mod tests { use alloy_primitives::{map::HashSet, BlockNumber, Bytes, StorageKey, StorageValue}; use rand::Rng; use reth_errors::ProviderResult; - use reth_primitives::{Account, Bytecode, Receipt}; + use reth_primitives::{Account, Bytecode, EthPrimitives, Receipt}; use reth_storage_api::{ AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider, StorageRootProvider, @@ -925,7 +925,7 @@ mod tests { use reth_trie::{AccountProof, HashedStorage, MultiProof, StorageProof, TrieInput}; fn create_mock_state( - test_block_builder: &mut TestBlockBuilder, + test_block_builder: &mut TestBlockBuilder, block_number: u64, parent_hash: B256, ) -> BlockState { @@ -935,7 +935,7 @@ mod tests { } fn create_mock_state_chain( - test_block_builder: &mut TestBlockBuilder, + test_block_builder: &mut TestBlockBuilder, num_blocks: u64, ) -> Vec { let mut chain = Vec::with_capacity(num_blocks as usize); @@ -1065,7 +1065,7 @@ mod tests { fn test_in_memory_state_impl_state_by_hash() { let mut state_by_hash = HashMap::default(); let number = rand::thread_rng().gen::(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let state = Arc::new(create_mock_state(&mut test_block_builder, number, B256::random())); state_by_hash.insert(state.hash(), state.clone()); @@ -1081,7 +1081,7 @@ mod tests { let mut hash_by_number = BTreeMap::new(); let number = rand::thread_rng().gen::(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let state = Arc::new(create_mock_state(&mut test_block_builder, number, B256::random())); let hash = state.hash(); @@ -1098,7 +1098,7 @@ mod tests { fn test_in_memory_state_impl_head_state() { let mut state_by_hash = HashMap::default(); let mut hash_by_number = BTreeMap::new(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let state1 = Arc::new(create_mock_state(&mut test_block_builder, 1, B256::random())); let hash1 = state1.hash(); let state2 = Arc::new(create_mock_state(&mut test_block_builder, 2, hash1)); @@ -1118,7 +1118,7 @@ mod tests { #[test] fn test_in_memory_state_impl_pending_state() { let pending_number = rand::thread_rng().gen::(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let pending_state = create_mock_state(&mut test_block_builder, pending_number, B256::random()); let pending_hash = pending_state.hash(); @@ -1135,7 +1135,8 @@ mod tests { #[test] fn test_in_memory_state_impl_no_pending_state() { - let in_memory_state = InMemoryState::new(HashMap::default(), BTreeMap::new(), None); + let in_memory_state: InMemoryState = + InMemoryState::new(HashMap::default(), BTreeMap::new(), None); assert_eq!(in_memory_state.pending_state(), None); } @@ -1143,7 +1144,7 @@ mod tests { #[test] fn test_state_new() { let number = rand::thread_rng().gen::(); - let mut test_block_builder = 
TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block = test_block_builder.get_executed_block_with_number(number, B256::random()); let state = BlockState::new(block.clone()); @@ -1154,7 +1155,7 @@ mod tests { #[test] fn test_state_block() { let number = rand::thread_rng().gen::(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block = test_block_builder.get_executed_block_with_number(number, B256::random()); let state = BlockState::new(block.clone()); @@ -1165,7 +1166,7 @@ mod tests { #[test] fn test_state_hash() { let number = rand::thread_rng().gen::(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block = test_block_builder.get_executed_block_with_number(number, B256::random()); let state = BlockState::new(block.clone()); @@ -1176,7 +1177,7 @@ mod tests { #[test] fn test_state_number() { let number = rand::thread_rng().gen::(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block = test_block_builder.get_executed_block_with_number(number, B256::random()); let state = BlockState::new(block); @@ -1187,7 +1188,7 @@ mod tests { #[test] fn test_state_state_root() { let number = rand::thread_rng().gen::(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block = test_block_builder.get_executed_block_with_number(number, B256::random()); let state = BlockState::new(block.clone()); @@ -1198,7 +1199,7 @@ mod tests { #[test] fn test_state_receipts() { let receipts = Receipts { receipt_vec: vec![vec![Some(Receipt::default())]] }; - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block = test_block_builder.get_executed_block_with_receipts(receipts.clone(), B256::random()); @@ -1209,8 +1210,8 @@ mod tests { #[test] fn test_in_memory_state_chain_update() { - let state = CanonicalInMemoryState::empty(); - let mut test_block_builder = TestBlockBuilder::default(); + let state: CanonicalInMemoryState = CanonicalInMemoryState::empty(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block1 = test_block_builder.get_executed_block_with_number(0, B256::random()); let block2 = test_block_builder.get_executed_block_with_number(0, B256::random()); let chain = NewCanonicalChain::Commit { new: vec![block1.clone()] }; @@ -1234,8 +1235,8 @@ mod tests { #[test] fn test_in_memory_state_set_pending_block() { - let state = CanonicalInMemoryState::empty(); - let mut test_block_builder = TestBlockBuilder::default(); + let state: CanonicalInMemoryState = CanonicalInMemoryState::empty(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); // First random block let block1 = test_block_builder.get_executed_block_with_number(0, B256::random()); @@ -1286,7 +1287,7 @@ mod tests { #[test] fn test_canonical_in_memory_state_state_provider() { - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block1 = test_block_builder.get_executed_block_with_number(1, B256::random()); let block2 = test_block_builder.get_executed_block_with_number(2, block1.block().hash()); 
let block3 = test_block_builder.get_executed_block_with_number(3, block2.block().hash()); @@ -1333,14 +1334,15 @@ mod tests { #[test] fn test_canonical_in_memory_state_canonical_chain_empty() { - let state = CanonicalInMemoryState::empty(); + let state: CanonicalInMemoryState = CanonicalInMemoryState::empty(); let chain: Vec<_> = state.canonical_chain().collect(); assert!(chain.is_empty()); } #[test] fn test_canonical_in_memory_state_canonical_chain_single_block() { - let block = TestBlockBuilder::default().get_executed_block_with_number(1, B256::random()); + let block = TestBlockBuilder::::default() + .get_executed_block_with_number(1, B256::random()); let hash = block.block().hash(); let mut blocks = HashMap::default(); blocks.insert(hash, Arc::new(BlockState::new(block))); @@ -1359,7 +1361,7 @@ mod tests { fn test_canonical_in_memory_state_canonical_chain_multiple_blocks() { let mut parent_hash = B256::random(); let mut block_builder = TestBlockBuilder::default(); - let state = CanonicalInMemoryState::empty(); + let state: CanonicalInMemoryState = CanonicalInMemoryState::empty(); for i in 1..=3 { let block = block_builder.get_executed_block_with_number(i, parent_hash); @@ -1381,7 +1383,7 @@ mod tests { fn test_canonical_in_memory_state_canonical_chain_with_pending_block() { let mut parent_hash = B256::random(); let mut block_builder = TestBlockBuilder::default(); - let state = CanonicalInMemoryState::empty(); + let state: CanonicalInMemoryState = CanonicalInMemoryState::empty(); for i in 1..=2 { let block = block_builder.get_executed_block_with_number(i, parent_hash); @@ -1401,7 +1403,7 @@ mod tests { #[test] fn test_block_state_parent_blocks() { - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let chain = create_mock_state_chain(&mut test_block_builder, 4); let parents = chain[3].parent_state_chain(); @@ -1422,7 +1424,7 @@ mod tests { #[test] fn test_block_state_single_block_state_chain() { let single_block_number = 1; - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let single_block = create_mock_state(&mut test_block_builder, single_block_number, B256::random()); let single_block_hash = single_block.block().block.hash(); @@ -1438,7 +1440,7 @@ mod tests { #[test] fn test_block_state_chain() { - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let chain = create_mock_state_chain(&mut test_block_builder, 3); let block_state_chain = chain[2].chain().collect::>(); @@ -1460,7 +1462,7 @@ mod tests { #[test] fn test_to_chain_notification() { // Generate 4 blocks - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block0 = test_block_builder.get_executed_block_with_number(0, B256::random()); let block1 = test_block_builder.get_executed_block_with_number(1, block0.block.hash()); let block1a = test_block_builder.get_executed_block_with_number(1, block0.block.hash()); diff --git a/crates/chain-state/src/lib.rs b/crates/chain-state/src/lib.rs index bd9b43a59ea..519469d67f6 100644 --- a/crates/chain-state/src/lib.rs +++ b/crates/chain-state/src/lib.rs @@ -27,3 +27,6 @@ pub use memory_overlay::{MemoryOverlayStateProvider, MemoryOverlayStateProviderR #[cfg(any(test, feature = "test-utils"))] /// Common test helpers pub mod test_utils; + +// todo: 
remove when generic data prim integration complete +pub use reth_primitives::EthPrimitives; diff --git a/crates/chain-state/src/memory_overlay.rs b/crates/chain-state/src/memory_overlay.rs index ada0faee490..88cd411d38b 100644 --- a/crates/chain-state/src/memory_overlay.rs +++ b/crates/chain-state/src/memory_overlay.rs @@ -5,7 +5,7 @@ use alloy_primitives::{ Address, BlockNumber, Bytes, StorageKey, StorageValue, B256, }; use reth_errors::ProviderResult; -use reth_primitives::{Account, Bytecode}; +use reth_primitives::{Account, Bytecode, NodePrimitives}; use reth_storage_api::{ AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider, StorageRootProvider, @@ -18,11 +18,11 @@ use std::sync::OnceLock; /// A state provider that stores references to in-memory blocks along with their state as well as a /// reference of the historical state provider for fallback lookups. #[allow(missing_debug_implementations)] -pub struct MemoryOverlayStateProviderRef<'a> { +pub struct MemoryOverlayStateProviderRef<'a, N: NodePrimitives = reth_primitives::EthPrimitives> { /// Historical state provider for state lookups that are not found in in-memory blocks. pub(crate) historical: Box, /// The collection of executed parent blocks. Expected order is newest to oldest. - pub(crate) in_memory: Vec, + pub(crate) in_memory: Vec>, /// Lazy-loaded in-memory trie data. pub(crate) trie_state: OnceLock, } @@ -30,11 +30,11 @@ pub struct MemoryOverlayStateProviderRef<'a> { /// A state provider that stores references to in-memory blocks along with their state as well as /// the historical state provider for fallback lookups. #[allow(missing_debug_implementations)] -pub struct MemoryOverlayStateProvider { +pub struct MemoryOverlayStateProvider { /// Historical state provider for state lookups that are not found in in-memory blocks. pub(crate) historical: Box, /// The collection of executed parent blocks. Expected order is newest to oldest. - pub(crate) in_memory: Vec, + pub(crate) in_memory: Vec>, /// Lazy-loaded in-memory trie data. pub(crate) trie_state: OnceLock, } @@ -49,7 +49,7 @@ macro_rules! impl_state_provider { /// - `in_memory` - the collection of executed ancestor blocks in reverse. /// - `historical` - a historical state provider for the latest ancestor block stored in the /// database. - pub fn new(historical: $historical_type, in_memory: Vec) -> Self { + pub fn new(historical: $historical_type, in_memory: Vec>) -> Self { Self { historical, in_memory, trie_state: OnceLock::new() } } @@ -230,8 +230,8 @@ macro_rules! impl_state_provider { }; } -impl_state_provider!([], MemoryOverlayStateProvider, Box); -impl_state_provider!([<'a>], MemoryOverlayStateProviderRef<'a>, Box); +impl_state_provider!([], MemoryOverlayStateProvider, Box); +impl_state_provider!([<'a, N: NodePrimitives>], MemoryOverlayStateProviderRef<'a, N>, Box); /// The collection of data necessary for trie-related operations for [`MemoryOverlayStateProvider`]. 
#[derive(Clone, Default, Debug)] diff --git a/crates/chain-state/src/notifications.rs b/crates/chain-state/src/notifications.rs index 582e1d2a05d..03d740d3d13 100644 --- a/crates/chain-state/src/notifications.rs +++ b/crates/chain-state/src/notifications.rs @@ -3,7 +3,7 @@ use auto_impl::auto_impl; use derive_more::{Deref, DerefMut}; use reth_execution_types::{BlockReceipts, Chain}; -use reth_primitives::{SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{NodePrimitives, SealedBlockWithSenders, SealedHeader}; use std::{ pin::Pin, sync::Arc, @@ -17,10 +17,12 @@ use tokio_stream::{ use tracing::debug; /// Type alias for a receiver that receives [`CanonStateNotification`] -pub type CanonStateNotifications = broadcast::Receiver; +pub type CanonStateNotifications = + broadcast::Receiver>; /// Type alias for a sender that sends [`CanonStateNotification`] -pub type CanonStateNotificationSender = broadcast::Sender; +pub type CanonStateNotificationSender = + broadcast::Sender>; /// A type that allows to register chain related event subscriptions. #[auto_impl(&, Arc)] @@ -41,13 +43,13 @@ pub trait CanonStateSubscriptions: Send + Sync { /// A Stream of [`CanonStateNotification`]. #[derive(Debug)] #[pin_project::pin_project] -pub struct CanonStateNotificationStream { +pub struct CanonStateNotificationStream { #[pin] - st: BroadcastStream, + st: BroadcastStream>, } -impl Stream for CanonStateNotificationStream { - type Item = CanonStateNotification; +impl Stream for CanonStateNotificationStream { + type Item = CanonStateNotification; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { loop { @@ -68,11 +70,11 @@ impl Stream for CanonStateNotificationStream { /// The notification contains at least one [`Chain`] with the imported segment. If some blocks were /// reverted (e.g. during a reorg), the old chain is also returned. #[derive(Clone, Debug, PartialEq, Eq)] -pub enum CanonStateNotification { +pub enum CanonStateNotification { /// The canonical chain was extended. Commit { /// The newly added chain segment. - new: Arc, + new: Arc>, }, /// A chain segment was reverted or reorged. /// @@ -82,18 +84,18 @@ pub enum CanonStateNotification { /// chain segment. Reorg { /// The chain segment that was reverted. - old: Arc, + old: Arc>, /// The chain segment that was added on top of the canonical chain, minus the reverted /// blocks. /// /// In the case of a revert, not a reorg, this chain segment is empty. - new: Arc, + new: Arc>, }, } -impl CanonStateNotification { +impl CanonStateNotification { /// Get the chain segment that was reverted, if any. - pub fn reverted(&self) -> Option> { + pub fn reverted(&self) -> Option>> { match self { Self::Commit { .. } => None, Self::Reorg { old, .. } => Some(old.clone()), @@ -101,7 +103,7 @@ impl CanonStateNotification { } /// Get the newly imported chain segment, if any. - pub fn committed(&self) -> Arc { + pub fn committed(&self) -> Arc> { match self { Self::Commit { new } | Self::Reorg { new, .. } => new.clone(), } @@ -122,7 +124,7 @@ impl CanonStateNotification { /// /// The boolean in the tuple (2nd element) denotes whether the receipt was from the reverted /// chain segment. 
- pub fn block_receipts(&self) -> Vec<(BlockReceipts, bool)> { + pub fn block_receipts(&self) -> Vec<(BlockReceipts, bool)> { let mut receipts = Vec::new(); // get old receipts @@ -194,13 +196,13 @@ impl Stream for ForkChoiceStream { #[cfg(test)] mod tests { use super::*; - use alloy_primitives::B256; + use alloy_primitives::{b256, B256}; use reth_execution_types::ExecutionOutcome; use reth_primitives::{Receipt, Receipts, TransactionSigned, TxType}; #[test] fn test_commit_notification() { - let block = SealedBlockWithSenders::default(); + let block: SealedBlockWithSenders = Default::default(); let block1_hash = B256::new([0x01; 32]); let block2_hash = B256::new([0x02; 32]); @@ -212,7 +214,7 @@ mod tests { block2.set_block_number(2); block2.set_hash(block2_hash); - let chain = Arc::new(Chain::new( + let chain: Arc = Arc::new(Chain::new( vec![block1.clone(), block2.clone()], ExecutionOutcome::default(), None, @@ -233,7 +235,7 @@ mod tests { #[test] fn test_reorg_notification() { - let block = SealedBlockWithSenders::default(); + let block: SealedBlockWithSenders = Default::default(); let block1_hash = B256::new([0x01; 32]); let block2_hash = B256::new([0x02; 32]); let block3_hash = B256::new([0x03; 32]); @@ -250,7 +252,7 @@ mod tests { block3.set_block_number(3); block3.set_hash(block3_hash); - let old_chain = + let old_chain: Arc = Arc::new(Chain::new(vec![block1.clone()], ExecutionOutcome::default(), None)); let new_chain = Arc::new(Chain::new( vec![block2.clone(), block3.clone()], @@ -275,7 +277,7 @@ mod tests { #[test] fn test_block_receipts_commit() { // Create a default block instance for use in block definitions. - let block = SealedBlockWithSenders::default(); + let block: SealedBlockWithSenders = Default::default(); // Define unique hashes for two blocks to differentiate them in the chain. let block1_hash = B256::new([0x01; 32]); @@ -313,7 +315,7 @@ mod tests { let execution_outcome = ExecutionOutcome { receipts, ..Default::default() }; // Create a new chain segment with `block1` and `block2` and the execution outcome. - let new_chain = + let new_chain: Arc = Arc::new(Chain::new(vec![block1.clone(), block2.clone()], execution_outcome, None)); // Create a commit notification containing the new chain segment. @@ -330,7 +332,11 @@ mod tests { block_receipts[0].0, BlockReceipts { block: block1.num_hash(), - tx_receipts: vec![(B256::default(), receipt1)] + tx_receipts: vec![( + // Transaction hash of a Transaction::default() + b256!("20b5378c6fe992c118b557d2f8e8bbe0b7567f6fe5483a8f0f1c51e93a9d91ab"), + receipt1 + )] } ); @@ -341,7 +347,7 @@ mod tests { #[test] fn test_block_receipts_reorg() { // Define block1 for the old chain segment, which will be reverted. - let mut old_block1 = SealedBlockWithSenders::default(); + let mut old_block1: SealedBlockWithSenders = Default::default(); old_block1.set_block_number(1); old_block1.set_hash(B256::new([0x01; 32])); old_block1.block.body.transactions.push(TransactionSigned::default()); @@ -361,10 +367,11 @@ mod tests { ExecutionOutcome { receipts: old_receipts, ..Default::default() }; // Create an old chain segment to be reverted, containing `old_block1`. - let old_chain = Arc::new(Chain::new(vec![old_block1.clone()], old_execution_outcome, None)); + let old_chain: Arc = + Arc::new(Chain::new(vec![old_block1.clone()], old_execution_outcome, None)); // Define block2 for the new chain segment, which will be committed. 
- let mut new_block1 = SealedBlockWithSenders::default(); + let mut new_block1: SealedBlockWithSenders = Default::default(); new_block1.set_block_number(2); new_block1.set_hash(B256::new([0x02; 32])); new_block1.block.body.transactions.push(TransactionSigned::default()); @@ -400,7 +407,11 @@ mod tests { block_receipts[0].0, BlockReceipts { block: old_block1.num_hash(), - tx_receipts: vec![(B256::default(), old_receipt)] + tx_receipts: vec![( + // Transaction hash of a Transaction::default() + b256!("20b5378c6fe992c118b557d2f8e8bbe0b7567f6fe5483a8f0f1c51e93a9d91ab"), + old_receipt + )] } ); // Confirm this is from the reverted segment. @@ -412,7 +423,11 @@ mod tests { block_receipts[1].0, BlockReceipts { block: new_block1.num_hash(), - tx_receipts: vec![(B256::default(), new_receipt)] + tx_receipts: vec![( + // Transaction hash of a Transaction::default() + b256!("20b5378c6fe992c118b557d2f8e8bbe0b7567f6fe5483a8f0f1c51e93a9d91ab"), + new_receipt + )] } ); // Confirm this is from the committed segment. diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs index 60a90e43fee..af0c363fe48 100644 --- a/crates/chain-state/src/test_utils.rs +++ b/crates/chain-state/src/test_utils.rs @@ -1,3 +1,5 @@ +use core::marker::PhantomData; + use crate::{ in_memory::ExecutedBlock, CanonStateNotification, CanonStateNotifications, CanonStateSubscriptions, @@ -12,8 +14,8 @@ use reth_chainspec::{ChainSpec, EthereumHardfork, MIN_TRANSACTION_GAS}; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{ proofs::{calculate_receipt_root, calculate_transaction_root, calculate_withdrawals_root}, - BlockBody, Receipt, Receipts, SealedBlock, SealedBlockWithSenders, SealedHeader, Transaction, - TransactionSigned, TransactionSignedEcRecovered, + BlockBody, NodePrimitives, Receipt, Receipts, SealedBlock, SealedBlockWithSenders, + SealedHeader, Transaction, TransactionSigned, TransactionSignedEcRecovered, }; use reth_trie::{root::state_root_unhashed, updates::TrieUpdates, HashedPostState}; use revm::{db::BundleState, primitives::AccountInfo}; @@ -27,7 +29,7 @@ use tokio::sync::broadcast::{self, Sender}; /// Functionality to build blocks for tests and help with assertions about /// their execution. #[derive(Debug)] -pub struct TestBlockBuilder { +pub struct TestBlockBuilder { /// The account that signs all the block's transactions. pub signer: Address, /// Private key for signing. 
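// `TestBlockBuilder` becomes generic over the node's primitive types but never
// stores a value of that type, so it carries a `PhantomData` marker, as the
// next hunk shows. A minimal sketch of the pattern (stand-in trait and types,
// not reth's):
use std::marker::PhantomData;

trait NodePrimitives {} // stand-in for the primitives abstraction

struct TestBlockBuilder<N: NodePrimitives> {
    signer: [u8; 20],
    // Ties the builder to `N` for type inference at use sites without storing
    // an `N`; `PhantomData<N>` is zero-sized.
    _prims: PhantomData<N>,
}

impl<N: NodePrimitives> Default for TestBlockBuilder<N> {
    fn default() -> Self {
        Self { signer: [0; 20], _prims: PhantomData }
    }
}

struct EthPrimitives; // stand-in concrete primitives
impl NodePrimitives for EthPrimitives {}

fn main() {
    // A turbofish (or a typed binding) selects the primitives flavor.
    let builder = TestBlockBuilder::<EthPrimitives>::default();
    println!("signer: {:?}", builder.signer);
}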
@@ -40,9 +42,10 @@ pub struct TestBlockBuilder { pub signer_build_account_info: AccountInfo, /// Chain spec of the blocks generated by this builder pub chain_spec: ChainSpec, + _prims: PhantomData, } -impl Default for TestBlockBuilder { +impl Default for TestBlockBuilder { fn default() -> Self { let initial_account_info = AccountInfo::from_balance(U256::from(10).pow(U256::from(18))); let signer_pk = PrivateKeySigner::random(); @@ -53,6 +56,7 @@ impl Default for TestBlockBuilder { signer_pk, signer_execute_account_info: initial_account_info.clone(), signer_build_account_info: initial_account_info, + _prims: PhantomData, } } } @@ -98,8 +102,7 @@ impl TestBlockBuilder { let signature_hash = tx.signature_hash(); let signature = self.signer_pk.sign_hash_sync(&signature_hash).unwrap(); - TransactionSigned::from_transaction_and_signature(tx, signature) - .with_signer(self.signer) + TransactionSigned::new_unhashed(tx, signature).with_signer(self.signer) }; let num_txs = rng.gen_range(0..5); @@ -289,8 +292,8 @@ impl TestBlockBuilder { } /// A test `ChainEventSubscriptions` #[derive(Clone, Debug, Default)] -pub struct TestCanonStateSubscriptions { - canon_notif_tx: Arc>>>, +pub struct TestCanonStateSubscriptions { + canon_notif_tx: Arc>>>>, } impl TestCanonStateSubscriptions { diff --git a/crates/chainspec/src/api.rs b/crates/chainspec/src/api.rs index f0cc31bb44d..94b4285f92d 100644 --- a/crates/chainspec/src/api.rs +++ b/crates/chainspec/src/api.rs @@ -109,6 +109,6 @@ impl EthChainSpec for ChainSpec { } fn is_optimism(&self) -> bool { - self.chain.is_optimism() + false } } diff --git a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml index 7e27d9b4e2e..90acb82d71d 100644 --- a/crates/cli/commands/Cargo.toml +++ b/crates/cli/commands/Cargo.toml @@ -32,6 +32,7 @@ reth-fs-util.workspace = true reth-network = { workspace = true, features = ["serde"] } reth-network-p2p.workspace = true reth-network-peers = { workspace = true, features = ["secp256k1"] } +reth-node-api.workspace = true reth-node-builder.workspace = true reth-node-core.workspace = true reth-node-events.workspace = true diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index 0e4eb2723c3..251e01a105a 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -10,12 +10,16 @@ use reth_db::{init_db, open_db_read_only, DatabaseEnv}; use reth_db_common::init::init_genesis; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; use reth_evm::noop::NoopBlockExecutorProvider; +use reth_node_api::FullNodePrimitives; use reth_node_builder::{NodeTypesWithDBAdapter, NodeTypesWithEngine}; use reth_node_core::{ args::{DatabaseArgs, DatadirArgs}, dirs::{ChainPath, DataDirPath}, }; -use reth_provider::{providers::StaticFileProvider, ProviderFactory, StaticFileProviderFactory}; +use reth_provider::{ + providers::{NodeTypesForProvider, StaticFileProvider}, + ProviderFactory, StaticFileProviderFactory, +}; use reth_stages::{sets::DefaultStages, Pipeline, PipelineTarget}; use reth_static_file::StaticFileProducer; use std::{path::PathBuf, sync::Arc}; @@ -191,5 +195,13 @@ impl AccessRights { /// Helper trait with a common set of requirements for the /// [`NodeTypes`](reth_node_builder::NodeTypes) in CLI. 
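// The `CliNodeTypes` helper defined next uses Rust's "trait alias" idiom: an
// empty trait whose supertraits bundle a set of bounds, plus a blanket impl so
// every type satisfying the bounds implements it automatically. A minimal
// sketch with stand-in bounds (the real ones are NodeTypesWithEngine and
// NodeTypesForProvider):
use std::fmt::Debug;

trait WithEngine {}
trait ForProvider {}

// The alias: nothing to implement, it only names the bound combination.
trait CliNodeTypes: WithEngine + ForProvider + Debug {}

// Blanket impl: any `N` meeting the bounds is automatically a `CliNodeTypes`.
impl<N> CliNodeTypes for N where N: WithEngine + ForProvider + Debug {}

#[derive(Debug)]
struct EthereumNode;
impl WithEngine for EthereumNode {}
impl ForProvider for EthereumNode {}

// Downstream code writes one short bound instead of repeating the whole set.
fn run_cli<N: CliNodeTypes>(node: &N) {
    println!("running CLI for {node:?}");
}

fn main() {
    run_cli(&EthereumNode);
}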
-pub trait CliNodeTypes: NodeTypesWithEngine {} -impl CliNodeTypes for N where N: NodeTypesWithEngine {} +pub trait CliNodeTypes: + NodeTypesWithEngine + + NodeTypesForProvider> +{ +} +impl CliNodeTypes for N where + N: NodeTypesWithEngine + + NodeTypesForProvider> +{ +} diff --git a/crates/cli/commands/src/db/get.rs b/crates/cli/commands/src/db/get.rs index e9fc034519f..8f9a5f1d322 100644 --- a/crates/cli/commands/src/db/get.rs +++ b/crates/cli/commands/src/db/get.rs @@ -2,7 +2,9 @@ use alloy_consensus::Header; use alloy_primitives::{hex, BlockHash}; use clap::Parser; use reth_db::{ - static_file::{ColumnSelectorOne, ColumnSelectorTwo, HeaderMask, ReceiptMask, TransactionMask}, + static_file::{ + ColumnSelectorOne, ColumnSelectorTwo, HeaderWithHashMask, ReceiptMask, TransactionMask, + }, tables, RawKey, RawTable, Receipts, TableViewer, Transactions, }; use reth_db_api::table::{Decompress, DupSort, Table}; @@ -61,7 +63,7 @@ impl Command { Subcommand::StaticFile { segment, key, raw } => { let (key, mask): (u64, _) = match segment { StaticFileSegment::Headers => { - (table_key::(&key)?, >::MASK) + (table_key::(&key)?, >::MASK) } StaticFileSegment::Transactions => ( table_key::(&key)?, diff --git a/crates/cli/commands/src/import.rs b/crates/cli/commands/src/import.rs index 539211a22f7..c1f6408b49b 100644 --- a/crates/cli/commands/src/import.rs +++ b/crates/cli/commands/src/import.rs @@ -167,7 +167,7 @@ pub fn build_import_pipeline( executor: E, ) -> eyre::Result<(Pipeline, impl Stream)> where - N: ProviderNodeTypes, + N: ProviderNodeTypes + CliNodeTypes, C: Consensus + 'static, E: BlockExecutorProvider, { diff --git a/crates/cli/commands/src/init_state/without_evm.rs b/crates/cli/commands/src/init_state/without_evm.rs index c6e1f9a51dd..e3594a59363 100644 --- a/crates/cli/commands/src/init_state/without_evm.rs +++ b/crates/cli/commands/src/init_state/without_evm.rs @@ -3,12 +3,10 @@ use alloy_rlp::Decodable; use alloy_consensus::Header; use reth_node_builder::NodePrimitives; -use reth_primitives::{ - BlockBody, SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, -}; +use reth_primitives::{SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment}; use reth_provider::{ providers::StaticFileProvider, BlockWriter, StageCheckpointWriter, StaticFileProviderFactory, - StaticFileWriter, + StaticFileWriter, StorageLocation, }; use reth_stages::{StageCheckpoint, StageId}; @@ -33,7 +31,9 @@ pub fn setup_without_evm( total_difficulty: U256, ) -> Result<(), eyre::Error> where - Provider: StaticFileProviderFactory + StageCheckpointWriter + BlockWriter, + Provider: StaticFileProviderFactory + + StageCheckpointWriter + + BlockWriter, { info!(target: "reth::cli", "Setting up dummy EVM chain before importing state."); @@ -64,11 +64,12 @@ fn append_first_block( total_difficulty: U256, ) -> Result<(), eyre::Error> where - Provider: BlockWriter + StaticFileProviderFactory, + Provider: BlockWriter + StaticFileProviderFactory, { provider_rw.insert_block( - SealedBlockWithSenders::new(SealedBlock::new(header.clone(), BlockBody::default()), vec![]) + SealedBlockWithSenders::new(SealedBlock::new(header.clone(), Default::default()), vec![]) .expect("no senders or txes"), + StorageLocation::Database, )?; let sf_provider = provider_rw.static_file_provider(); diff --git a/crates/cli/commands/src/stage/run.rs b/crates/cli/commands/src/stage/run.rs index f3c3bbef965..c852eea05a7 100644 --- a/crates/cli/commands/src/stage/run.rs +++ b/crates/cli/commands/src/stage/run.rs @@ -11,6 +11,7 @@ use 
reth_cli::chainspec::ChainSpecParser; use reth_cli_runner::CliContext; use reth_cli_util::get_secret_key; use reth_config::config::{HashingConfig, SenderRecoveryConfig, TransactionLookupConfig}; +use reth_db_api::database_metrics::DatabaseMetrics; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, @@ -132,10 +133,20 @@ impl> Command }, ChainSpecInfo { name: provider_factory.chain_spec().chain().to_string() }, ctx.task_executor, - Hooks::new( - provider_factory.db_ref().clone(), - provider_factory.static_file_provider(), - ), + Hooks::builder() + .with_hook({ + let db = provider_factory.db_ref().clone(); + move || db.report_metrics() + }) + .with_hook({ + let sfp = provider_factory.static_file_provider(); + move || { + if let Err(error) = sfp.report_metrics() { + error!(%error, "Failed to report metrics from static file provider"); + } + } + }) + .build(), ); MetricServer::new(config).serve().await?; diff --git a/crates/cli/commands/src/stage/unwind.rs b/crates/cli/commands/src/stage/unwind.rs index e71861a988d..2d29121d069 100644 --- a/crates/cli/commands/src/stage/unwind.rs +++ b/crates/cli/commands/src/stage/unwind.rs @@ -2,7 +2,7 @@ use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use alloy_eips::BlockHashOrNumber; -use alloy_primitives::{BlockNumber, B256}; +use alloy_primitives::B256; use clap::{Parser, Subcommand}; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::{EthChainSpec, EthereumHardforks}; @@ -17,6 +17,7 @@ use reth_node_core::args::NetworkArgs; use reth_provider::{ providers::ProviderNodeTypes, BlockExecutionWriter, BlockNumReader, ChainSpecProvider, ChainStateBlockReader, ChainStateBlockWriter, ProviderFactory, StaticFileProviderFactory, + StorageLocation, }; use reth_prune::PruneModes; use reth_stages::{ @@ -25,7 +26,7 @@ use reth_stages::{ ExecutionStageThresholds, Pipeline, StageSet, }; use reth_static_file::StaticFileProducer; -use std::{ops::RangeInclusive, sync::Arc}; +use std::sync::Arc; use tokio::sync::watch; use tracing::info; @@ -52,16 +53,13 @@ impl> Command pub async fn execute>(self) -> eyre::Result<()> { let Environment { provider_factory, config, .. } = self.env.init::(AccessRights::RW)?; - let range = self.command.unwind_range(provider_factory.clone())?; - if *range.start() == 0 { - eyre::bail!("Cannot unwind genesis block") - } + let target = self.command.unwind_target(provider_factory.clone())?; let highest_static_file_block = provider_factory .static_file_provider() .get_highest_static_files() .max() - .filter(|highest_static_file_block| highest_static_file_block >= range.start()); + .filter(|highest_static_file_block| *highest_static_file_block > target); // Execute a pipeline unwind if the start of the range overlaps the existing static // files. 
If that's the case, then copy all available data from MDBX to static files, and @@ -75,9 +73,9 @@ impl> Command } if let Some(highest_static_file_block) = highest_static_file_block { - info!(target: "reth::cli", ?range, ?highest_static_file_block, "Executing a pipeline unwind."); + info!(target: "reth::cli", ?target, ?highest_static_file_block, "Executing a pipeline unwind."); } else { - info!(target: "reth::cli", ?range, "Executing a pipeline unwind."); + info!(target: "reth::cli", ?target, "Executing a pipeline unwind."); } // This will build an offline-only pipeline if the `offline` flag is enabled @@ -86,34 +84,30 @@ impl> Command // Move all applicable data from database to static files. pipeline.move_to_static_files()?; - pipeline.unwind((*range.start()).saturating_sub(1), None)?; + pipeline.unwind(target, None)?; } else { - info!(target: "reth::cli", ?range, "Executing a database unwind."); + info!(target: "reth::cli", ?target, "Executing a database unwind."); let provider = provider_factory.provider_rw()?; - let _ = provider - .take_block_and_execution_range(range.clone()) + provider + .remove_block_and_execution_above(target, StorageLocation::Both) .map_err(|err| eyre::eyre!("Transaction error on unwind: {err}"))?; // update finalized block if needed let last_saved_finalized_block_number = provider.last_finalized_block_number()?; - let range_min = - range.clone().min().ok_or(eyre::eyre!("Could not fetch lower range end"))?; - if last_saved_finalized_block_number.is_none() || - Some(range_min) < last_saved_finalized_block_number - { - provider.save_finalized_block_number(BlockNumber::from(range_min))?; + if last_saved_finalized_block_number.is_none_or(|f| f > target) { + provider.save_finalized_block_number(target)?; } provider.commit()?; } - info!(target: "reth::cli", range=?range.clone(), count=range.count(), "Unwound blocks"); + info!(target: "reth::cli", ?target, "Unwound blocks"); Ok(()) } - fn build_pipeline>( + fn build_pipeline + CliNodeTypes>( self, config: Config, provider_factory: ProviderFactory, @@ -183,13 +177,11 @@ enum Subcommands { } impl Subcommands { - /// Returns the block range to unwind. - /// - /// This returns an inclusive range: [target..=latest] - fn unwind_range>>( + /// Returns the block to unwind to. The returned block will stay in database. 
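// The unwind command above replaces the explicit `is_none() || Some(..) < ..`
// pair with `Option::is_none_or` (stable since Rust 1.82): the finalized block
// is (re)saved when none is recorded yet, or when the recorded one lies above
// the unwind target. A small sketch of that predicate:
fn should_update_finalized(last_saved: Option<u64>, target: u64) -> bool {
    last_saved.is_none_or(|finalized| finalized > target)
}

fn main() {
    assert!(should_update_finalized(None, 100)); // nothing saved yet
    assert!(should_update_finalized(Some(150), 100)); // finalized above target
    assert!(!should_update_finalized(Some(50), 100)); // already at or below target
    println!("ok");
}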
+ fn unwind_target>>( &self, factory: ProviderFactory, - ) -> eyre::Result> { + ) -> eyre::Result { let provider = factory.provider()?; let last = provider.last_block_number()?; let target = match self { @@ -200,11 +192,11 @@ impl Subcommands { BlockHashOrNumber::Number(num) => *num, }, Self::NumBlocks { amount } => last.saturating_sub(*amount), - } + 1; + }; if target > last { eyre::bail!("Target block number is higher than the latest block number") } - Ok(target..=last) + Ok(target) } } diff --git a/crates/cli/commands/src/test_vectors/compact.rs b/crates/cli/commands/src/test_vectors/compact.rs index c498718e9fc..5490f568d3a 100644 --- a/crates/cli/commands/src/test_vectors/compact.rs +++ b/crates/cli/commands/src/test_vectors/compact.rs @@ -22,8 +22,7 @@ use reth_db::{ }; use reth_fs_util as fs; use reth_primitives::{ - Account, Log, LogData, Receipt, ReceiptWithBloom, StorageEntry, Transaction, - TransactionSignedNoHash, TxType, + Account, Log, LogData, Receipt, StorageEntry, Transaction, TransactionSignedNoHash, TxType, }; use reth_prune_types::{PruneCheckpoint, PruneMode}; use reth_stages_types::{ @@ -76,7 +75,6 @@ compact_types!( // reth-primitives Account, Receipt, - ReceiptWithBloom, // reth_codecs::alloy Authorization, GenesisAccount, diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 245ebe8541e..65994557c06 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -14,6 +14,8 @@ workspace = true # reth reth-ethereum-consensus.workspace = true reth-blockchain-tree-api.workspace = true +reth-codecs.workspace = true +reth-db-api.workspace = true reth-primitives.workspace = true reth-stages-api.workspace = true reth-errors.workspace = true @@ -80,6 +82,7 @@ assert_matches.workspace = true [features] optimism = [ "reth-blockchain-tree/optimism", + "reth-codecs/optimism", "reth-chainspec", "reth-db-api/optimism", "reth-db/optimism", diff --git a/crates/consensus/beacon/src/engine/hooks/static_file.rs b/crates/consensus/beacon/src/engine/hooks/static_file.rs index 99854209cb3..7cd286f659c 100644 --- a/crates/consensus/beacon/src/engine/hooks/static_file.rs +++ b/crates/consensus/beacon/src/engine/hooks/static_file.rs @@ -6,8 +6,10 @@ use crate::{ }; use alloy_primitives::BlockNumber; use futures::FutureExt; +use reth_codecs::Compact; +use reth_db_api::table::Value; use reth_errors::RethResult; -use reth_primitives::static_file::HighestStaticFiles; +use reth_primitives::{static_file::HighestStaticFiles, NodePrimitives}; use reth_provider::{ BlockReader, ChainStateBlockReader, DatabaseProviderFactory, StageCheckpointReader, StaticFileProviderFactory, @@ -33,8 +35,9 @@ impl StaticFileHook where Provider: StaticFileProviderFactory + DatabaseProviderFactory< - Provider: StaticFileProviderFactory - + StageCheckpointReader + Provider: StaticFileProviderFactory< + Primitives: NodePrimitives, + > + StageCheckpointReader + BlockReader + ChainStateBlockReader, > + 'static, @@ -148,8 +151,9 @@ impl EngineHook for StaticFileHook where Provider: StaticFileProviderFactory + DatabaseProviderFactory< - Provider: StaticFileProviderFactory - + StageCheckpointReader + Provider: StaticFileProviderFactory< + Primitives: NodePrimitives, + > + StageCheckpointReader + BlockReader + ChainStateBlockReader, > + 'static, diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 0b93ae0f29a..0fedbdd452d 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ 
b/crates/consensus/beacon/src/engine/mod.rs @@ -1991,7 +1991,8 @@ mod tests { use alloy_rpc_types_engine::{ForkchoiceState, ForkchoiceUpdated, PayloadStatus}; use assert_matches::assert_matches; use reth_chainspec::{ChainSpecBuilder, MAINNET}; - use reth_provider::{BlockWriter, ProviderFactory}; + use reth_node_types::FullNodePrimitives; + use reth_provider::{BlockWriter, ProviderFactory, StorageLocation}; use reth_rpc_types_compat::engine::payload::block_to_payload_v1; use reth_stages::{ExecOutput, PipelineError, StageError}; use reth_stages_api::StageCheckpoint; @@ -2169,7 +2170,10 @@ mod tests { assert_matches!(rx.await, Ok(Ok(()))); } - fn insert_blocks<'a, N: ProviderNodeTypes>( + fn insert_blocks< + 'a, + N: ProviderNodeTypes>, + >( provider_factory: ProviderFactory, mut blocks: impl Iterator, ) { @@ -2179,6 +2183,7 @@ mod tests { provider .insert_block( b.clone().try_seal_with_senders().expect("invalid tx signature in block"), + StorageLocation::Database, ) .map(drop) }) diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index b6e75f802e3..b140846981e 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -10,7 +10,7 @@ use reth_network_p2p::{ full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient}, EthBlockClient, }; -use reth_primitives::SealedBlock; +use reth_primitives::{EthPrimitives, NodePrimitives, SealedBlock}; use reth_provider::providers::ProviderNodeTypes; use reth_stages_api::{ControlFlow, Pipeline, PipelineError, PipelineTarget, PipelineWithResult}; use reth_tasks::TaskSpawner; @@ -361,9 +361,9 @@ impl Ord for OrderedSealedBlock { /// The event type emitted by the [`EngineSyncController`]. #[derive(Debug)] -pub(crate) enum EngineSyncEvent { +pub(crate) enum EngineSyncEvent { /// A full block has been downloaded from the network. - FetchedFullBlock(SealedBlock), + FetchedFullBlock(SealedBlock), /// Pipeline started syncing /// /// This is none if the pipeline is triggered without a specific target. 
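// `insert_block` now takes an explicit `StorageLocation`, so callers choose
// where a block is written. Only `Database` and `Both` appear in this diff;
// the sketch below adds a `StaticFiles` variant as an assumption and shows how
// a writer might dispatch on the location (stand-in types, not reth's storage
// API):
#[derive(Debug, Clone, Copy)]
enum StorageLocation {
    Database,
    StaticFiles, // assumed third variant, not shown in this diff
    Both,
}

impl StorageLocation {
    fn database(&self) -> bool {
        matches!(self, Self::Database | Self::Both)
    }
    fn static_files(&self) -> bool {
        matches!(self, Self::StaticFiles | Self::Both)
    }
}

struct Writer;

impl Writer {
    fn insert_block(&self, number: u64, location: StorageLocation) {
        if location.database() {
            println!("block {number}: write to database tables");
        }
        if location.static_files() {
            println!("block {number}: append to static files");
        }
    }
}

fn main() {
    Writer.insert_block(1, StorageLocation::Database);
    Writer.insert_block(2, StorageLocation::Both);
}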
diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 64daba2b453..0ebef1efe6e 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -370,7 +370,7 @@ where .with_tip_sender(tip_tx), TestPipelineConfig::Real => { let header_downloader = ReverseHeadersDownloaderBuilder::default() - .build(client.clone(), consensus.clone()) + .build(client.clone(), consensus.clone().as_header_validator()) .into_task(); let body_downloader = BodiesDownloaderBuilder::default() diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 62357b4b9b1..6042f16bf50 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -450,7 +450,7 @@ mod tests { let signature = Signature::new(U256::default(), U256::default(), true); - TransactionSigned::from_transaction_and_signature(request, signature) + TransactionSigned::new_unhashed(request, signature) } /// got test block diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index ec296f3ed49..3ad53456cbd 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -11,7 +11,7 @@ extern crate alloc; -use alloc::{fmt::Debug, vec::Vec}; +use alloc::{fmt::Debug, sync::Arc, vec::Vec}; use alloy_consensus::Header; use alloy_eips::eip7685::Requests; use alloy_primitives::{BlockHash, BlockNumber, Bloom, B256, U256}; @@ -30,23 +30,60 @@ pub mod test_utils; /// Post execution input passed to [`Consensus::validate_block_post_execution`]. #[derive(Debug)] -pub struct PostExecutionInput<'a> { +pub struct PostExecutionInput<'a, R = Receipt> { /// Receipts of the block. - pub receipts: &'a [Receipt], + pub receipts: &'a [R], /// EIP-7685 requests of the block. pub requests: &'a Requests, } -impl<'a> PostExecutionInput<'a> { +impl<'a, R> PostExecutionInput<'a, R> { /// Creates a new instance of `PostExecutionInput`. - pub const fn new(receipts: &'a [Receipt], requests: &'a Requests) -> Self { + pub const fn new(receipts: &'a [R], requests: &'a Requests) -> Self { Self { receipts, requests } } } /// Consensus is a protocol that chooses canonical chain. #[auto_impl::auto_impl(&, Arc)] -pub trait Consensus: Debug + Send + Sync { +pub trait Consensus: + AsHeaderValidator + HeaderValidator + Debug + Send + Sync +{ + /// Ensures that body field values match the header. + fn validate_body_against_header( + &self, + body: &B, + header: &SealedHeader, + ) -> Result<(), ConsensusError>; + + /// Validate a block disregarding world state, i.e. things that can be checked before sender + /// recovery and execution. + /// + /// See the Yellow Paper sections 4.3.2 "Holistic Validity", 4.3.4 "Block Header Validity", and + /// 11.1 "Ommer Validation". + /// + /// **This should not be called for the genesis block**. + /// + /// Note: validating blocks does not include other validations of the Consensus + fn validate_block_pre_execution(&self, block: &SealedBlock) + -> Result<(), ConsensusError>; + + /// Validate a block considering world state, i.e. things that can not be checked before + /// execution. + /// + /// See the Yellow Paper sections 4.3.2 "Holistic Validity". 
+ /// + /// Note: validating blocks does not include other validations of the Consensus + fn validate_block_post_execution( + &self, + block: &BlockWithSenders, + input: PostExecutionInput<'_>, + ) -> Result<(), ConsensusError>; +} + +/// HeaderValidator is a protocol that validates headers and their relationships. +#[auto_impl::auto_impl(&, Arc)] +pub trait HeaderValidator: Debug + Send + Sync { /// Validate if header is correct and follows consensus specification. /// /// This is called on standalone header to check if all hashes are correct. @@ -60,7 +97,8 @@ pub trait Consensus: Debug + Send + Sync { /// /// **This should not be called for the genesis block**. /// - /// Note: Validating header against its parent does not include other Consensus validations. + /// Note: Validating header against its parent does not include other HeaderValidator + /// validations. fn validate_header_against_parent( &self, header: &SealedHeader, @@ -99,43 +137,29 @@ pub trait Consensus: Debug + Send + Sync { /// /// Some consensus engines may want to do additional checks here. /// - /// Note: validating headers with TD does not include other Consensus validation. + /// Note: validating headers with TD does not include other HeaderValidator validation. fn validate_header_with_total_difficulty( &self, header: &H, total_difficulty: U256, ) -> Result<(), ConsensusError>; +} - /// Ensures that body field values match the header. - fn validate_body_against_header( - &self, - body: &B, - header: &SealedHeader, - ) -> Result<(), ConsensusError>; - - /// Validate a block disregarding world state, i.e. things that can be checked before sender - /// recovery and execution. - /// - /// See the Yellow Paper sections 4.3.2 "Holistic Validity", 4.3.4 "Block Header Validity", and - /// 11.1 "Ommer Validation". - /// - /// **This should not be called for the genesis block**. - /// - /// Note: validating blocks does not include other validations of the Consensus - fn validate_block_pre_execution(&self, block: &SealedBlock) - -> Result<(), ConsensusError>; +/// Helper trait to cast `Arc` to `Arc` +pub trait AsHeaderValidator: HeaderValidator { + /// Converts the [`Arc`] of self to [`Arc`] of [`HeaderValidator`] + fn as_header_validator<'a>(self: Arc) -> Arc + 'a> + where + Self: 'a; +} - /// Validate a block considering world state, i.e. things that can not be checked before - /// execution. - /// - /// See the Yellow Paper sections 4.3.2 "Holistic Validity". 
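// Rust has no implicit conversion from an `Arc` of one trait object to an
// `Arc` of its supertrait object, which is why the `AsHeaderValidator` helper
// introduced above exists: a blanket impl performs the unsizing coercion on
// the concrete type. A self-contained sketch of the same trick (simplified
// traits, not reth's signatures):
use std::sync::Arc;

trait HeaderValidator {
    fn validate_header(&self) -> bool;
}

// Supertrait split: full consensus implies header validation.
trait Consensus: AsHeaderValidator {}

trait AsHeaderValidator: HeaderValidator {
    fn as_header_validator<'a>(self: Arc<Self>) -> Arc<dyn HeaderValidator + 'a>
    where
        Self: 'a;
}

// For any sized `T: HeaderValidator`, `Arc<T> -> Arc<dyn HeaderValidator>` is
// a built-in coercion, so the blanket impl can just return `self`.
impl<T: HeaderValidator> AsHeaderValidator for T {
    fn as_header_validator<'a>(self: Arc<Self>) -> Arc<dyn HeaderValidator + 'a>
    where
        Self: 'a,
    {
        self
    }
}

struct Noop;
impl HeaderValidator for Noop {
    fn validate_header(&self) -> bool {
        true
    }
}
impl Consensus for Noop {}

fn main() {
    let consensus: Arc<dyn Consensus> = Arc::new(Noop);
    // e.g. a headers downloader only needs the validator half:
    let validator: Arc<dyn HeaderValidator> = consensus.as_header_validator();
    assert!(validator.validate_header());
}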
- /// - /// Note: validating blocks does not include other validations of the Consensus - fn validate_block_post_execution( - &self, - block: &BlockWithSenders, - input: PostExecutionInput<'_>, - ) -> Result<(), ConsensusError>; +impl, H> AsHeaderValidator for T { + fn as_header_validator<'a>(self: Arc) -> Arc + 'a> + where + Self: 'a, + { + self + } } /// Consensus Errors diff --git a/crates/consensus/consensus/src/noop.rs b/crates/consensus/consensus/src/noop.rs index 9b72f89b176..6d12af08d51 100644 --- a/crates/consensus/consensus/src/noop.rs +++ b/crates/consensus/consensus/src/noop.rs @@ -1,4 +1,4 @@ -use crate::{Consensus, ConsensusError, PostExecutionInput}; +use crate::{Consensus, ConsensusError, HeaderValidator, PostExecutionInput}; use alloy_primitives::U256; use reth_primitives::{BlockWithSenders, SealedBlock, SealedHeader}; @@ -7,7 +7,7 @@ use reth_primitives::{BlockWithSenders, SealedBlock, SealedHeader}; #[non_exhaustive] pub struct NoopConsensus; -impl Consensus for NoopConsensus { +impl HeaderValidator for NoopConsensus { fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { Ok(()) } @@ -27,7 +27,9 @@ impl Consensus for NoopConsensus { ) -> Result<(), ConsensusError> { Ok(()) } +} +impl Consensus for NoopConsensus { fn validate_body_against_header( &self, _body: &B, diff --git a/crates/consensus/consensus/src/test_utils.rs b/crates/consensus/consensus/src/test_utils.rs index 52926ec323e..ba683dd255f 100644 --- a/crates/consensus/consensus/src/test_utils.rs +++ b/crates/consensus/consensus/src/test_utils.rs @@ -1,4 +1,4 @@ -use crate::{Consensus, ConsensusError, PostExecutionInput}; +use crate::{Consensus, ConsensusError, HeaderValidator, PostExecutionInput}; use alloy_primitives::U256; use core::sync::atomic::{AtomicBool, Ordering}; use reth_primitives::{BlockWithSenders, SealedBlock, SealedHeader}; @@ -47,18 +47,21 @@ impl TestConsensus { } impl Consensus for TestConsensus { - fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { - if self.fail_validation() { + fn validate_body_against_header( + &self, + _body: &B, + _header: &SealedHeader, + ) -> Result<(), ConsensusError> { + if self.fail_body_against_header() { Err(ConsensusError::BaseFeeMissing) } else { Ok(()) } } - fn validate_header_against_parent( + fn validate_block_pre_execution( &self, - _header: &SealedHeader, - _parent: &SealedHeader, + _block: &SealedBlock, ) -> Result<(), ConsensusError> { if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) @@ -67,10 +70,10 @@ impl Consensus for TestConsensus { } } - fn validate_header_with_total_difficulty( + fn validate_block_post_execution( &self, - _header: &H, - _total_difficulty: U256, + _block: &BlockWithSenders, + _input: PostExecutionInput<'_>, ) -> Result<(), ConsensusError> { if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) @@ -78,22 +81,21 @@ impl Consensus for TestConsensus { Ok(()) } } +} - fn validate_body_against_header( - &self, - _body: &B, - _header: &SealedHeader, - ) -> Result<(), ConsensusError> { - if self.fail_body_against_header() { +impl HeaderValidator for TestConsensus { + fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { + if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) } else { Ok(()) } } - fn validate_block_pre_execution( + fn validate_header_against_parent( &self, - _block: &SealedBlock, + _header: &SealedHeader, + _parent: &SealedHeader, ) -> Result<(), ConsensusError> { if self.fail_validation() { 
Err(ConsensusError::BaseFeeMissing) @@ -102,10 +104,10 @@ impl Consensus for TestConsensus { } } - fn validate_block_post_execution( + fn validate_header_with_total_difficulty( &self, - _block: &BlockWithSenders, - _input: PostExecutionInput<'_>, + _header: &H, + _total_difficulty: U256, ) -> Result<(), ConsensusError> { if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index c4c74ebcdf1..9c40e2ba99d 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -19,7 +19,9 @@ reth-rpc-layer.workspace = true reth-payload-builder = { workspace = true, features = ["test-utils"] } reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true +reth-primitives.workspace = true reth-provider.workspace = true +reth-node-api.workspace = true reth-node-builder = { workspace = true, features = ["test-utils"] } reth-tokio-util.workspace = true reth-stages-types.workspace = true diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 1e9b39058e6..73a7e39f1a4 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -5,12 +5,12 @@ use std::sync::Arc; use node::NodeTestContext; use reth::{ args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}, - builder::{NodeBuilder, NodeConfig, NodeHandle}, + builder::{FullNodePrimitives, NodeBuilder, NodeConfig, NodeHandle}, network::PeersHandleProvider, rpc::server_types::RpcModuleSelection, tasks::TaskManager, }; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_chainspec::EthChainSpec; use reth_db::{test_utils::TempDatabase, DatabaseEnv}; use reth_engine_local::LocalPayloadAttributesBuilder; use reth_node_builder::{ @@ -18,7 +18,7 @@ use reth_node_builder::{ FullNodeTypesAdapter, Node, NodeAdapter, NodeComponents, NodeTypesWithDBAdapter, NodeTypesWithEngine, PayloadAttributesBuilder, PayloadTypes, }; -use reth_provider::providers::{BlockchainProvider, BlockchainProvider2}; +use reth_provider::providers::{BlockchainProvider, BlockchainProvider2, NodeTypesForProvider}; use tracing::{span, Level}; use wallet::Wallet; @@ -53,12 +53,13 @@ pub async fn setup( attributes_generator: impl Fn(u64) -> <::Engine as PayloadTypes>::PayloadBuilderAttributes + Copy + 'static, ) -> eyre::Result<(Vec>, TaskManager, Wallet)> where - N: Default + Node> + NodeTypesWithEngine, + N: Default + Node> + NodeTypesForProvider + NodeTypesWithEngine, N::ComponentsBuilder: NodeComponentsBuilder< TmpNodeAdapter, Components: NodeComponents, Network: PeersHandleProvider>, >, N::AddOns: RethRpcAddOns>, + N::Primitives: FullNodePrimitives, { let tasks = TaskManager::current(); let exec = tasks.executor(); @@ -120,7 +121,8 @@ pub async fn setup_engine( where N: Default + Node>>> - + NodeTypesWithEngine, + + NodeTypesWithEngine + + NodeTypesForProvider, N::ComponentsBuilder: NodeComponentsBuilder< TmpNodeAdapter>>, Components: NodeComponents< @@ -132,6 +134,7 @@ where LocalPayloadAttributesBuilder: PayloadAttributesBuilder< <::Engine as PayloadTypes>::PayloadAttributes, >, + N::Primitives: FullNodePrimitives, { let tasks = TaskManager::current(); let exec = tasks.executor(); diff --git a/crates/engine/local/src/service.rs b/crates/engine/local/src/service.rs index 4e4826be31d..3575bc133c6 100644 --- a/crates/engine/local/src/service.rs +++ b/crates/engine/local/src/service.rs @@ -27,7 +27,7 @@ use reth_engine_tree::{ EngineApiKind, EngineApiRequest, EngineApiRequestHandler, 
EngineRequestHandler, FromEngine, RequestHandlerEvent, }, - persistence::PersistenceHandle, + persistence::{PersistenceHandle, PersistenceNodeTypes}, tree::{EngineApiTreeHandler, InvalidBlockHook, TreeConfig}, }; use reth_evm::execute::BlockExecutorProvider; @@ -59,7 +59,7 @@ where impl LocalEngineService where - N: EngineNodeTypes, + N: EngineNodeTypes + PersistenceNodeTypes, { /// Constructor for [`LocalEngineService`]. #[allow(clippy::too_many_arguments)] diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index cec9d981f1b..49233439e0a 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -8,7 +8,7 @@ use reth_engine_tree::{ backfill::PipelineSync, download::BasicBlockDownloader, engine::{EngineApiKind, EngineApiRequest, EngineApiRequestHandler, EngineHandler}, - persistence::PersistenceHandle, + persistence::{PersistenceHandle, PersistenceNodeTypes}, tree::{EngineApiTreeHandler, InvalidBlockHook, TreeConfig}, }; pub use reth_engine_tree::{ @@ -59,7 +59,7 @@ where impl EngineService where - N: EngineNodeTypes, + N: EngineNodeTypes + PersistenceNodeTypes, Client: EthBlockClient + 'static, E: BlockExecutorProvider + 'static, { diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 278457145e7..70be84a9f79 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -27,6 +27,7 @@ reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true reth-payload-validator.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-provider.workspace = true reth-prune.workspace = true reth-revm.workspace = true @@ -45,9 +46,7 @@ revm-primitives.workspace = true # common futures.workspace = true -pin-project.workspace = true tokio = { workspace = true, features = ["macros", "sync"] } -tokio-stream.workspace = true thiserror.workspace = true # metrics @@ -82,6 +81,12 @@ reth-chainspec.workspace = true alloy-rlp.workspace = true assert_matches.workspace = true +criterion.workspace = true +crossbeam-channel = "0.5.13" + +[[bench]] +name = "channel_perf" +harness = false [features] test-utils = [ @@ -103,4 +108,5 @@ test-utils = [ "reth-provider/test-utils", "reth-trie/test-utils", "reth-prune-types?/test-utils", + "reth-primitives-traits/test-utils", ] diff --git a/crates/engine/tree/benches/channel_perf.rs b/crates/engine/tree/benches/channel_perf.rs new file mode 100644 index 00000000000..c1c65e0a68e --- /dev/null +++ b/crates/engine/tree/benches/channel_perf.rs @@ -0,0 +1,132 @@ +//! Benchmark comparing `std::sync::mpsc` and `crossbeam` channels for `StateRootTask`. 
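// Usage note: with `harness = false`, criterion supplies the benchmark's main
// function via `criterion_main!` (see the end of this file). Assuming the
// crate's package name is `reth-engine-tree` (inferred from the path, not
// confirmed by this diff), it should run with something like:
//
//   cargo bench -p reth-engine-tree --bench channel_perf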
+ +#![allow(missing_docs)] + +use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion}; +use revm_primitives::{ + Account, AccountInfo, AccountStatus, Address, EvmState, EvmStorage, EvmStorageSlot, HashMap, + B256, U256, +}; +use std::thread; + +/// Creates a mock state with the specified number of accounts for benchmarking +fn create_bench_state(num_accounts: usize) -> EvmState { + let mut state_changes = HashMap::default(); + + for i in 0..num_accounts { + let storage = + EvmStorage::from_iter([(U256::from(i), EvmStorageSlot::new(U256::from(i + 1)))]); + + let account = Account { + info: AccountInfo { + balance: U256::from(100), + nonce: 10, + code_hash: B256::random(), + code: Default::default(), + }, + storage, + status: AccountStatus::Loaded, + }; + + let address = Address::random(); + state_changes.insert(address, account); + } + + state_changes +} + +/// Simulated `StateRootTask` with `std::sync::mpsc` +struct StdStateRootTask { + rx: std::sync::mpsc::Receiver, +} + +impl StdStateRootTask { + const fn new(rx: std::sync::mpsc::Receiver) -> Self { + Self { rx } + } + + fn run(self) { + while let Ok(state) = self.rx.recv() { + criterion::black_box(state); + } + } +} + +/// Simulated `StateRootTask` with `crossbeam-channel` +struct CrossbeamStateRootTask { + rx: crossbeam_channel::Receiver, +} + +impl CrossbeamStateRootTask { + const fn new(rx: crossbeam_channel::Receiver) -> Self { + Self { rx } + } + + fn run(self) { + while let Ok(state) = self.rx.recv() { + criterion::black_box(state); + } + } +} + +/// Benchmarks the performance of different channel implementations for state streaming +fn bench_state_stream(c: &mut Criterion) { + let mut group = c.benchmark_group("state_stream_channels"); + group.sample_size(10); + + for size in &[1, 10, 100] { + let bench_setup = || { + let states: Vec<_> = (0..100).map(|_| create_bench_state(*size)).collect(); + states + }; + + group.bench_with_input(BenchmarkId::new("std_channel", size), size, |b, _| { + b.iter_batched( + bench_setup, + |states| { + let (tx, rx) = std::sync::mpsc::channel(); + let task = StdStateRootTask::new(rx); + + let processor = thread::spawn(move || { + task.run(); + }); + + for state in states { + tx.send(state).unwrap(); + } + drop(tx); + + processor.join().unwrap(); + }, + BatchSize::LargeInput, + ); + }); + + group.bench_with_input(BenchmarkId::new("crossbeam_channel", size), size, |b, _| { + b.iter_batched( + bench_setup, + |states| { + let (tx, rx) = crossbeam_channel::unbounded(); + let task = CrossbeamStateRootTask::new(rx); + + let processor = thread::spawn(move || { + task.run(); + }); + + for state in states { + tx.send(state).unwrap(); + } + drop(tx); + + processor.join().unwrap(); + }, + BatchSize::LargeInput, + ); + }); + } + + group.finish(); +} + +criterion_group!(benches, bench_state_stream); +criterion_main!(benches); diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs index e0c9e0362d0..86d18ceb48c 100644 --- a/crates/engine/tree/src/persistence.rs +++ b/crates/engine/tree/src/persistence.rs @@ -2,6 +2,8 @@ use crate::metrics::PersistenceMetrics; use alloy_eips::BlockNumHash; use reth_chain_state::ExecutedBlock; use reth_errors::ProviderError; +use reth_primitives::BlockBody; +use reth_primitives_traits::FullNodePrimitives; use reth_provider::{ providers::ProviderNodeTypes, writer::UnifiedStorageWriter, BlockHashReader, ChainStateBlockWriter, DatabaseProviderFactory, ProviderFactory, StaticFileProviderFactory, @@ -16,6 +18,16 @@ use 
thiserror::Error; use tokio::sync::oneshot; use tracing::{debug, error}; +/// A helper trait with requirements for [`ProviderNodeTypes`] to be used within +/// [`PersistenceService`]. +pub trait PersistenceNodeTypes: + ProviderNodeTypes> +{ +} +impl PersistenceNodeTypes for T where + T: ProviderNodeTypes> +{ +} /// Writes parts of reth's in memory tree state to the database and static files. /// /// This is meant to be a spawned service that listens for various incoming persistence operations, @@ -60,7 +72,7 @@ impl PersistenceService { } } -impl PersistenceService { +impl PersistenceService { /// This is the main loop, that will listen to database events and perform the requested /// database actions pub fn run(mut self) -> Result<(), PersistenceError> { @@ -198,7 +210,7 @@ impl PersistenceHandle { } /// Create a new [`PersistenceHandle`], and spawn the persistence service. - pub fn spawn_service( + pub fn spawn_service( provider_factory: ProviderFactory, pruner: PrunerWithFactory>, sync_metrics_tx: MetricEventsSender, diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index fbf6c348138..45cf5a78031 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -1,18 +1,13 @@ //! State root task related functionality. -use futures::Stream; -use pin_project::pin_project; use reth_provider::providers::ConsistentDbView; use reth_trie::{updates::TrieUpdates, TrieInput}; use reth_trie_parallel::root::ParallelStateRootError; use revm_primitives::{EvmState, B256}; -use std::{ - future::Future, - pin::Pin, - sync::{mpsc, Arc}, - task::{Context, Poll}, +use std::sync::{ + mpsc::{self, Receiver, RecvError}, + Arc, }; -use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::debug; /// Result of the state root calculation @@ -28,12 +23,43 @@ pub(crate) struct StateRootHandle { #[allow(dead_code)] impl StateRootHandle { + /// Creates a new handle from a receiver. + pub(crate) const fn new(rx: mpsc::Receiver) -> Self { + Self { rx } + } + /// Waits for the state root calculation to complete. pub(crate) fn wait_for_result(self) -> StateRootResult { self.rx.recv().expect("state root task was dropped without sending result") } } +/// Common configuration for state root tasks +#[derive(Debug)] +pub(crate) struct StateRootConfig { + /// View over the state in the database. + pub consistent_view: ConsistentDbView, + /// Latest trie input. + pub input: Arc, +} + +/// Wrapper for std channel receiver to maintain compatibility with `UnboundedReceiverStream` +#[allow(dead_code)] +pub(crate) struct StdReceiverStream { + rx: Receiver, +} + +#[allow(dead_code)] +impl StdReceiverStream { + pub(crate) const fn new(rx: Receiver) -> Self { + Self { rx } + } + + pub(crate) fn recv(&self) -> Result { + self.rx.recv() + } +} + /// Standalone task that receives a transaction state stream and updates relevant /// data structures to calculate state root. /// @@ -42,15 +68,12 @@ impl StateRootHandle { /// fetches the proofs for relevant accounts from the database and reveal them /// to the tree. /// Then it updates relevant leaves according to the result of the transaction. -#[pin_project] +#[allow(dead_code)] pub(crate) struct StateRootTask { - /// View over the state in the database. - consistent_view: ConsistentDbView, /// Incoming state updates. - #[pin] - state_stream: UnboundedReceiverStream, - /// Latest trie input. - input: Arc, + state_stream: StdReceiverStream, + /// Task configuration. 
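// The state root task now runs on a dedicated OS thread rather than a tokio
// task: as the `spawn` implementation later in this hunk shows, a
// `sync_channel(1)` plus a named `thread::Builder` replaces `tokio::spawn`,
// and the returned handle blocks on `recv` for the single result. A std-only
// sketch of that spawn/handle shape:
use std::sync::mpsc;
use std::thread;

struct TaskHandle {
    rx: mpsc::Receiver<u64>,
}

impl TaskHandle {
    // Blocks until the worker sends its result, panicking if the worker was
    // dropped without sending, mirroring the `expect` in the diff.
    fn wait_for_result(self) -> u64 {
        self.rx.recv().expect("task dropped without sending a result")
    }
}

fn spawn_task(inputs: Vec<u64>) -> TaskHandle {
    // Capacity 1 is enough: exactly one result is ever sent.
    let (tx, rx) = mpsc::sync_channel(1);
    thread::Builder::new()
        .name("State Root Task".to_string())
        .spawn(move || {
            let result = inputs.into_iter().sum();
            let _ = tx.send(result);
        })
        .expect("failed to spawn worker thread");
    TaskHandle { rx }
}

fn main() {
    let handle = spawn_task(vec![1, 2, 3]);
    assert_eq!(handle.wait_for_result(), 6);
    println!("ok");
}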
+ config: StateRootConfig, } #[allow(dead_code)] @@ -60,65 +83,109 @@ where { /// Creates a new `StateRootTask`. pub(crate) const fn new( - consistent_view: ConsistentDbView, - input: Arc, - state_stream: UnboundedReceiverStream, + config: StateRootConfig, + state_stream: StdReceiverStream, ) -> Self { - Self { consistent_view, state_stream, input } + Self { config, state_stream } } /// Spawns the state root task and returns a handle to await its result. pub(crate) fn spawn(self) -> StateRootHandle { - let (tx, rx) = mpsc::channel(); - - // Spawn the task that will process state updates and calculate the root - tokio::spawn(async move { - debug!(target: "engine::tree", "Starting state root task"); - let result = self.await; - let _ = tx.send(result); - }); + let (tx, rx) = mpsc::sync_channel(1); + std::thread::Builder::new() + .name("State Root Task".to_string()) + .spawn(move || { + debug!(target: "engine::tree", "Starting state root task"); + let result = self.run(); + let _ = tx.send(result); + }) + .expect("failed to spawn state root thread"); - StateRootHandle { rx } + StateRootHandle::new(rx) } /// Handles state updates. fn on_state_update( - _view: &ConsistentDbView, - _input: &Arc, + _view: &reth_provider::providers::ConsistentDbView, + _input: &std::sync::Arc, _state: EvmState, ) { + // Default implementation of state update handling // TODO: calculate hashed state update and dispatch proof gathering for it. } } -impl Future for StateRootTask +#[allow(dead_code)] +impl StateRootTask where Factory: Send + 'static, { - type Output = StateRootResult; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let mut this = self.project(); - - // Process all items until the stream is closed - loop { - match this.state_stream.as_mut().poll_next(cx) { - Poll::Ready(Some(state)) => { - Self::on_state_update(this.consistent_view, this.input, state); - } - Poll::Ready(None) => { - // stream closed, return final result - return Poll::Ready(Ok((B256::default(), TrieUpdates::default()))); - } - Poll::Pending => { - return Poll::Pending; - } - } + fn run(self) -> StateRootResult { + while let Ok(state) = self.state_stream.recv() { + Self::on_state_update(&self.config.consistent_view, &self.config.input, state); } // TODO: // * keep track of proof calculation // * keep track of intermediate root computation // * return final state root result + Ok((B256::default(), TrieUpdates::default())) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use reth_provider::{providers::ConsistentDbView, test_utils::MockEthProvider}; + use reth_trie::TrieInput; + use revm_primitives::{ + Account, AccountInfo, AccountStatus, Address, EvmState, EvmStorage, EvmStorageSlot, + HashMap, B256, U256, + }; + use std::sync::Arc; + + fn create_mock_config() -> StateRootConfig { + let factory = MockEthProvider::default(); + let view = ConsistentDbView::new(factory, None); + let input = Arc::new(TrieInput::default()); + StateRootConfig { consistent_view: view, input } + } + + fn create_mock_state() -> revm_primitives::EvmState { + let mut state_changes: EvmState = HashMap::default(); + let storage = EvmStorage::from_iter([(U256::from(1), EvmStorageSlot::new(U256::from(2)))]); + let account = Account { + info: AccountInfo { + balance: U256::from(100), + nonce: 10, + code_hash: B256::random(), + code: Default::default(), + }, + storage, + status: AccountStatus::Loaded, + }; + + let address = Address::random(); + state_changes.insert(address, account); + + state_changes + } + + #[test] + fn test_state_root_task() { 
+ let config = create_mock_config(); + let (tx, rx) = std::sync::mpsc::channel(); + let stream = StdReceiverStream::new(rx); + + let task = StateRootTask::new(config, stream); + let handle = task.spawn(); + + for _ in 0..10 { + tx.send(create_mock_state()).expect("failed to send state"); + } + drop(tx); + + let result = handle.wait_for_result(); + assert!(result.is_ok(), "sync block execution failed"); } } diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index ec69bbd0024..fd80fa9e165 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -339,7 +339,7 @@ where // Treat error as fatal Err(error) => { return Err(RethError::Execution(BlockExecutionError::Validation( - BlockValidationError::EVM { hash: tx.hash, error: Box::new(error) }, + BlockValidationError::EVM { hash: tx.hash(), error: Box::new(error) }, ))) } }; diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index 7198a703672..ffabe5b1952 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -11,7 +11,7 @@ use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; use alloy_primitives::U256; use reth_chainspec::{EthChainSpec, EthereumHardfork, EthereumHardforks}; -use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; +use reth_consensus::{Consensus, ConsensusError, HeaderValidator, PostExecutionInput}; use reth_consensus_common::validation::{ validate_4844_header_standalone, validate_against_parent_4844, validate_against_parent_eip1559_base_fee, validate_against_parent_hash_number, @@ -92,6 +92,30 @@ impl EthBeaconConsensus impl Consensus for EthBeaconConsensus +{ + fn validate_body_against_header( + &self, + body: &BlockBody, + header: &SealedHeader, + ) -> Result<(), ConsensusError> { + validate_body_against_header(body, header) + } + + fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> { + validate_block_pre_execution(block, &self.chain_spec) + } + + fn validate_block_post_execution( + &self, + block: &BlockWithSenders, + input: PostExecutionInput<'_>, + ) -> Result<(), ConsensusError> { + validate_block_post_execution(block, &self.chain_spec, input.receipts, input.requests) + } +} + +impl HeaderValidator + for EthBeaconConsensus { fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> { validate_header_gas(header)?; @@ -210,26 +234,6 @@ impl Consensu Ok(()) } - - fn validate_body_against_header( - &self, - body: &BlockBody, - header: &SealedHeader, - ) -> Result<(), ConsensusError> { - validate_body_against_header(body, header) - } - - fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> { - validate_block_pre_execution(block, &self.chain_spec) - } - - fn validate_block_post_execution( - &self, - block: &BlockWithSenders, - input: PostExecutionInput<'_>, - ) -> Result<(), ConsensusError> { - validate_block_post_execution(block, &self.chain_spec, input.receipts, input.requests) - } } #[cfg(test)] diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index b5e5aecf730..6aa7b2ccca6 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -18,7 +18,7 @@ use reth_evm::{ }, state_change::post_block_balance_increments, system_calls::{OnStateHook, SystemCaller}, - ConfigureEvm, + ConfigureEvm, TxEnvOverrides, }; use reth_primitives::{BlockWithSenders, Receipt}; use reth_revm::db::State; @@ -83,6 +83,8 @@ where chain_spec: Arc, 
/// How to create an EVM. evm_config: EvmConfig, + /// Optional overrides for the transactions environment. + tx_env_overrides: Option>, /// Current state for block execution. state: State, /// Utility to call system smart contracts. @@ -96,7 +98,7 @@ where /// Creates a new [`EthExecutionStrategy`] pub fn new(state: State, chain_spec: Arc, evm_config: EvmConfig) -> Self { let system_caller = SystemCaller::new(evm_config.clone(), chain_spec.clone()); - Self { state, chain_spec, evm_config, system_caller } + Self { state, chain_spec, evm_config, system_caller, tx_env_overrides: None } } } @@ -130,6 +132,10 @@ where { type Error = BlockExecutionError; + fn init(&mut self, tx_env_overrides: Box) { + self.tx_env_overrides = Some(tx_env_overrides); + } + fn apply_pre_execution_changes( &mut self, block: &BlockWithSenders, @@ -172,6 +178,10 @@ where self.evm_config.fill_tx_env(evm.tx_mut(), transaction, *sender); + if let Some(tx_env_overrides) = &mut self.tx_env_overrides { + tx_env_overrides.apply(evm.tx_mut()); + } + // Execute transaction. let result_and_state = evm.transact().map_err(move |err| { let new_err = err.map_db_err(|e| e.into()); diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index c8ed58df03b..206230cd00e 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -55,7 +55,7 @@ impl EthEvmConfig { } /// Returns the chain spec associated with this configuration. - pub fn chain_spec(&self) -> &ChainSpec { + pub const fn chain_spec(&self) -> &Arc { &self.chain_spec } } diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 5265329f19a..a2ae2374b96 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -13,8 +13,7 @@ use reth_evm::execute::BasicBlockExecutorProvider; use reth_evm_ethereum::execute::EthExecutionStrategyFactory; use reth_network::{NetworkHandle, PeersInfo}; use reth_node_api::{ - AddOnsContext, ConfigureEvm, EngineValidator, FullNodeComponents, NodePrimitives, - NodeTypesWithDB, + AddOnsContext, ConfigureEvm, EngineValidator, FullNodeComponents, NodeTypesWithDB, }; use reth_node_builder::{ components::{ @@ -26,8 +25,8 @@ use reth_node_builder::{ BuilderContext, Node, NodeAdapter, NodeComponentsBuilder, PayloadBuilderConfig, PayloadTypes, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; -use reth_primitives::{Block, Receipt, TransactionSigned, TxType}; -use reth_provider::CanonStateSubscriptions; +use reth_primitives::EthPrimitives; +use reth_provider::{CanonStateSubscriptions, EthStorage}; use reth_rpc::EthApi; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::{ @@ -38,17 +37,6 @@ use reth_trie_db::MerklePatriciaTrie; use crate::{EthEngineTypes, EthEvmConfig}; -/// Ethereum primitive types. -#[derive(Debug, Default, Clone)] -pub struct EthPrimitives; - -impl NodePrimitives for EthPrimitives { - type Block = Block; - type SignedTx = TransactionSigned; - type TxType = TxType; - type Receipt = Receipt; -} - /// Type configuration for a regular Ethereum node. 
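// The execution strategy above gains an optional, boxed override that is
// applied to each transaction's EVM environment right before execution. A
// stand-in sketch of that hook shape (names simplified; the real trait lives
// in reth-evm):
struct TxEnv {
    gas_price: u128,
}

trait TxEnvOverrides: Send {
    fn apply(&mut self, env: &mut TxEnv);
}

// Example override: force a zero gas price, e.g. for simulation.
struct ZeroGasPrice;
impl TxEnvOverrides for ZeroGasPrice {
    fn apply(&mut self, env: &mut TxEnv) {
        env.gas_price = 0;
    }
}

struct Strategy {
    // `None` by default; `init` installs the override, as in the diff.
    tx_env_overrides: Option<Box<dyn TxEnvOverrides>>,
}

impl Strategy {
    fn init(&mut self, overrides: Box<dyn TxEnvOverrides>) {
        self.tx_env_overrides = Some(overrides);
    }

    fn execute_transaction(&mut self, mut env: TxEnv) {
        // Mirrors the diff: fill the env, then let the override mutate it.
        if let Some(overrides) = &mut self.tx_env_overrides {
            overrides.apply(&mut env);
        }
        println!("executing with gas price {}", env.gas_price);
    }
}

fn main() {
    let mut strategy = Strategy { tx_env_overrides: None };
    strategy.init(Box::new(ZeroGasPrice));
    strategy.execute_transaction(TxEnv { gas_price: 1_000 });
}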
#[derive(Debug, Default, Clone, Copy)] #[non_exhaustive] @@ -86,6 +74,7 @@ impl NodeTypes for EthereumNode { type Primitives = EthPrimitives; type ChainSpec = ChainSpec; type StateCommitment = MerklePatriciaTrie; + type Storage = EthStorage; } impl NodeTypesWithEngine for EthereumNode { @@ -106,7 +95,13 @@ pub type EthereumAddOns = RpcAddOns< impl Node for EthereumNode where - Types: NodeTypesWithDB + NodeTypesWithEngine, + Types: NodeTypesWithDB + + NodeTypesWithEngine< + Engine = EthEngineTypes, + ChainSpec = ChainSpec, + Primitives = EthPrimitives, + Storage = EthStorage, + >, N: FullNodeTypes, { type ComponentsBuilder = ComponentsBuilder< diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 4ec1e212c8d..b2f78da6de9 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -14,7 +14,7 @@ use alloy_eips::{eip4844::MAX_DATA_GAS_PER_BLOCK, eip7685::Requests, merge::BEAC use alloy_primitives::U256; use reth_basic_payload_builder::{ commit_withdrawals, is_better_payload, BuildArguments, BuildOutcome, PayloadBuilder, - PayloadConfig, WithdrawalsOutcome, + PayloadConfig, }; use reth_chain_state::ExecutedBlock; use reth_chainspec::ChainSpec; @@ -356,8 +356,8 @@ where None }; - let WithdrawalsOutcome { withdrawals_root, withdrawals } = - commit_withdrawals(&mut db, &chain_spec, attributes.timestamp, attributes.withdrawals)?; + let withdrawals_root = + commit_withdrawals(&mut db, &chain_spec, attributes.timestamp, &attributes.withdrawals)?; // merge all transitions into bundle state, this would apply the withdrawal balance changes // and 4788 contract call @@ -399,7 +399,7 @@ where // grab the blob sidecars from the executed txs blob_sidecars = pool .get_all_blobs_exact( - executed_txs.iter().filter(|tx| tx.is_eip4844()).map(|tx| tx.hash).collect(), + executed_txs.iter().filter(|tx| tx.is_eip4844()).map(|tx| tx.hash()).collect(), ) .map_err(PayloadBuilderError::other)?; @@ -440,6 +440,10 @@ where requests_hash, }; + let withdrawals = chain_spec + .is_shanghai_active_at_timestamp(attributes.timestamp) + .then(|| attributes.withdrawals.clone()); + // seal the block let block = Block { header, @@ -447,7 +451,7 @@ where }; let sealed_block = Arc::new(block.seal_slow()); - debug!(target: "payload_builder", ?sealed_block, "sealed built block"); + debug!(target: "payload_builder", id=%attributes.id, sealed_block_header = ?sealed_block.header, "sealed built block"); // create the executed block data let executed = ExecutedBlock { diff --git a/crates/evm/execution-types/Cargo.toml b/crates/evm/execution-types/Cargo.toml index 13b0aef8ad4..4d2d8214ff9 100644 --- a/crates/evm/execution-types/Cargo.toml +++ b/crates/evm/execution-types/Cargo.toml @@ -40,7 +40,8 @@ serde = [ "revm/serde", "alloy-eips/serde", "alloy-primitives/serde", - "rand/serde" + "rand/serde", + "reth-primitives-traits/serde", ] serde-bincode-compat = [ "reth-primitives/serde-bincode-compat", diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs index dc633e2d7ab..200a37423cf 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -7,9 +7,10 @@ use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash}; use core::{fmt, ops::RangeInclusive}; use reth_execution_errors::{BlockExecutionError, InternalBlockExecutionError}; use reth_primitives::{ - Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionSigned, + SealedBlock, SealedBlockWithSenders, SealedHeader, 
TransactionSigned, TransactionSignedEcRecovered, }; +use reth_primitives_traits::NodePrimitives; use reth_trie::updates::TrieUpdates; use revm::db::BundleState; @@ -25,7 +26,7 @@ use revm::db::BundleState; /// A chain of blocks should not be empty. #[derive(Clone, Debug, Default, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub struct Chain { +pub struct Chain { /// All blocks in this chain. blocks: BTreeMap, /// The outcome of block execution for this chain. @@ -34,14 +35,14 @@ pub struct Chain { /// chain, ranging from the [`Chain::first`] block to the [`Chain::tip`] block, inclusive. /// /// Additionally, it includes the individual state changes that led to the current state. - execution_outcome: ExecutionOutcome, + execution_outcome: ExecutionOutcome, /// State trie updates after block is added to the chain. /// NOTE: Currently, trie updates are present only for /// single-block chains that extend the canonical chain. trie_updates: Option, } -impl Chain { +impl Chain { /// Create new Chain from blocks and state. /// /// # Warning @@ -49,7 +50,7 @@ impl Chain { /// A chain of blocks should not be empty. pub fn new( blocks: impl IntoIterator, - execution_outcome: ExecutionOutcome, + execution_outcome: ExecutionOutcome, trie_updates: Option, ) -> Self { let blocks = blocks.into_iter().map(|b| (b.number, b)).collect::>(); @@ -61,7 +62,7 @@ impl Chain { /// Create new Chain from a single block and its state. pub fn from_block( block: SealedBlockWithSenders, - execution_outcome: ExecutionOutcome, + execution_outcome: ExecutionOutcome, trie_updates: Option, ) -> Self { Self::new([block], execution_outcome, trie_updates) @@ -93,12 +94,12 @@ impl Chain { } /// Get execution outcome of this chain - pub const fn execution_outcome(&self) -> &ExecutionOutcome { + pub const fn execution_outcome(&self) -> &ExecutionOutcome { &self.execution_outcome } /// Get mutable execution outcome of this chain - pub fn execution_outcome_mut(&mut self) -> &mut ExecutionOutcome { + pub fn execution_outcome_mut(&mut self) -> &mut ExecutionOutcome { &mut self.execution_outcome } @@ -132,7 +133,7 @@ impl Chain { pub fn execution_outcome_at_block( &self, block_number: BlockNumber, - ) -> Option { + ) -> Option> { if self.tip().number == block_number { return Some(self.execution_outcome.clone()) } @@ -149,19 +150,21 @@ impl Chain { /// 1. The blocks contained in the chain. /// 2. The execution outcome representing the final state. /// 3. The optional trie updates. - pub fn into_inner(self) -> (ChainBlocks<'static>, ExecutionOutcome, Option) { + pub fn into_inner( + self, + ) -> (ChainBlocks<'static>, ExecutionOutcome, Option) { (ChainBlocks { blocks: Cow::Owned(self.blocks) }, self.execution_outcome, self.trie_updates) } /// Destructure the chain into its inner components: /// 1. A reference to the blocks contained in the chain. /// 2. A reference to the execution outcome representing the final state. - pub const fn inner(&self) -> (ChainBlocks<'_>, &ExecutionOutcome) { + pub const fn inner(&self) -> (ChainBlocks<'_>, &ExecutionOutcome) { (ChainBlocks { blocks: Cow::Borrowed(&self.blocks) }, &self.execution_outcome) } /// Returns an iterator over all the receipts of the blocks in the chain. - pub fn block_receipts_iter(&self) -> impl Iterator>> + '_ { + pub fn block_receipts_iter(&self) -> impl Iterator>> + '_ { self.execution_outcome.receipts().iter() } @@ -173,7 +176,7 @@ impl Chain { /// Returns an iterator over all blocks and their receipts in the chain. 
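// With `Chain` generic over the node's primitives, receipts come out as the
// associated `Receipt` type rather than a hardcoded one. A stand-in sketch of
// the zip used by the `blocks_and_receipts`-style accessor that follows
// (simplified types, not reth's):
use std::collections::BTreeMap;

trait NodePrimitives {
    type Receipt: std::fmt::Debug;
}

struct Eth;
impl NodePrimitives for Eth {
    type Receipt = &'static str; // stand-in receipt type
}

struct Chain<N: NodePrimitives> {
    blocks: BTreeMap<u64, String>,
    receipts: Vec<Vec<N::Receipt>>, // one Vec per block
}

impl<N: NodePrimitives> Chain<N> {
    // One receipts Vec per block, zipped in block-number order.
    fn blocks_and_receipts(&self) -> impl Iterator<Item = (&str, &[N::Receipt])> + '_ {
        self.blocks.values().zip(self.receipts.iter()).map(|(block, r)| (block.as_str(), r.as_slice()))
    }
}

fn main() {
    let chain: Chain<Eth> = Chain {
        blocks: BTreeMap::from([(1, "block 1".to_string()), (2, "block 2".to_string())]),
        receipts: vec![vec!["receipt 1"], vec!["receipt 2"]],
    };
    for (block, receipts) in chain.blocks_and_receipts() {
        println!("{block}: {receipts:?}");
    }
}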
pub fn blocks_and_receipts( &self, - ) -> impl Iterator>)> + '_ { + ) -> impl Iterator>)> + '_ { self.blocks_iter().zip(self.block_receipts_iter()) } @@ -219,7 +222,7 @@ impl Chain { } /// Get all receipts for the given block. - pub fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option> { + pub fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option> { let num = self.block_number(block_hash)?; self.execution_outcome.receipts_by_block(num).iter().map(Option::as_ref).collect() } @@ -227,7 +230,7 @@ impl Chain { /// Get all receipts with attachment. /// /// Attachment includes block number, block hash, transaction hash and transaction index. - pub fn receipts_with_attachment(&self) -> Vec { + pub fn receipts_with_attachment(&self) -> Vec> { let mut receipt_attach = Vec::with_capacity(self.blocks().len()); for ((block_num, block), receipts) in self.blocks().iter().zip(self.execution_outcome.receipts().iter()) @@ -250,7 +253,7 @@ impl Chain { pub fn append_block( &mut self, block: SealedBlockWithSenders, - execution_outcome: ExecutionOutcome, + execution_outcome: ExecutionOutcome, ) { self.blocks.insert(block.number, block); self.execution_outcome.extend(execution_outcome); @@ -300,7 +303,7 @@ impl Chain { /// /// If chain doesn't have any blocks. #[track_caller] - pub fn split(mut self, split_at: ChainSplitTarget) -> ChainSplit { + pub fn split(mut self, split_at: ChainSplitTarget) -> ChainSplit { let chain_tip = *self.blocks.last_entry().expect("chain is never empty").key(); let block_number = match split_at { ChainSplitTarget::Hash(block_hash) => { @@ -438,7 +441,7 @@ impl ChainBlocks<'_> { /// Returns an iterator over all transaction hashes in the block #[inline] pub fn transaction_hashes(&self) -> impl Iterator + '_ { - self.blocks.values().flat_map(|block| block.transactions().map(|tx| tx.hash)) + self.blocks.values().flat_map(|block| block.transactions().map(|tx| tx.hash())) } } @@ -454,11 +457,11 @@ impl IntoIterator for ChainBlocks<'_> { /// Used to hold receipts and their attachment. #[derive(Default, Clone, Debug, PartialEq, Eq)] -pub struct BlockReceipts { +pub struct BlockReceipts { /// Block identifier pub block: BlockNumHash, /// Transaction identifier and receipt. - pub tx_receipts: Vec<(TxHash, Receipt)>, + pub tx_receipts: Vec<(TxHash, T)>, } /// The target block where the chain should be split. @@ -484,26 +487,26 @@ impl From for ChainSplitTarget { /// Result of a split chain. #[derive(Clone, Debug, PartialEq, Eq)] -pub enum ChainSplit { +pub enum ChainSplit { /// Chain is not split. Pending chain is returned. /// Given block split is higher than last block. /// Or in case of split by hash when hash is unknown. - NoSplitPending(Chain), + NoSplitPending(Chain), /// Chain is not split. Canonical chain is returned. /// Given block split is lower than first block. - NoSplitCanonical(Chain), + NoSplitCanonical(Chain), /// Chain is split into two: `[canonical]` and `[pending]` /// The target of this chain split [`ChainSplitTarget`] belongs to the `canonical` chain. Split { /// Contains lower block numbers that are considered canonicalized. It ends with /// the [`ChainSplitTarget`] block. The state of this chain is now empty and no longer /// usable. - canonical: Chain, + canonical: Chain, /// Right contains all subsequent blocks __after__ the [`ChainSplitTarget`] that are still /// pending. /// /// The state of the original chain is moved here. 
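Context for the `chain.rs` hunks above and the `ChainSplit` variant continued just below: `Chain`, `BlockReceipts`, and `ChainSplit` become generic over the node's primitives so the receipt type is no longer hard-wired, and the updated tests (`let chain: Chain = ...`) indicate the parameter is defaulted so existing Ethereum call sites keep compiling. A self-contained sketch of the defaulted-generic pattern, with stand-in `NodePrimitives`/`EthPrimitives` types (the `Receipt` associated type is an assumption inferred from the `ExecutionOutcome` signatures above):

```rust
use std::collections::BTreeMap;

/// Stand-in for `reth_primitives_traits::NodePrimitives` (assumed to expose a
/// `Receipt` associated type).
trait NodePrimitives {
    type Receipt: Clone + std::fmt::Debug;
}

/// Stand-in for `EthPrimitives`, the defaulted primitives type.
#[derive(Clone, Debug, Default)]
struct EthPrimitives;

#[derive(Clone, Debug)]
struct EthReceipt;

impl NodePrimitives for EthPrimitives {
    type Receipt = EthReceipt;
}

/// Defaulted type parameter: code that wrote `Chain` before this change still
/// means `Chain<EthPrimitives>`, while a custom stack can plug in its own
/// receipt type.
struct Chain<N: NodePrimitives = EthPrimitives> {
    /// Receipts per block, keyed by block number (heavily simplified).
    receipts: BTreeMap<u64, Vec<N::Receipt>>,
}

fn main() {
    // The plain name still works, as in the updated tests above...
    let chain: Chain = Chain { receipts: BTreeMap::new() };
    // ...and the parameter is available when a node needs it.
    let explicit: Chain<EthPrimitives> = Chain { receipts: BTreeMap::new() };
    let _ = (chain.receipts.len(), explicit.receipts.len());
}
```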
- pending: Chain, + pending: Chain, }, } @@ -660,7 +663,7 @@ mod tests { #[test] fn chain_append() { - let block = SealedBlockWithSenders::default(); + let block: SealedBlockWithSenders = Default::default(); let block1_hash = B256::new([0x01; 32]); let block2_hash = B256::new([0x02; 32]); let block3_hash = B256::new([0x03; 32]); @@ -678,7 +681,7 @@ mod tests { block3.set_parent_hash(block2_hash); - let mut chain1 = + let mut chain1: Chain = Chain { blocks: BTreeMap::from([(1, block1), (2, block2)]), ..Default::default() }; let chain2 = @@ -692,7 +695,7 @@ mod tests { #[test] fn test_number_split() { - let execution_outcome1 = ExecutionOutcome::new( + let execution_outcome1: ExecutionOutcome = ExecutionOutcome::new( BundleState::new( vec![( Address::new([2; 20]), @@ -724,13 +727,13 @@ mod tests { vec![], ); - let mut block1 = SealedBlockWithSenders::default(); + let mut block1: SealedBlockWithSenders = Default::default(); let block1_hash = B256::new([15; 32]); block1.set_block_number(1); block1.set_hash(block1_hash); block1.senders.push(Address::new([4; 20])); - let mut block2 = SealedBlockWithSenders::default(); + let mut block2: SealedBlockWithSenders = Default::default(); let block2_hash = B256::new([16; 32]); block2.set_block_number(2); block2.set_hash(block2_hash); @@ -739,7 +742,8 @@ mod tests { let mut block_state_extended = execution_outcome1; block_state_extended.extend(execution_outcome2); - let chain = Chain::new(vec![block1.clone(), block2.clone()], block_state_extended, None); + let chain: Chain = + Chain::new(vec![block1.clone(), block2.clone()], block_state_extended, None); let (split1_execution_outcome, split2_execution_outcome) = chain.execution_outcome.clone().split_at(2); @@ -793,7 +797,7 @@ mod tests { use reth_primitives::{Receipt, Receipts, TxType}; // Create a default SealedBlockWithSenders object - let block = SealedBlockWithSenders::default(); + let block: SealedBlockWithSenders = Default::default(); // Define block hashes for block1 and block2 let block1_hash = B256::new([0x01; 32]); @@ -838,7 +842,7 @@ mod tests { // Create a Chain object with a BTreeMap of blocks mapped to their block numbers, // including block1_hash and block2_hash, and the execution_outcome - let chain = Chain { + let chain: Chain = Chain { blocks: BTreeMap::from([(10, block1), (11, block2)]), execution_outcome: execution_outcome.clone(), ..Default::default() diff --git a/crates/evm/src/either.rs b/crates/evm/src/either.rs index 82f84301f03..85bc7e7f9a7 100644 --- a/crates/evm/src/either.rs +++ b/crates/evm/src/either.rs @@ -6,6 +6,7 @@ use crate::{ execute::{BatchExecutor, BlockExecutorProvider, Executor}, system_calls::OnStateHook, }; +use alloc::boxed::Box; use alloy_primitives::BlockNumber; use reth_execution_errors::BlockExecutionError; use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, ExecutionOutcome}; @@ -70,6 +71,13 @@ where type Output = BlockExecutionOutput; type Error = BlockExecutionError; + fn init(&mut self, tx_env_overrides: Box) { + match self { + Self::Left(a) => a.init(tx_env_overrides), + Self::Right(b) => b.init(tx_env_overrides), + } + } + fn execute(self, input: Self::Input<'_>) -> Result { match self { Self::Left(a) => a.execute(input), diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index 677a15dfa1b..42c756f4d93 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -6,9 +6,8 @@ pub use reth_execution_errors::{ }; pub use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, ExecutionOutcome}; pub 
use reth_storage_errors::provider::ProviderError; -use revm::db::states::bundle_state::BundleRetention; -use crate::system_calls::OnStateHook; +use crate::{system_calls::OnStateHook, TxEnvOverrides}; use alloc::{boxed::Box, vec::Vec}; use alloy_eips::eip7685::Requests; use alloy_primitives::BlockNumber; @@ -17,7 +16,10 @@ use reth_consensus::ConsensusError; use reth_primitives::{BlockWithSenders, Receipt}; use reth_prune_types::PruneModes; use reth_revm::batch::BlockBatchRecord; -use revm::{db::BundleState, State}; +use revm::{ + db::{states::bundle_state::BundleRetention, BundleState}, + State, +}; use revm_primitives::{db::Database, U256}; /// A general purpose executor trait that executes an input (e.g. block) and produces an output @@ -32,6 +34,9 @@ pub trait Executor { /// The error type returned by the executor. type Error; + /// Initialize the executor with the given transaction environment overrides. + fn init(&mut self, _tx_env_overrides: Box) {} + /// Consumes the type and executes the block. /// /// # Note @@ -184,6 +189,9 @@ where /// The error type returned by this strategy's methods. type Error: From + core::error::Error; + /// Initialize the strategy with the given transaction environment overrides. + fn init(&mut self, _tx_env_overrides: Box) {} + /// Applies any necessary changes before executing the block's transactions. fn apply_pre_execution_changes( &mut self, @@ -329,6 +337,10 @@ where type Output = BlockExecutionOutput; type Error = S::Error; + fn init(&mut self, env_overrides: Box) { + self.strategy.init(env_overrides); + } + fn execute(mut self, input: Self::Input<'_>) -> Result { let BlockExecutionInput { block, total_difficulty } = input; @@ -480,7 +492,7 @@ mod tests { use alloy_primitives::U256; use reth_chainspec::{ChainSpec, MAINNET}; use revm::db::{CacheDB, EmptyDBTyped}; - use revm_primitives::bytes; + use revm_primitives::{bytes, TxEnv}; use std::sync::Arc; #[derive(Clone, Default)] @@ -703,4 +715,28 @@ mod tests { assert_eq!(block_execution_output.requests, expected_apply_post_execution_changes_result); assert_eq!(block_execution_output.state, expected_finish_result); } + + #[test] + fn test_tx_env_overrider() { + let strategy_factory = TestExecutorStrategyFactory { + execute_transactions_result: ExecuteOutput { + receipts: vec![Receipt::default()], + gas_used: 10, + }, + apply_post_execution_changes_result: Requests::new(vec![bytes!("deadbeef")]), + finish_result: BundleState::default(), + }; + let provider = BasicBlockExecutorProvider::new(strategy_factory); + let db = CacheDB::>::default(); + + // if we want to apply tx env overrides the executor must be mut. + let mut executor = provider.executor(db); + // execute consumes the executor, so we can only call it once. + // let result = executor.execute(BlockExecutionInput::new(&Default::default(), U256::ZERO)); + executor.init(Box::new(|tx_env: &mut TxEnv| { + tx_env.nonce.take(); + })); + let result = executor.execute(BlockExecutionInput::new(&Default::default(), U256::ZERO)); + assert!(result.is_ok()); + } } diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index d20dbe4594a..f01701d5989 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -212,3 +212,18 @@ pub struct NextBlockEnvAttributes { /// The randomness value for the next block. pub prev_randao: B256, } + +/// Function hook that allows to modify a transaction environment. +pub trait TxEnvOverrides { + /// Apply the overrides by modifying the given `TxEnv`. 
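The `init` hooks added above thread transaction-environment overrides from `Executor` down to `BlockExecutionStrategy`, and the blanket impl that immediately follows lets any `FnMut(&mut TxEnv)` closure act as an override. A compressed, runnable sketch of the whole flow, with a simplified `TxEnv` and a stand-in executor (the real types live in `revm_primitives` and `reth_evm`):

```rust
/// Simplified stand-in for `revm_primitives::TxEnv`.
#[derive(Default, Debug)]
struct TxEnv {
    nonce: Option<u64>,
}

/// Mirrors the `TxEnvOverrides` trait introduced in this diff.
trait TxEnvOverrides {
    fn apply(&mut self, env: &mut TxEnv);
}

/// Blanket impl: any `FnMut(&mut TxEnv)` closure is a valid override.
impl<F> TxEnvOverrides for F
where
    F: FnMut(&mut TxEnv),
{
    fn apply(&mut self, env: &mut TxEnv) {
        self(env)
    }
}

/// Stand-in executor showing the `init` + apply-at-execution-time flow.
#[derive(Default)]
struct Executor {
    tx_env_overrides: Option<Box<dyn TxEnvOverrides>>,
}

impl Executor {
    fn init(&mut self, overrides: Box<dyn TxEnvOverrides>) {
        self.tx_env_overrides = Some(overrides);
    }

    fn execute_tx(&mut self, mut env: TxEnv) -> TxEnv {
        if let Some(overrides) = &mut self.tx_env_overrides {
            overrides.apply(&mut env);
        }
        env
    }
}

fn main() {
    let mut executor = Executor::default();
    // Same shape as the new test: disable nonce checks by clearing the nonce.
    executor.init(Box::new(|env: &mut TxEnv| {
        env.nonce.take();
    }));
    assert_eq!(executor.execute_tx(TxEnv { nonce: Some(7) }).nonce, None);
}
```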
+ fn apply(&mut self, env: &mut TxEnv); +} + +impl TxEnvOverrides for F +where + F: FnMut(&mut TxEnv), +{ + fn apply(&mut self, env: &mut TxEnv) { + self(env) + } +} diff --git a/crates/exex/exex/Cargo.toml b/crates/exex/exex/Cargo.toml index f7ab4fce5df..3cbeb115b06 100644 --- a/crates/exex/exex/Cargo.toml +++ b/crates/exex/exex/Cargo.toml @@ -78,5 +78,6 @@ serde = [ "alloy-primitives/serde", "parking_lot/serde", "rand/serde", - "secp256k1/serde" + "secp256k1/serde", + "reth-primitives-traits/serde", ] diff --git a/crates/exex/exex/src/backfill/test_utils.rs b/crates/exex/exex/src/backfill/test_utils.rs index 80af408c5c8..169d2d758de 100644 --- a/crates/exex/exex/src/backfill/test_utils.rs +++ b/crates/exex/exex/src/backfill/test_utils.rs @@ -9,6 +9,7 @@ use reth_evm::execute::{ BatchExecutor, BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, Executor, }; use reth_evm_ethereum::execute::EthExecutorProvider; +use reth_node_api::FullNodePrimitives; use reth_primitives::{ Block, BlockBody, BlockWithSenders, Receipt, SealedBlockWithSenders, Transaction, }; @@ -57,7 +58,7 @@ pub(crate) fn execute_block_and_commit_to_database( block: &BlockWithSenders, ) -> eyre::Result> where - N: ProviderNodeTypes, + N: ProviderNodeTypes>, { let provider = provider_factory.provider()?; @@ -161,7 +162,7 @@ pub(crate) fn blocks_and_execution_outputs( key_pair: Keypair, ) -> eyre::Result)>> where - N: ProviderNodeTypes, + N: ProviderNodeTypes>, { let (block1, block2) = blocks(chain_spec.clone(), key_pair)?; @@ -183,6 +184,7 @@ pub(crate) fn blocks_and_execution_outcome( ) -> eyre::Result<(Vec, ExecutionOutcome)> where N: ProviderNodeTypes, + N::Primitives: FullNodePrimitives, { let (block1, block2) = blocks(chain_spec.clone(), key_pair)?; diff --git a/crates/exex/exex/src/manager.rs b/crates/exex/exex/src/manager.rs index a17de660862..e3d3a3c0690 100644 --- a/crates/exex/exex/src/manager.rs +++ b/crates/exex/exex/src/manager.rs @@ -10,7 +10,7 @@ use reth_chainspec::Head; use reth_metrics::{metrics::Counter, Metrics}; use reth_primitives::SealedHeader; use reth_provider::HeaderProvider; -use reth_tracing::tracing::debug; +use reth_tracing::tracing::{debug, warn}; use std::{ collections::VecDeque, fmt::Debug, @@ -35,6 +35,12 @@ use tokio_util::sync::{PollSendError, PollSender, ReusableBoxFuture}; /// or 17 minutes of 1-second blocks. pub const DEFAULT_EXEX_MANAGER_CAPACITY: usize = 1024; +/// The maximum number of blocks allowed in the WAL before emitting a warning. +/// +/// This constant defines the threshold for the Write-Ahead Log (WAL) size. If the number of blocks +/// in the WAL exceeds this limit, a warning is logged to indicate potential issues. +pub const WAL_BLOCKS_WARNING: usize = 128; + /// The source of the notification. /// /// This distinguishment is needed to not commit any pipeline notificatations to [WAL](`Wal`), @@ -377,6 +383,13 @@ where .unwrap(); self.wal.finalize(lowest_finished_height)?; + if self.wal.num_blocks() > WAL_BLOCKS_WARNING { + warn!( + target: "exex::manager", + blocks = ?self.wal.num_blocks(), + "WAL contains too many blocks and is not getting cleared. That will lead to increased disk space usage. Check that you emit the FinishedHeight event from your ExExes." 
+ ); + } } else { let unfinalized_exexes = exex_finished_heights .into_iter() @@ -644,7 +657,7 @@ mod tests { use reth_primitives::SealedBlockWithSenders; use reth_provider::{ providers::BlockchainProvider2, test_utils::create_test_provider_factory, BlockReader, - BlockWriter, Chain, DatabaseProviderFactory, TransactionVariant, + BlockWriter, Chain, DatabaseProviderFactory, StorageLocation, TransactionVariant, }; use reth_testing_utils::generators::{self, random_block, BlockParams}; @@ -723,7 +736,7 @@ mod tests { ExExManager::new((), vec![exex_handle], 10, wal, empty_finalized_header_stream()); // Define the notification for testing - let mut block1 = SealedBlockWithSenders::default(); + let mut block1: SealedBlockWithSenders = Default::default(); block1.block.header.set_hash(B256::new([0x01; 32])); block1.block.header.set_block_number(10); @@ -741,7 +754,7 @@ mod tests { assert_eq!(exex_manager.next_id, 1); // Push another notification - let mut block2 = SealedBlockWithSenders::default(); + let mut block2: SealedBlockWithSenders = Default::default(); block2.block.header.set_hash(B256::new([0x02; 32])); block2.block.header.set_block_number(20); @@ -779,7 +792,7 @@ mod tests { ); // Push some notifications to fill part of the buffer - let mut block1 = SealedBlockWithSenders::default(); + let mut block1: SealedBlockWithSenders = Default::default(); block1.block.header.set_hash(B256::new([0x01; 32])); block1.block.header.set_block_number(10); @@ -1038,11 +1051,11 @@ mod tests { assert_eq!(exex_handle.next_notification_id, 0); // Setup two blocks for the chain commit notification - let mut block1 = SealedBlockWithSenders::default(); + let mut block1: SealedBlockWithSenders = Default::default(); block1.block.header.set_hash(B256::new([0x01; 32])); block1.block.header.set_block_number(10); - let mut block2 = SealedBlockWithSenders::default(); + let mut block2: SealedBlockWithSenders = Default::default(); block2.block.header.set_hash(B256::new([0x02; 32])); block2.block.header.set_block_number(11); @@ -1091,7 +1104,7 @@ mod tests { // Set finished_height to a value higher than the block tip exex_handle.finished_height = Some(BlockNumHash::new(15, B256::random())); - let mut block1 = SealedBlockWithSenders::default(); + let mut block1: SealedBlockWithSenders = Default::default(); block1.block.header.set_hash(B256::new([0x01; 32])); block1.block.header.set_block_number(10); @@ -1225,7 +1238,7 @@ mod tests { .seal_with_senders() .unwrap(); let provider_rw = provider_factory.database_provider_rw().unwrap(); - provider_rw.insert_block(block.clone()).unwrap(); + provider_rw.insert_block(block.clone(), StorageLocation::Database).unwrap(); provider_rw.commit().unwrap(); let provider = BlockchainProvider2::new(provider_factory).unwrap(); diff --git a/crates/exex/exex/src/notifications.rs b/crates/exex/exex/src/notifications.rs index 14cfe9be4d9..baf504166d1 100644 --- a/crates/exex/exex/src/notifications.rs +++ b/crates/exex/exex/src/notifications.rs @@ -403,7 +403,7 @@ mod tests { use reth_primitives::Block; use reth_provider::{ providers::BlockchainProvider2, test_utils::create_test_provider_factory, BlockWriter, - Chain, DatabaseProviderFactory, + Chain, DatabaseProviderFactory, StorageLocation, }; use reth_testing_utils::generators::{self, random_block, BlockParams}; use tokio::sync::mpsc; @@ -431,6 +431,7 @@ mod tests { let provider_rw = provider_factory.provider_rw()?; provider_rw.insert_block( node_head_block.clone().seal_with_senders().ok_or_eyre("failed to recover senders")?, + 
StorageLocation::Database, )?; provider_rw.commit()?; @@ -574,7 +575,7 @@ mod tests { ..Default::default() }; let provider_rw = provider.database_provider_rw()?; - provider_rw.insert_block(node_head_block)?; + provider_rw.insert_block(node_head_block, StorageLocation::Database)?; provider_rw.commit()?; let node_head_notification = ExExNotification::ChainCommitted { new: Arc::new( diff --git a/crates/exex/exex/src/wal/cache.rs b/crates/exex/exex/src/wal/cache.rs index 882b65e1589..86943f33cfa 100644 --- a/crates/exex/exex/src/wal/cache.rs +++ b/crates/exex/exex/src/wal/cache.rs @@ -35,6 +35,11 @@ impl BlockCache { self.notification_max_blocks.is_empty() } + /// Returns the number of blocks in the cache. + pub(super) fn num_blocks(&self) -> usize { + self.committed_blocks.len() + } + /// Removes all files from the cache that has notifications with a tip block less than or equal /// to the given block number. /// diff --git a/crates/exex/exex/src/wal/mod.rs b/crates/exex/exex/src/wal/mod.rs index a2e8ee8e6c6..41a7829a70f 100644 --- a/crates/exex/exex/src/wal/mod.rs +++ b/crates/exex/exex/src/wal/mod.rs @@ -66,6 +66,11 @@ impl Wal { ) -> eyre::Result> + '_>> { self.inner.iter_notifications() } + + /// Returns the number of blocks in the WAL. + pub fn num_blocks(&self) -> usize { + self.inner.block_cache().num_blocks() + } } /// Inner type for the WAL. diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index 5c3468a3c1c..5b2267505c5 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -45,10 +45,10 @@ use reth_node_ethereum::{ EthEngineTypes, EthEvmConfig, }; use reth_payload_builder::noop::NoopPayloadBuilderService; -use reth_primitives::{Head, SealedBlockWithSenders}; +use reth_primitives::{EthPrimitives, Head, SealedBlockWithSenders}; use reth_provider::{ providers::{BlockchainProvider, StaticFileProvider}, - BlockReader, ProviderFactory, + BlockReader, EthStorage, ProviderFactory, }; use reth_tasks::TaskManager; use reth_transaction_pool::test_utils::{testing_pool, TestPool}; @@ -118,9 +118,10 @@ where pub struct TestNode; impl NodeTypes for TestNode { - type Primitives = (); + type Primitives = EthPrimitives; type ChainSpec = ChainSpec; type StateCommitment = reth_trie_db::MerklePatriciaTrie; + type Storage = EthStorage; } impl NodeTypesWithEngine for TestNode { @@ -129,7 +130,14 @@ impl NodeTypesWithEngine for TestNode { impl Node for TestNode where - N: FullNodeTypes>, + N: FullNodeTypes< + Types: NodeTypesWithEngine< + Engine = EthEngineTypes, + ChainSpec = ChainSpec, + Primitives = EthPrimitives, + Storage = EthStorage, + >, + >, { type ComponentsBuilder = ComponentsBuilder< N, diff --git a/crates/exex/types/Cargo.toml b/crates/exex/types/Cargo.toml index 51097d6109c..3b67fd5aa50 100644 --- a/crates/exex/types/Cargo.toml +++ b/crates/exex/types/Cargo.toml @@ -15,6 +15,7 @@ workspace = true # reth reth-chain-state.workspace = true reth-execution-types.workspace = true +reth-primitives-traits.workspace = true # reth alloy-primitives.workspace = true @@ -38,11 +39,13 @@ serde = [ "reth-execution-types/serde", "alloy-eips/serde", "alloy-primitives/serde", - "rand/serde" + "rand/serde", + "reth-primitives-traits/serde", ] serde-bincode-compat = [ "reth-execution-types/serde-bincode-compat", "serde_with", "reth-primitives/serde-bincode-compat", - "alloy-eips/serde-bincode-compat" + "alloy-eips/serde-bincode-compat", + "reth-primitives-traits/serde-bincode-compat", ] diff --git a/crates/exex/types/src/notification.rs 
b/crates/exex/types/src/notification.rs index 61d42a3319b..fb0762f04b3 100644 --- a/crates/exex/types/src/notification.rs +++ b/crates/exex/types/src/notification.rs @@ -2,27 +2,28 @@ use std::sync::Arc; use reth_chain_state::CanonStateNotification; use reth_execution_types::Chain; +use reth_primitives_traits::NodePrimitives; /// Notifications sent to an `ExEx`. #[derive(Debug, Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub enum ExExNotification { +pub enum ExExNotification { /// Chain got committed without a reorg, and only the new chain is returned. ChainCommitted { /// The new chain after commit. - new: Arc, + new: Arc>, }, /// Chain got reorged, and both the old and the new chains are returned. ChainReorged { /// The old chain before reorg. - old: Arc, + old: Arc>, /// The new chain after reorg. - new: Arc, + new: Arc>, }, /// Chain got reverted, and only the old chain is returned. ChainReverted { /// The old chain before reversion. - old: Arc, + old: Arc>, }, } @@ -60,8 +61,8 @@ impl ExExNotification { } } -impl From for ExExNotification { - fn from(notification: CanonStateNotification) -> Self { +impl From> for ExExNotification
{ + fn from(notification: CanonStateNotification
) -> Self { match notification { CanonStateNotification::Commit { new } => Self::ChainCommitted { new }, CanonStateNotification::Reorg { old, new } => Self::ChainReorged { old, new }, diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs index 4a534afbef5..61ab94b4f2f 100644 --- a/crates/net/discv5/src/config.rs +++ b/crates/net/discv5/src/config.rs @@ -412,11 +412,13 @@ pub fn discv5_sockets_wrt_rlpx_addr( discv5_addr_ipv6.map(|ip| SocketAddrV6::new(ip, discv5_port_ipv6, 0, 0)); if let Some(discv5_addr) = discv5_addr_ipv4 { - warn!(target: "net::discv5", - %discv5_addr, - %rlpx_addr, - "Overwriting discv5 IPv4 address with RLPx IPv4 address, limited to one advertised IP address per IP version" - ); + if discv5_addr != rlpx_addr { + warn!(target: "net::discv5", + %discv5_addr, + %rlpx_addr, + "Overwriting discv5 IPv4 address with RLPx IPv4 address, limited to one advertised IP address per IP version" + ); + } } // overwrite discv5 ipv4 addr with RLPx address. this is since there is no @@ -429,11 +431,13 @@ pub fn discv5_sockets_wrt_rlpx_addr( discv5_addr_ipv4.map(|ip| SocketAddrV4::new(ip, discv5_port_ipv4)); if let Some(discv5_addr) = discv5_addr_ipv6 { - warn!(target: "net::discv5", - %discv5_addr, - %rlpx_addr, - "Overwriting discv5 IPv6 address with RLPx IPv6 address, limited to one advertised IP address per IP version" - ); + if discv5_addr != rlpx_addr { + warn!(target: "net::discv5", + %discv5_addr, + %rlpx_addr, + "Overwriting discv5 IPv6 address with RLPx IPv6 address, limited to one advertised IP address per IP version" + ); + } } // overwrite discv5 ipv6 addr with RLPx address. this is since there is no diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index bebc51ad772..82f45dd23bf 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -20,6 +20,7 @@ use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use std::{ cmp::Ordering, collections::BinaryHeap, + fmt::Debug, mem, ops::RangeInclusive, pin::Pin, @@ -298,7 +299,7 @@ where impl BodyDownloader for BodiesDownloader where - B: BodiesClient + 'static, + B: BodiesClient + 'static, Provider: HeaderProvider + Unpin + 'static, { type Body = B::Body; diff --git a/crates/net/downloaders/src/bodies/task.rs b/crates/net/downloaders/src/bodies/task.rs index de1638f3e66..a2b63c8ed18 100644 --- a/crates/net/downloaders/src/bodies/task.rs +++ b/crates/net/downloaders/src/bodies/task.rs @@ -8,6 +8,7 @@ use reth_network_p2p::{ }; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use std::{ + fmt::Debug, future::Future, ops::RangeInclusive, pin::Pin, @@ -47,10 +48,10 @@ impl TaskDownloader { /// use reth_network_p2p::bodies::client::BodiesClient; /// use reth_primitives_traits::InMemorySize; /// use reth_storage_api::HeaderProvider; - /// use std::sync::Arc; + /// use std::{fmt::Debug, sync::Arc}; /// /// fn t< - /// B: BodiesClient + 'static, + /// B: BodiesClient + 'static, /// Provider: HeaderProvider + Unpin + 'static, /// >( /// client: Arc, @@ -90,7 +91,7 @@ impl TaskDownloader { } } -impl BodyDownloader for TaskDownloader { +impl BodyDownloader for TaskDownloader { type Body = B; fn set_download_range(&mut self, range: RangeInclusive) -> DownloadResult<()> { diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index 486d4a05127..ff352bc2304 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -1,8 +1,8 @@ use 
std::{collections::HashMap, io, path::Path}; -use alloy_consensus::Header; +use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; -use alloy_primitives::{BlockHash, BlockNumber, B256}; +use alloy_primitives::{BlockHash, BlockNumber, Sealable, B256}; use futures::Future; use itertools::Either; use reth_network_p2p::{ @@ -13,7 +13,8 @@ use reth_network_p2p::{ priority::Priority, }; use reth_network_peers::PeerId; -use reth_primitives::{BlockBody, SealedHeader}; +use reth_primitives::SealedHeader; +use reth_primitives_traits::{Block, BlockBody, FullBlock}; use thiserror::Error; use tokio::{fs::File, io::AsyncReadExt}; use tokio_stream::StreamExt; @@ -40,15 +41,15 @@ pub const DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE: u64 = 1_000_000_000; /// /// This reads the entire file into memory, so it is not suitable for large files. #[derive(Debug)] -pub struct FileClient { +pub struct FileClient { /// The buffered headers retrieved when fetching new bodies. - headers: HashMap, + headers: HashMap, /// A mapping between block hash and number. hash_to_number: HashMap, /// The buffered bodies retrieved when fetching new headers. - bodies: HashMap, + bodies: HashMap, } /// An error that can occur when constructing and using a [`FileClient`]. @@ -73,7 +74,7 @@ impl From<&'static str> for FileClientError { } } -impl FileClient { +impl FileClient { /// Create a new file client from a file path. pub async fn new>(path: P) -> Result { let file = File::open(path).await?; @@ -114,7 +115,7 @@ impl FileClient { /// Clones and returns the highest header of this client has or `None` if empty. Seals header /// before returning. - pub fn tip_header(&self) -> Option { + pub fn tip_header(&self) -> Option> { self.headers.get(&self.max_block()?).map(|h| SealedHeader::seal(h.clone())) } @@ -137,13 +138,13 @@ impl FileClient { } /// Use the provided bodies as the file client's block body buffer. - pub fn with_bodies(mut self, bodies: HashMap) -> Self { + pub fn with_bodies(mut self, bodies: HashMap) -> Self { self.bodies = bodies; self } /// Use the provided headers as the file client's block body buffer. - pub fn with_headers(mut self, headers: HashMap) -> Self { + pub fn with_headers(mut self, headers: HashMap) -> Self { self.headers = headers; for (number, header) in &self.headers { self.hash_to_number.insert(header.hash_slow(), *number); @@ -162,14 +163,14 @@ impl FileClient { } /// Returns an iterator over headers in the client. - pub fn headers_iter(&self) -> impl Iterator { + pub fn headers_iter(&self) -> impl Iterator { self.headers.values() } /// Returns a mutable iterator over bodies in the client. /// /// Panics, if file client headers and bodies are not mapping 1-1. - pub fn bodies_iter_mut(&mut self) -> impl Iterator { + pub fn bodies_iter_mut(&mut self) -> impl Iterator { let bodies = &mut self.bodies; let numbers = &self.hash_to_number; bodies.iter_mut().map(|(hash, body)| (numbers[hash], body)) @@ -177,27 +178,28 @@ impl FileClient { /// Returns the current number of transactions in the client. pub fn total_transactions(&self) -> usize { - self.bodies.iter().fold(0, |acc, (_, body)| acc + body.transactions.len()) + self.bodies.iter().fold(0, |acc, (_, body)| acc + body.transactions().len()) } } -impl FromReader for FileClient { +impl FromReader for FileClient { type Error = FileClientError; /// Initialize the [`FileClient`] from bytes that have been read from file. 
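The `FileClient` rework above makes the client generic over a block type; as `from_reader` (continued just below) shows, each decoded block is split into the header and body buffers. A toy sketch of that bookkeeping, with a hypothetical `Block` trait standing in for the `reth_primitives_traits::FullBlock` bound:

```rust
use std::collections::HashMap;

/// Toy stand-in for the block abstraction this diff introduces.
trait Block {
    type Header: Clone;
    type Body: Clone;

    fn number(&self) -> u64;
    fn hash_slow(&self) -> [u8; 32];
    fn header(&self) -> &Self::Header;
    fn body(&self) -> &Self::Body;
}

/// Mirrors the buffering in `FileClient<B>`: headers keyed by number, a
/// hash-to-number index, and bodies keyed by hash.
struct FileClient<B: Block> {
    headers: HashMap<u64, B::Header>,
    hash_to_number: HashMap<[u8; 32], u64>,
    bodies: HashMap<[u8; 32], B::Body>,
}

impl<B: Block> FileClient<B> {
    /// Same bookkeeping as `from_reader` below: one decoded block feeds all
    /// three maps.
    fn insert(&mut self, block: &B) {
        let (number, hash) = (block.number(), block.hash_slow());
        self.headers.insert(number, block.header().clone());
        self.hash_to_number.insert(hash, number);
        self.bodies.insert(hash, block.body().clone());
    }
}
```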
- fn from_reader( - reader: B, + fn from_reader( + reader: R, num_bytes: u64, ) -> impl Future, Self::Error>> where - B: AsyncReadExt + Unpin, + R: AsyncReadExt + Unpin, { let mut headers = HashMap::default(); let mut hash_to_number = HashMap::default(); let mut bodies = HashMap::default(); // use with_capacity to make sure the internal buffer contains the entire chunk - let mut stream = FramedRead::with_capacity(reader, BlockFileCodec, num_bytes as usize); + let mut stream = + FramedRead::with_capacity(reader, BlockFileCodec::::default(), num_bytes as usize); trace!(target: "downloaders::file", target_num_bytes=num_bytes, @@ -225,13 +227,13 @@ impl FromReader for FileClient { } Err(err) => return Err(err), }; - let block_number = block.header.number; - let block_hash = block.header.hash_slow(); + let block_number = block.header().number(); + let block_hash = block.header().hash_slow(); // add to the internal maps - headers.insert(block.header.number, block.header.clone()); - hash_to_number.insert(block_hash, block.header.number); - bodies.insert(block_hash, block.into()); + headers.insert(block.header().number(), block.header().clone()); + hash_to_number.insert(block_hash, block.header().number()); + bodies.insert(block_hash, block.body().clone()); if log_interval == 0 { trace!(target: "downloaders::file", @@ -260,9 +262,9 @@ impl FromReader for FileClient { } } -impl HeadersClient for FileClient { - type Header = Header; - type Output = HeadersFut; +impl HeadersClient for FileClient { + type Header = B::Header; + type Output = HeadersFut; fn get_headers_with_priority( &self, @@ -311,9 +313,9 @@ impl HeadersClient for FileClient { } } -impl BodiesClient for FileClient { - type Body = BlockBody; - type Output = BodiesFut; +impl BodiesClient for FileClient { + type Body = B::Body; + type Output = BodiesFut; fn get_block_bodies_with_priority( &self, @@ -336,7 +338,7 @@ impl BodiesClient for FileClient { } } -impl DownloadClient for FileClient { +impl DownloadClient for FileClient { fn report_bad_message(&self, _peer_id: PeerId) { warn!("Reported a bad message on a file client, the file may be corrupted or invalid"); // noop @@ -542,7 +544,7 @@ mod tests { // create an empty file let file = tempfile::tempfile().unwrap(); - let client = + let client: Arc = Arc::new(FileClient::from_file(file.into()).await.unwrap().with_bodies(bodies.clone())); let mut downloader = BodiesDownloaderBuilder::default().build( client.clone(), @@ -567,14 +569,14 @@ mod tests { let p0 = child_header(&p1); let file = tempfile::tempfile().unwrap(); - let client = Arc::new(FileClient::from_file(file.into()).await.unwrap().with_headers( - HashMap::from([ + let client: Arc = Arc::new( + FileClient::from_file(file.into()).await.unwrap().with_headers(HashMap::from([ (0u64, p0.clone().unseal()), (1, p1.clone().unseal()), (2, p2.clone().unseal()), (3, p3.clone().unseal()), - ]), - )); + ])), + ); let mut downloader = ReverseHeadersDownloaderBuilder::default() .stream_batch_size(3) @@ -596,7 +598,7 @@ mod tests { // Generate some random blocks let (file, headers, _) = generate_bodies_file(0..=19).await; // now try to read them back - let client = Arc::new(FileClient::from_file(file).await.unwrap()); + let client: Arc = Arc::new(FileClient::from_file(file).await.unwrap()); // construct headers downloader and use first header let mut header_downloader = ReverseHeadersDownloaderBuilder::default() @@ -621,7 +623,7 @@ mod tests { let (file, headers, mut bodies) = generate_bodies_file(0..=19).await; // now try to read them back - 
let client = Arc::new(FileClient::from_file(file).await.unwrap()); + let client: Arc = Arc::new(FileClient::from_file(file).await.unwrap()); // insert headers in db for the bodies downloader insert_headers(factory.db_ref().db(), &headers); diff --git a/crates/net/downloaders/src/file_codec.rs b/crates/net/downloaders/src/file_codec.rs index 3e754f9cf49..57a15b6c888 100644 --- a/crates/net/downloaders/src/file_codec.rs +++ b/crates/net/downloaders/src/file_codec.rs @@ -3,7 +3,6 @@ use crate::file_client::FileClientError; use alloy_primitives::bytes::{Buf, BytesMut}; use alloy_rlp::{Decodable, Encodable}; -use reth_primitives::Block; use tokio_util::codec::{Decoder, Encoder}; /// Codec for reading raw block bodies from a file. @@ -19,10 +18,16 @@ use tokio_util::codec::{Decoder, Encoder}; /// /// It's recommended to use [`with_capacity`](tokio_util::codec::FramedRead::with_capacity) to set /// the capacity of the framed reader to the size of the file. -pub(crate) struct BlockFileCodec; +pub(crate) struct BlockFileCodec(std::marker::PhantomData); -impl Decoder for BlockFileCodec { - type Item = Block; +impl Default for BlockFileCodec { + fn default() -> Self { + Self(std::marker::PhantomData) + } +} + +impl Decoder for BlockFileCodec { + type Item = B; type Error = FileClientError; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { @@ -31,18 +36,17 @@ impl Decoder for BlockFileCodec { } let buf_slice = &mut src.as_ref(); - let body = - Block::decode(buf_slice).map_err(|err| FileClientError::Rlp(err, src.to_vec()))?; + let body = B::decode(buf_slice).map_err(|err| FileClientError::Rlp(err, src.to_vec()))?; src.advance(src.len() - buf_slice.len()); Ok(Some(body)) } } -impl Encoder for BlockFileCodec { +impl Encoder for BlockFileCodec { type Error = FileClientError; - fn encode(&mut self, item: Block, dst: &mut BytesMut) -> Result<(), Self::Error> { + fn encode(&mut self, item: B, dst: &mut BytesMut) -> Result<(), Self::Error> { item.encode(dst); Ok(()) } diff --git a/crates/net/downloaders/src/headers/reverse_headers.rs b/crates/net/downloaders/src/headers/reverse_headers.rs index 0f8111e4395..63a20ff27f5 100644 --- a/crates/net/downloaders/src/headers/reverse_headers.rs +++ b/crates/net/downloaders/src/headers/reverse_headers.rs @@ -9,7 +9,7 @@ use futures::{stream::Stream, FutureExt}; use futures_util::{stream::FuturesUnordered, StreamExt}; use rayon::prelude::*; use reth_config::config::HeadersConfig; -use reth_consensus::Consensus; +use reth_consensus::HeaderValidator; use reth_network_p2p::{ error::{DownloadError, DownloadResult, PeerRequestResult}, headers::{ @@ -68,7 +68,7 @@ impl From for ReverseHeadersDownloaderError { #[derive(Debug)] pub struct ReverseHeadersDownloader { /// Consensus client used to validate headers - consensus: Arc>, + consensus: Arc>, /// Client used to download headers. client: Arc, /// The local head of the chain. 
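A design note on the `BlockFileCodec<B>(PhantomData<B>)` change above: the `Default` impl is written by hand, plausibly because `#[derive(Default)]` would conservatively add a `B: Default` bound even though only a `PhantomData<B>` is stored. A small demonstration of the difference:

```rust
use std::marker::PhantomData;

/// Zero-sized, type-parameterized codec, matching the diff above.
struct BlockFileCodec<B>(PhantomData<B>);

/// Hand-written so that `BlockFileCodec<B>: Default` holds for every `B`;
/// `#[derive(Default)]` would have added an unnecessary `B: Default` bound.
impl<B> Default for BlockFileCodec<B> {
    fn default() -> Self {
        Self(PhantomData)
    }
}

/// Deliberately not `Default`:
struct NoDefault;

fn main() {
    // Still fine, because the manual impl places no bounds on `B`.
    let _codec = BlockFileCodec::<NoDefault>::default();
}
```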
@@ -1165,7 +1165,7 @@ impl ReverseHeadersDownloaderBuilder { pub fn build( self, client: H, - consensus: Arc>, + consensus: Arc>, ) -> ReverseHeadersDownloader where H: HeadersClient + 'static, diff --git a/crates/net/downloaders/src/headers/task.rs b/crates/net/downloaders/src/headers/task.rs index 81c4cd80da3..3dbfd5e3615 100644 --- a/crates/net/downloaders/src/headers/task.rs +++ b/crates/net/downloaders/src/headers/task.rs @@ -8,6 +8,7 @@ use reth_network_p2p::headers::{ use reth_primitives::SealedHeader; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use std::{ + fmt::Debug, future::Future, pin::Pin, task::{ready, Context, Poll}, @@ -44,10 +45,10 @@ impl TaskDownloader { /// # use std::sync::Arc; /// # use reth_downloaders::headers::reverse_headers::ReverseHeadersDownloader; /// # use reth_downloaders::headers::task::TaskDownloader; - /// # use reth_consensus::Consensus; + /// # use reth_consensus::HeaderValidator; /// # use reth_network_p2p::headers::client::HeadersClient; /// # use reth_primitives_traits::BlockHeader; - /// # fn t + 'static>(consensus:Arc>, client: Arc) { + /// # fn t + 'static>(consensus:Arc>, client: Arc) { /// let downloader = ReverseHeadersDownloader::::builder().build( /// client, /// consensus @@ -82,7 +83,7 @@ impl TaskDownloader { } } -impl HeaderDownloader for TaskDownloader { +impl HeaderDownloader for TaskDownloader { type Header = H; fn update_sync_gap(&mut self, head: SealedHeader, target: SyncTarget) { diff --git a/crates/net/downloaders/src/test_utils/mod.rs b/crates/net/downloaders/src/test_utils/mod.rs index 7755c5e6017..635383ce3f3 100644 --- a/crates/net/downloaders/src/test_utils/mod.rs +++ b/crates/net/downloaders/src/test_utils/mod.rs @@ -43,7 +43,7 @@ pub(crate) async fn generate_bodies_file( let raw_block_bodies = create_raw_bodies(headers.iter().cloned(), &mut bodies.clone()); let file: File = tempfile::tempfile().unwrap().into(); - let mut writer = FramedWrite::new(file, BlockFileCodec); + let mut writer = FramedWrite::new(file, BlockFileCodec::default()); // rlp encode one after the other for block in raw_block_bodies { diff --git a/crates/net/eth-wire-types/Cargo.toml b/crates/net/eth-wire-types/Cargo.toml index f9759ffc25a..8b89603167d 100644 --- a/crates/net/eth-wire-types/Cargo.toml +++ b/crates/net/eth-wire-types/Cargo.toml @@ -65,4 +65,5 @@ serde = [ "alloy-primitives/serde", "bytes/serde", "rand/serde", + "reth-primitives-traits/serde", ] diff --git a/crates/net/eth-wire-types/src/blocks.rs b/crates/net/eth-wire-types/src/blocks.rs index 06549e769e6..97bbe36b3d6 100644 --- a/crates/net/eth-wire-types/src/blocks.rs +++ b/crates/net/eth-wire-types/src/blocks.rs @@ -342,7 +342,7 @@ mod tests { message: BlockBodies(vec![ BlockBody { transactions: vec![ - TransactionSigned::from_transaction_and_signature(Transaction::Legacy(TxLegacy { + TransactionSigned::new_unhashed(Transaction::Legacy(TxLegacy { chain_id: Some(1), nonce: 0x8u64, gas_price: 0x4a817c808, @@ -356,7 +356,7 @@ mod tests { false, ), ), - TransactionSigned::from_transaction_and_signature(Transaction::Legacy(TxLegacy { + TransactionSigned::new_unhashed(Transaction::Legacy(TxLegacy { chain_id: Some(1), nonce: 0x9u64, gas_price: 0x4a817c809, @@ -413,7 +413,7 @@ mod tests { message: BlockBodies(vec![ BlockBody { transactions: vec![ - TransactionSigned::from_transaction_and_signature(Transaction::Legacy( + TransactionSigned::new_unhashed(Transaction::Legacy( TxLegacy { chain_id: Some(1), nonce: 0x8u64, @@ -423,13 +423,13 @@ mod tests { value: U256::from(0x200u64), input: 
Default::default(), }), - Signature::new( + Signature::new( U256::from_str("0x64b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12").unwrap(), U256::from_str("0x64b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10").unwrap(), false, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(1), nonce: 0x9u64, diff --git a/crates/net/eth-wire-types/src/broadcast.rs b/crates/net/eth-wire-types/src/broadcast.rs index f37c5e74a04..7d74085d355 100644 --- a/crates/net/eth-wire-types/src/broadcast.rs +++ b/crates/net/eth-wire-types/src/broadcast.rs @@ -90,9 +90,9 @@ generate_tests!(#[rlp, 25] NewBlock, EthNewBlockTests); #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(rlp, 10)] -pub struct Transactions( +pub struct Transactions( /// New transactions for the peer to include in its mempool. - pub Vec, + pub Vec, ); impl Transactions { @@ -102,14 +102,14 @@ impl Transactions { } } -impl From> for Transactions { - fn from(txs: Vec) -> Self { +impl From> for Transactions { + fn from(txs: Vec) -> Self { Self(txs) } } -impl From for Vec { - fn from(txs: Transactions) -> Self { +impl From> for Vec { + fn from(txs: Transactions) -> Self { txs.0 } } @@ -121,9 +121,9 @@ impl From for Vec { #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper)] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(rlp, 20)] -pub struct SharedTransactions( +pub struct SharedTransactions( /// New transactions for the peer to include in its mempool. - pub Vec>, + pub Vec>, ); /// A wrapper type for all different new pooled transaction types diff --git a/crates/net/eth-wire-types/src/message.rs b/crates/net/eth-wire-types/src/message.rs index f83e21124e3..9a866720310 100644 --- a/crates/net/eth-wire-types/src/message.rs +++ b/crates/net/eth-wire-types/src/message.rs @@ -12,7 +12,6 @@ use super::{ NewPooledTransactionHashes68, NodeData, PooledTransactions, Receipts, Status, Transactions, }; use crate::{EthNetworkPrimitives, EthVersion, NetworkPrimitives, SharedTransactions}; - use alloy_primitives::bytes::{Buf, BufMut}; use alloy_rlp::{length_of_length, Decodable, Encodable, Header}; use std::{fmt::Debug, sync::Arc}; @@ -183,7 +182,11 @@ pub enum EthMessage { )] NewBlock(Box>), /// Represents a Transactions message broadcast to the network. - Transactions(Transactions), + #[cfg_attr( + feature = "serde", + serde(bound = "N::BroadcastedTransaction: serde::Serialize + serde::de::DeserializeOwned") + )] + Transactions(Transactions), /// Represents a `NewPooledTransactionHashes` message for eth/66 version. NewPooledTransactionHashes66(NewPooledTransactionHashes66), /// Represents a `NewPooledTransactionHashes` message for eth/68 version. @@ -208,7 +211,11 @@ pub enum EthMessage { /// Represents a `GetPooledTransactions` request-response pair. GetPooledTransactions(RequestPair), /// Represents a `PooledTransactions` request-response pair. - PooledTransactions(RequestPair), + #[cfg_attr( + feature = "serde", + serde(bound = "N::PooledTransaction: serde::Serialize + serde::de::DeserializeOwned") + )] + PooledTransactions(RequestPair>), /// Represents a `GetNodeData` request-response pair. GetNodeData(RequestPair), /// Represents a `NodeData` request-response pair. 
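The explicit `serde(bound = ...)` attributes added to `EthMessage` above are needed because serde's derive would otherwise infer bounds on `N` itself, even though `N` is only a marker carrying associated types. A self-contained sketch of the pattern (assumes the `serde` crate with the `derive` feature):

```rust
use serde::{Deserialize, Serialize};

/// Marker trait standing in for `NetworkPrimitives`.
trait NetworkPrimitives {
    type BroadcastedTransaction;
}

/// Without the `bound` attribute, the derive would require
/// `N: Serialize + Deserialize`, which marker types never implement; the
/// attribute replaces the inferred bounds with ones on the associated type
/// that actually appears in the variants.
#[derive(Serialize, Deserialize)]
#[serde(bound = "N::BroadcastedTransaction: Serialize + serde::de::DeserializeOwned")]
enum Message<N: NetworkPrimitives> {
    Transactions(Vec<N::BroadcastedTransaction>),
}
```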
@@ -299,7 +306,7 @@ pub enum EthBroadcastMessage { /// Represents a new block broadcast message. NewBlock(Arc>), /// Represents a transactions broadcast message. - Transactions(SharedTransactions), + Transactions(SharedTransactions), } // === impl EthBroadcastMessage === diff --git a/crates/net/eth-wire-types/src/primitives.rs b/crates/net/eth-wire-types/src/primitives.rs index eab36c3b6a7..ff7ab1c801b 100644 --- a/crates/net/eth-wire-types/src/primitives.rs +++ b/crates/net/eth-wire-types/src/primitives.rs @@ -1,9 +1,8 @@ //! Abstraction over primitive types in network messages. -use std::fmt::Debug; - use alloy_rlp::{Decodable, Encodable}; use reth_primitives_traits::{Block, BlockHeader}; +use std::fmt::Debug; /// Abstraction over primitive types which might appear in network messages. See /// [`crate::EthMessage`] for more context. @@ -70,6 +69,18 @@ pub trait NetworkPrimitives: + PartialEq + Eq + 'static; + + /// The transaction type which peers return in `GetReceipts` messages. + type Receipt: Encodable + + Decodable + + Send + + Sync + + Unpin + + Clone + + Debug + + PartialEq + + Eq + + 'static; } /// Primitive types used by Ethereum network. @@ -83,4 +94,5 @@ impl NetworkPrimitives for EthNetworkPrimitives { type Block = reth_primitives::Block; type BroadcastedTransaction = reth_primitives::TransactionSigned; type PooledTransaction = reth_primitives::PooledTransactionsElement; + type Receipt = reth_primitives::Receipt; } diff --git a/crates/net/eth-wire-types/src/response.rs b/crates/net/eth-wire-types/src/response.rs deleted file mode 100644 index dfcf5ed56a8..00000000000 --- a/crates/net/eth-wire-types/src/response.rs +++ /dev/null @@ -1,29 +0,0 @@ -use crate::{ - BlockBodies, BlockHeaders, NodeData, PooledTransactions, Receipts, RequestPair, Status, -}; - -// This type is analogous to the `zebra_network::Response` type. -/// An ethereum network response for version 66. -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum Response { - /// The request does not have a response. - Nil, - - /// The [`Status`](super::Status) message response in the eth protocol handshake. - Status(Status), - - /// The response to a [`Request::GetBlockHeaders`](super::Request::GetBlockHeaders) request. - BlockHeaders(RequestPair), - - /// The response to a [`Request::GetBlockBodies`](super::Request::GetBlockBodies) request. - BlockBodies(RequestPair), - - /// The response to a [`Request::GetPooledTransactions`](super::Request::GetPooledTransactions) request. - PooledTransactions(RequestPair), - - /// The response to a [`Request::GetNodeData`](super::Request::GetNodeData) request. - NodeData(RequestPair), - - /// The response to a [`Request::GetReceipts`](super::Request::GetReceipts) request. - Receipts(RequestPair), -} diff --git a/crates/net/eth-wire-types/src/transactions.rs b/crates/net/eth-wire-types/src/transactions.rs index 8db96c10042..ca76f0a8c7e 100644 --- a/crates/net/eth-wire-types/src/transactions.rs +++ b/crates/net/eth-wire-types/src/transactions.rs @@ -5,7 +5,7 @@ use alloy_primitives::B256; use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; use derive_more::{Constructor, Deref, IntoIterator}; use reth_codecs_derive::add_arbitrary_tests; -use reth_primitives::{transaction::TransactionConversionError, PooledTransactionsElement}; +use reth_primitives::PooledTransactionsElement; /// A list of transaction hashes that the peer would like transaction bodies for. 
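The `NetworkPrimitives` change above adds a `Receipt` associated type so `GetReceipts`/`Receipts` traffic can be typed per network. A hedged sketch of what a custom network might provide, with a condensed stand-in trait (the real one also carries header, body, block, and transaction types with RLP encode/decode bounds) and a hypothetical `CustomNetworkPrimitives`:

```rust
use std::fmt::Debug;

/// Condensed stand-in for `NetworkPrimitives` after this diff.
trait NetworkPrimitives: Send + Sync + Debug + 'static {
    type Receipt: Clone + Debug + PartialEq + Eq + Send + Sync + Unpin + 'static;
}

/// Hypothetical custom network with its own receipt shape.
#[derive(Clone, Copy, Debug, Default)]
struct CustomNetworkPrimitives;

#[derive(Clone, Debug, PartialEq, Eq)]
struct CustomReceipt {
    success: bool,
    cumulative_gas_used: u64,
}

impl NetworkPrimitives for CustomNetworkPrimitives {
    type Receipt = CustomReceipt;
}

/// Downstream code can now speak about receipts generically per network.
fn receipts_len<N: NetworkPrimitives>(receipts: &[Vec<N::Receipt>]) -> usize {
    receipts.iter().map(Vec::len).sum()
}

fn main() {
    let blocks: Vec<Vec<CustomReceipt>> =
        vec![vec![CustomReceipt { success: true, cumulative_gas_used: 21_000 }]];
    assert_eq!(receipts_len::<CustomNetworkPrimitives>(&blocks), 1);
}
```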
#[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] @@ -60,9 +60,9 @@ impl PooledTransactions { impl TryFrom> for PooledTransactions where - T: TryFrom, + T: TryFrom, { - type Error = TransactionConversionError; + type Error = T::Error; fn try_from(txs: Vec) -> Result { txs.into_iter().map(T::try_from).collect() @@ -130,7 +130,7 @@ mod tests { let expected = hex!("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"); let mut data = vec![]; let txs = vec![ - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(1), nonce: 0x8u64, @@ -152,7 +152,7 @@ mod tests { false, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(1), nonce: 0x09u64, @@ -196,7 +196,7 @@ mod tests { fn decode_pooled_transactions() { let data = hex!("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"); let txs = vec![ - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(1), nonce: 0x8u64, @@ -218,7 +218,7 @@ mod tests { false, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(1), nonce: 0x09u64, @@ -260,7 +260,7 @@ mod tests { let decoded_transactions = RequestPair::::decode(&mut &data[..]).unwrap(); let txs = vec![ - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(4), nonce: 15u64, @@ -282,7 +282,7 @@ mod tests { true, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Eip1559(TxEip1559 { chain_id: 4, nonce: 26u64, @@ -306,7 +306,7 @@ mod tests { true, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(4), nonce: 3u64, @@ -328,7 +328,7 @@ mod tests { false, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(4), nonce: 1u64, @@ -350,7 +350,7 @@ mod tests { false, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(4), nonce: 2u64, @@ -397,7 +397,7 @@ mod tests { fn encode_pooled_transactions_network() { let expected = 
hex!("f9022980f90225f8650f84832156008287fb94cf7f9e66af820a19257a2108375b180b0ec491678204d2802ca035b7bfeb9ad9ece2cbafaaf8e202e706b4cfaeb233f46198f00b44d4a566a981a0612638fb29427ca33b9a3be2a0a561beecfe0269655be160d35e72d366a6a860b87502f872041a8459682f008459682f0d8252089461815774383099e24810ab832a5b2a5425c154d58829a2241af62c000080c001a059e6b67f48fb32e7e570dfb11e042b5ad2e55e3ce3ce9cd989c7e06e07feeafda0016b83f4f980694ed2eee4d10667242b1f40dc406901b34125b008d334d47469f86b0384773594008398968094d3e8763675e4c425df46cc3b5c0f6cbdac39604687038d7ea4c68000802ba0ce6834447c0a4193c40382e6c57ae33b241379c5418caac9cdc18d786fd12071a03ca3ae86580e94550d7c071e3a02eadb5a77830947c9225165cf9100901bee88f86b01843b9aca00830186a094d3e8763675e4c425df46cc3b5c0f6cbdac3960468702769bb01b2a00802ba0e24d8bd32ad906d6f8b8d7741e08d1959df021698b19ee232feba15361587d0aa05406ad177223213df262cb66ccbb2f46bfdccfdfbbb5ffdda9e2c02d977631daf86b02843b9aca00830186a094d3e8763675e4c425df46cc3b5c0f6cbdac39604687038d7ea4c68000802ba00eb96ca19e8a77102767a41fc85a36afd5c61ccb09911cec5d3e86e193d9c5aea03a456401896b1b6055311536bf00a718568c744d8c1f9df59879e8350220ca18"); let txs = vec![ - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(4), nonce: 15u64, @@ -419,7 +419,7 @@ mod tests { true, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Eip1559(TxEip1559 { chain_id: 4, nonce: 26u64, @@ -443,7 +443,7 @@ mod tests { true, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(4), nonce: 3u64, @@ -465,7 +465,7 @@ mod tests { false, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(4), nonce: 1u64, @@ -487,7 +487,7 @@ mod tests { false, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(4), nonce: 2u64, diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml index 3999f658e0a..ffbd3017fa6 100644 --- a/crates/net/eth-wire/Cargo.toml +++ b/crates/net/eth-wire/Cargo.toml @@ -87,7 +87,8 @@ serde = [ "rand/serde", "secp256k1/serde", "reth-codecs/serde", - "alloy-chains/serde" + "alloy-chains/serde", + "reth-primitives-traits/serde", ] [[test]] diff --git a/crates/net/network-api/src/events.rs b/crates/net/network-api/src/events.rs index af392b6f9ea..624c43f5e1b 100644 --- a/crates/net/network-api/src/events.rs +++ b/crates/net/network-api/src/events.rs @@ -154,7 +154,7 @@ pub enum PeerRequest { /// The request for pooled transactions. request: GetPooledTransactions, /// The channel to send the response for pooled transactions. - response: oneshot::Sender>, + response: oneshot::Sender>>, }, /// Requests `NodeData` from the peer. 
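The `PeerRequest::GetPooledTransactions` variant in the `events.rs` hunk above (its doc comments continue just below) now answers with a `PooledTransactions` payload parameterized by `N::PooledTransaction` over a oneshot channel. A simplified, runnable sketch of that request/response shape (stand-in types; the real channel carries a `RequestResult` wrapper and `alloy_primitives::B256` hashes, and this assumes a tokio runtime):

```rust
use tokio::sync::oneshot;

/// Stand-ins for the wire types.
type TxHash = [u8; 32];

#[derive(Debug)]
struct PooledTransactions<T>(Vec<T>);

/// Condensed version of the variant in the diff above.
enum PeerRequest<T> {
    GetPooledTransactions {
        hashes: Vec<TxHash>,
        response: oneshot::Sender<PooledTransactions<T>>,
    },
}

#[tokio::main]
async fn main() {
    let (tx, rx) = oneshot::channel::<PooledTransactions<String>>();
    let request =
        PeerRequest::GetPooledTransactions { hashes: vec![[0u8; 32]], response: tx };

    // A session task would serve the request and answer over the channel.
    let PeerRequest::GetPooledTransactions { hashes, response } = request;
    assert_eq!(hashes.len(), 1);
    response.send(PooledTransactions(vec!["tx-payload".to_string()])).unwrap();

    // The requester simply awaits the receiver half.
    let payload = rx.await.unwrap();
    assert_eq!(payload.0.len(), 1);
}
```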
/// diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index dde0b4a0b23..ab9e89c2ca8 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -23,6 +23,7 @@ reth-network-p2p.workspace = true reth-discv4.workspace = true reth-discv5.workspace = true reth-dns-discovery.workspace = true +reth-ethereum-forks.workspace = true reth-eth-wire.workspace = true reth-eth-wire-types.workspace = true reth-ecies.workspace = true @@ -121,6 +122,7 @@ serde = [ "rand/serde", "smallvec/serde", "url/serde", + "reth-primitives-traits/serde", ] test-utils = [ "dep:reth-provider", diff --git a/crates/net/network/src/builder.rs b/crates/net/network/src/builder.rs index 31038906b25..da003a2e290 100644 --- a/crates/net/network/src/builder.rs +++ b/crates/net/network/src/builder.rs @@ -1,15 +1,14 @@ //! Builder support for configuring the entire setup. -use reth_eth_wire::{EthNetworkPrimitives, NetworkPrimitives}; -use reth_network_api::test_utils::PeersHandleProvider; -use reth_transaction_pool::TransactionPool; -use tokio::sync::mpsc; - use crate::{ eth_requests::EthRequestHandler, transactions::{TransactionsManager, TransactionsManagerConfig}, NetworkHandle, NetworkManager, }; +use reth_eth_wire::{EthNetworkPrimitives, NetworkPrimitives}; +use reth_network_api::test_utils::PeersHandleProvider; +use reth_transaction_pool::TransactionPool; +use tokio::sync::mpsc; /// We set the max channel capacity of the `EthRequestHandler` to 256 /// 256 requests with malicious 10MB body requests is 2.6GB which can be absorbed by the node. diff --git a/crates/net/network/src/cache.rs b/crates/net/network/src/cache.rs index 758b4916790..32389ec4b7b 100644 --- a/crates/net/network/src/cache.rs +++ b/crates/net/network/src/cache.rs @@ -1,11 +1,10 @@ //! Network cache support use core::hash::BuildHasher; -use std::{fmt, hash::Hash}; - use derive_more::{Deref, DerefMut}; use itertools::Itertools; use schnellru::{ByLength, Limiter, RandomState, Unlimited}; +use std::{fmt, hash::Hash}; /// A minimal LRU cache based on a [`LruMap`](schnellru::LruMap) with limited capacity. /// diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index db7b384c2b3..e54000895a7 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -1,7 +1,11 @@ //! 
Network config support -use std::{collections::HashSet, net::SocketAddr, sync::Arc}; - +use crate::{ + error::NetworkError, + import::{BlockImport, ProofOfStakeBlockImport}, + transactions::TransactionsManagerConfig, + NetworkHandle, NetworkManager, +}; use reth_chainspec::{ChainSpecProvider, EthChainSpec, Hardforks}; use reth_discv4::{Discv4Config, Discv4ConfigBuilder, NatResolver, DEFAULT_DISCOVERY_ADDRESS}; use reth_discv5::NetworkStackId; @@ -9,19 +13,13 @@ use reth_dns_discovery::DnsDiscoveryConfig; use reth_eth_wire::{ EthNetworkPrimitives, HelloMessage, HelloMessageWithProtocols, NetworkPrimitives, Status, }; +use reth_ethereum_forks::{ForkFilter, Head}; use reth_network_peers::{mainnet_nodes, pk2id, sepolia_nodes, PeerId, TrustedPeer}; use reth_network_types::{PeersConfig, SessionsConfig}; -use reth_primitives::{ForkFilter, Head}; use reth_storage_api::{noop::NoopBlockReader, BlockNumReader, BlockReader, HeaderProvider}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use secp256k1::SECP256K1; - -use crate::{ - error::NetworkError, - import::{BlockImport, ProofOfStakeBlockImport}, - transactions::TransactionsManagerConfig, - NetworkHandle, NetworkManager, -}; +use std::{collections::HashSet, net::SocketAddr, sync::Arc}; // re-export for convenience use crate::protocol::{IntoRlpxSubProtocol, RlpxSubProtocols}; diff --git a/crates/net/network/src/discovery.rs b/crates/net/network/src/discovery.rs index 5b2bb788f47..c0b9ffa7630 100644 --- a/crates/net/network/src/discovery.rs +++ b/crates/net/network/src/discovery.rs @@ -1,13 +1,9 @@ //! Discovery support for the network. -use std::{ - collections::VecDeque, - net::{IpAddr, SocketAddr}, - pin::Pin, - sync::Arc, - task::{ready, Context, Poll}, +use crate::{ + cache::LruMap, + error::{NetworkError, ServiceKind}, }; - use enr::Enr; use futures::StreamExt; use reth_discv4::{DiscoveryUpdate, Discv4, Discv4Config}; @@ -15,20 +11,22 @@ use reth_discv5::{DiscoveredPeer, Discv5}; use reth_dns_discovery::{ DnsDiscoveryConfig, DnsDiscoveryHandle, DnsDiscoveryService, DnsNodeRecordUpdate, DnsResolver, }; +use reth_ethereum_forks::{EnrForkIdEntry, ForkId}; use reth_network_api::{DiscoveredEvent, DiscoveryEvent}; use reth_network_peers::{NodeRecord, PeerId}; use reth_network_types::PeerAddr; -use reth_primitives::{EnrForkIdEntry, ForkId}; use secp256k1::SecretKey; +use std::{ + collections::VecDeque, + net::{IpAddr, SocketAddr}, + pin::Pin, + sync::Arc, + task::{ready, Context, Poll}, +}; use tokio::{sync::mpsc, task::JoinHandle}; use tokio_stream::{wrappers::ReceiverStream, Stream}; use tracing::trace; -use crate::{ - cache::LruMap, - error::{NetworkError, ServiceKind}, -}; - /// Default max capacity for cache of discovered peers. /// /// Default is 10 000 peers. diff --git a/crates/net/network/src/error.rs b/crates/net/network/src/error.rs index 2709c4a2907..8156392b22f 100644 --- a/crates/net/network/src/error.rs +++ b/crates/net/network/src/error.rs @@ -1,7 +1,6 @@ //! Possible errors when interacting with the network. -use std::{fmt, io, io::ErrorKind, net::SocketAddr}; - +use crate::session::PendingSessionHandshakeError; use reth_dns_discovery::resolver::ResolveError; use reth_ecies::ECIESErrorImpl; use reth_eth_wire::{ @@ -9,8 +8,7 @@ use reth_eth_wire::{ DisconnectReason, }; use reth_network_types::BackoffKind; - -use crate::session::PendingSessionHandshakeError; +use std::{fmt, io, io::ErrorKind, net::SocketAddr}; /// Service kind. 
#[derive(Debug, PartialEq, Eq, Copy, Clone)] diff --git a/crates/net/network/src/eth_requests.rs b/crates/net/network/src/eth_requests.rs index 8121b9675ed..0f9348a42ce 100644 --- a/crates/net/network/src/eth_requests.rs +++ b/crates/net/network/src/eth_requests.rs @@ -1,12 +1,9 @@ //! Blocks/Headers management for the p2p network. -use std::{ - future::Future, - pin::Pin, - task::{Context, Poll}, - time::Duration, +use crate::{ + budget::DEFAULT_BUDGET_TRY_DRAIN_DOWNLOADERS, metered_poll_nested_stream_with_budget, + metrics::EthRequestHandlerMetrics, }; - use alloy_consensus::Header; use alloy_eips::BlockHashOrNumber; use alloy_rlp::Encodable; @@ -20,14 +17,15 @@ use reth_network_p2p::error::RequestResult; use reth_network_peers::PeerId; use reth_primitives::BlockBody; use reth_storage_api::{BlockReader, HeaderProvider, ReceiptProvider}; +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; use tokio::sync::{mpsc::Receiver, oneshot}; use tokio_stream::wrappers::ReceiverStream; -use crate::{ - budget::DEFAULT_BUDGET_TRY_DRAIN_DOWNLOADERS, metered_poll_nested_stream_with_budget, - metrics::EthRequestHandlerMetrics, -}; - // Limits: /// Maximum number of receipts to serve. diff --git a/crates/net/network/src/fetch/client.rs b/crates/net/network/src/fetch/client.rs index 584c079b8d8..e24ea167f5f 100644 --- a/crates/net/network/src/fetch/client.rs +++ b/crates/net/network/src/fetch/client.rs @@ -1,10 +1,6 @@ //! A client implementation that can interact with the network and download data. -use std::sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, -}; - +use crate::{fetch::DownloadRequest, flattened_response::FlattenedResponse}; use alloy_primitives::B256; use futures::{future, future::Either}; use reth_eth_wire::{EthNetworkPrimitives, NetworkPrimitives}; @@ -18,10 +14,12 @@ use reth_network_p2p::{ }; use reth_network_peers::PeerId; use reth_network_types::ReputationChangeKind; +use std::sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, +}; use tokio::sync::{mpsc::UnboundedSender, oneshot}; -use crate::{fetch::DownloadRequest, flattened_response::FlattenedResponse}; - #[cfg_attr(doc, aquamarine::aquamarine)] /// Front-end API for fetching data from the network. 
/// diff --git a/crates/net/network/src/fetch/mod.rs b/crates/net/network/src/fetch/mod.rs index 8af6300b705..c5474587adf 100644 --- a/crates/net/network/src/fetch/mod.rs +++ b/crates/net/network/src/fetch/mod.rs @@ -4,15 +4,7 @@ mod client; pub use client::FetchClient; -use std::{ - collections::{HashMap, VecDeque}, - sync::{ - atomic::{AtomicU64, AtomicUsize, Ordering}, - Arc, - }, - task::{Context, Poll}, -}; - +use crate::message::BlockRequest; use alloy_primitives::B256; use futures::StreamExt; use reth_eth_wire::{EthNetworkPrimitives, GetBlockBodies, GetBlockHeaders, NetworkPrimitives}; @@ -24,11 +16,17 @@ use reth_network_p2p::{ }; use reth_network_peers::PeerId; use reth_network_types::ReputationChangeKind; +use std::{ + collections::{HashMap, VecDeque}, + sync::{ + atomic::{AtomicU64, AtomicUsize, Ordering}, + Arc, + }, + task::{Context, Poll}, +}; use tokio::sync::{mpsc, mpsc::UnboundedSender, oneshot}; use tokio_stream::wrappers::UnboundedReceiverStream; -use crate::message::BlockRequest; - type InflightHeadersRequest = Request>>; type InflightBodiesRequest = Request, PeerRequestResult>>; diff --git a/crates/net/network/src/flattened_response.rs b/crates/net/network/src/flattened_response.rs index 78c3c35f598..61dae9c7c72 100644 --- a/crates/net/network/src/flattened_response.rs +++ b/crates/net/network/src/flattened_response.rs @@ -1,10 +1,9 @@ +use futures::Future; +use pin_project::pin_project; use std::{ pin::Pin, task::{Context, Poll}, }; - -use futures::Future; -use pin_project::pin_project; use tokio::sync::oneshot::{error::RecvError, Receiver}; /// Flatten a [Receiver] message in order to get rid of the [RecvError] result @@ -24,10 +23,7 @@ where fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.project(); - this.receiver.poll(cx).map(|r| match r { - Ok(r) => r, - Err(err) => Err(err.into()), - }) + this.receiver.poll(cx).map(|r| r.unwrap_or_else(|err| Err(err.into()))) } } diff --git a/crates/net/network/src/import.rs b/crates/net/network/src/import.rs index 749b3c347b3..f63bf2dd7a8 100644 --- a/crates/net/network/src/import.rs +++ b/crates/net/network/src/import.rs @@ -1,10 +1,8 @@ //! This module provides an abstraction over block import in the form of the `BlockImport` trait. -use std::task::{Context, Poll}; - -use reth_network_peers::PeerId; - use crate::message::NewBlockMessage; +use reth_network_peers::PeerId; +use std::task::{Context, Poll}; /// Abstraction over block import. pub trait BlockImport: std::fmt::Debug + Send + Sync { diff --git a/crates/net/network/src/listener.rs b/crates/net/network/src/listener.rs index e5094f68948..9fcc15a104b 100644 --- a/crates/net/network/src/listener.rs +++ b/crates/net/network/src/listener.rs @@ -1,13 +1,12 @@ //! Contains connection-oriented interfaces. +use futures::{ready, Stream}; use std::{ io, net::SocketAddr, pin::Pin, task::{Context, Poll}, }; - -use futures::{ready, Stream}; use tokio::net::{TcpListener, TcpStream}; /// A tcp connection listener. diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index 0738be1bcac..c1db91773e3 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -15,18 +15,26 @@ //! (IP+port) of our node is published via discovery, remote peers can initiate inbound connections //! to the local node. Once a (tcp) connection is established, both peers start to authenticate a [RLPx session](https://github.com/ethereum/devp2p/blob/master/rlpx.md) via a handshake. 
If the handshake was successful, both peers announce their capabilities and are now ready to exchange sub-protocol messages via the `RLPx` session. -use std::{ - net::SocketAddr, - path::Path, - pin::Pin, - sync::{ - atomic::{AtomicU64, AtomicUsize, Ordering}, - Arc, - }, - task::{Context, Poll}, - time::{Duration, Instant}, +use crate::{ + budget::{DEFAULT_BUDGET_TRY_DRAIN_NETWORK_HANDLE_CHANNEL, DEFAULT_BUDGET_TRY_DRAIN_SWARM}, + config::NetworkConfig, + discovery::Discovery, + error::{NetworkError, ServiceKind}, + eth_requests::IncomingEthRequest, + import::{BlockImport, BlockImportOutcome, BlockValidation}, + listener::ConnectionListener, + message::{NewBlockMessage, PeerMessage}, + metrics::{DisconnectMetrics, NetworkMetrics, NETWORK_POOL_TRANSACTIONS_SCOPE}, + network::{NetworkHandle, NetworkHandleMessage}, + peers::PeersManager, + poll_nested_stream_with_budget, + protocol::IntoRlpxSubProtocol, + session::SessionManager, + state::NetworkState, + swarm::{Swarm, SwarmEvent}, + transactions::NetworkTransactionEvent, + FetchClient, NetworkBuilder, }; - use futures::{Future, StreamExt}; use parking_lot::Mutex; use reth_eth_wire::{ @@ -44,31 +52,21 @@ use reth_storage_api::BlockNumReader; use reth_tasks::shutdown::GracefulShutdown; use reth_tokio_util::EventSender; use secp256k1::SecretKey; +use std::{ + net::SocketAddr, + path::Path, + pin::Pin, + sync::{ + atomic::{AtomicU64, AtomicUsize, Ordering}, + Arc, + }, + task::{Context, Poll}, + time::{Duration, Instant}, +}; use tokio::sync::mpsc::{self, error::TrySendError}; use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::{debug, error, trace, warn}; -use crate::{ - budget::{DEFAULT_BUDGET_TRY_DRAIN_NETWORK_HANDLE_CHANNEL, DEFAULT_BUDGET_TRY_DRAIN_SWARM}, - config::NetworkConfig, - discovery::Discovery, - error::{NetworkError, ServiceKind}, - eth_requests::IncomingEthRequest, - import::{BlockImport, BlockImportOutcome, BlockValidation}, - listener::ConnectionListener, - message::{NewBlockMessage, PeerMessage}, - metrics::{DisconnectMetrics, NetworkMetrics, NETWORK_POOL_TRANSACTIONS_SCOPE}, - network::{NetworkHandle, NetworkHandleMessage}, - peers::PeersManager, - poll_nested_stream_with_budget, - protocol::IntoRlpxSubProtocol, - session::SessionManager, - state::NetworkState, - swarm::{Swarm, SwarmEvent}, - transactions::NetworkTransactionEvent, - FetchClient, NetworkBuilder, -}; - #[cfg_attr(doc, aquamarine::aquamarine)] /// Manages the _entire_ state of the network. /// @@ -92,7 +90,7 @@ pub struct NetworkManager { event_sender: EventSender>>, /// Sender half to send events to the /// [`TransactionsManager`](crate::transactions::TransactionsManager) task, if configured. - to_transactions_manager: Option>, + to_transactions_manager: Option>>, /// Sender half to send events to the /// [`EthRequestHandler`](crate::eth_requests::EthRequestHandler) task, if configured. /// @@ -122,7 +120,7 @@ pub struct NetworkManager { impl NetworkManager { /// Sets the dedicated channel for events indented for the /// [`TransactionsManager`](crate::transactions::TransactionsManager). - pub fn set_transactions(&mut self, tx: mpsc::UnboundedSender) { + pub fn set_transactions(&mut self, tx: mpsc::UnboundedSender>) { self.to_transactions_manager = Some(UnboundedMeteredSender::new(tx, NETWORK_POOL_TRANSACTIONS_SCOPE)); } @@ -411,7 +409,7 @@ impl NetworkManager { /// Sends an event to the [`TransactionsManager`](crate::transactions::TransactionsManager) if /// configured. 
- fn notify_tx_manager(&self, event: NetworkTransactionEvent) { + fn notify_tx_manager(&self, event: NetworkTransactionEvent) { if let Some(ref tx) = self.to_transactions_manager { let _ = tx.send(event); } diff --git a/crates/net/network/src/message.rs b/crates/net/network/src/message.rs index 3040577415c..199498b0b4c 100644 --- a/crates/net/network/src/message.rs +++ b/crates/net/network/src/message.rs @@ -3,11 +3,6 @@ //! An `RLPx` stream is multiplexed via the prepended message-id of a framed message. //! Capabilities are exchanged via the `RLPx` `Hello` message as pairs of `(id, version)`, -use std::{ - sync::Arc, - task::{ready, Context, Poll}, -}; - use alloy_consensus::BlockHeader; use alloy_primitives::{Bytes, B256}; use futures::FutureExt; @@ -19,7 +14,11 @@ use reth_eth_wire::{ }; use reth_network_api::PeerRequest; use reth_network_p2p::error::{RequestError, RequestResult}; -use reth_primitives::{PooledTransactionsElement, ReceiptWithBloom}; +use reth_primitives::ReceiptWithBloom; +use std::{ + sync::Arc, + task::{ready, Context, Poll}, +}; use tokio::sync::oneshot; /// Internal form of a `NewBlock` message @@ -49,9 +48,9 @@ pub enum PeerMessage { /// Broadcast new block. NewBlock(NewBlockMessage), /// Received transactions _from_ the peer - ReceivedTransaction(Transactions), + ReceivedTransaction(Transactions), /// Broadcast transactions _from_ local _to_ a peer. - SendTransactions(SharedTransactions), + SendTransactions(SharedTransactions), /// Send new pooled transactions PooledTransactions(NewPooledTransactionHashes), /// All `eth` request variants. @@ -90,7 +89,7 @@ pub enum PeerResponse { /// Represents a response to a request for pooled transactions. PooledTransactions { /// The receiver channel for the response to a pooled transactions request. - response: oneshot::Receiver>, + response: oneshot::Receiver>>, }, /// Represents a response to a request for `NodeData`. NodeData { @@ -147,7 +146,7 @@ pub enum PeerResponseResult { /// Represents a result containing block bodies or an error. BlockBodies(RequestResult>), /// Represents a result containing pooled transactions or an error. - PooledTransactions(RequestResult>), + PooledTransactions(RequestResult>), /// Represents a result containing node data or an error. NodeData(RequestResult>), /// Represents a result containing receipts or an error. 
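// A minimal sketch (not part of the patch, and not reth's actual trait
// definition) of the pattern the message.rs hunks above apply: `PeerMessage`,
// `PeerResponse` and `PeerResponseResult` stop naming concrete transaction
// types and instead pick them through the associated types of a
// `NetworkPrimitives` implementation. The trait body and the `Vec<u8>`
// stand-ins below are illustrative assumptions to keep the example
// self-contained and compilable.
pub trait NetworkPrimitives: Send + Sync + 'static {
    /// Transaction type relayed in full `Transactions` broadcasts.
    type BroadcastedTransaction: Clone + Send + Sync;
    /// Transaction type served in `PooledTransactions` responses.
    type PooledTransaction: Clone + Send + Sync;
}

/// Default instantiation, mirroring the `EthNetworkPrimitives` default used
/// throughout the diff.
#[derive(Debug, Clone, Copy, Default)]
pub struct EthNetworkPrimitives;

impl NetworkPrimitives for EthNetworkPrimitives {
    type BroadcastedTransaction = Vec<u8>; // stand-in for a signed transaction
    type PooledTransaction = Vec<u8>; // stand-in for a pooled transaction
}

/// Message enums select their concrete types through `N`, so a non-mainnet
/// stack can plug in its own primitives without forking the networking code.
pub enum PeerMessage<N: NetworkPrimitives = EthNetworkPrimitives> {
    ReceivedTransaction(Vec<N::BroadcastedTransaction>),
    PooledTransactions(Vec<N::PooledTransaction>),
}

fn main() {
    // the default parameter keeps existing call sites compiling unchanged
    let msg: PeerMessage = PeerMessage::ReceivedTransaction(vec![vec![0x01]]);
    if let PeerMessage::ReceivedTransaction(txs) = msg {
        assert_eq!(txs.len(), 1);
    }
}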
diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index 1715fa63e2f..eadeccb1549 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -1,11 +1,7 @@ -use std::{ - net::SocketAddr, - sync::{ - atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, - Arc, - }, +use crate::{ + config::NetworkMode, protocol::RlpxSubProtocol, swarm::NetworkConnectionState, + transactions::TransactionsHandle, FetchClient, }; - use alloy_primitives::B256; use enr::Enr; use parking_lot::Mutex; @@ -15,6 +11,7 @@ use reth_eth_wire::{ DisconnectReason, EthNetworkPrimitives, NetworkPrimitives, NewBlock, NewPooledTransactionHashes, SharedTransactions, }; +use reth_ethereum_forks::Head; use reth_network_api::{ test_utils::{PeersHandle, PeersHandleProvider}, BlockDownloaderProvider, DiscoveryEvent, NetworkError, NetworkEvent, @@ -24,20 +21,21 @@ use reth_network_api::{ use reth_network_p2p::sync::{NetworkSyncUpdater, SyncState, SyncStateProvider}; use reth_network_peers::{NodeRecord, PeerId}; use reth_network_types::{PeerAddr, PeerKind, Reputation, ReputationChangeKind}; -use reth_primitives::{Head, TransactionSigned}; use reth_tokio_util::{EventSender, EventStream}; use secp256k1::SecretKey; +use std::{ + net::SocketAddr, + sync::{ + atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, + Arc, + }, +}; use tokio::sync::{ mpsc::{self, UnboundedSender}, oneshot, }; use tokio_stream::wrappers::UnboundedReceiverStream; -use crate::{ - config::NetworkMode, protocol::RlpxSubProtocol, swarm::NetworkConnectionState, - transactions::TransactionsHandle, FetchClient, -}; - /// A _shareable_ network frontend. Used to interact with the network. /// /// See also [`NetworkManager`](crate::NetworkManager). @@ -131,7 +129,7 @@ impl NetworkHandle { } /// Send full transactions to the peer - pub fn send_transactions(&self, peer_id: PeerId, msg: Vec>) { + pub fn send_transactions(&self, peer_id: PeerId, msg: Vec>) { self.send_message(NetworkHandleMessage::SendTransaction { peer_id, msg: SharedTransactions(msg), @@ -141,7 +139,7 @@ impl NetworkHandle { /// Send message to get the [`TransactionsHandle`]. /// /// Returns `None` if no transaction task is installed. - pub async fn transactions_handle(&self) -> Option { + pub async fn transactions_handle(&self) -> Option> { let (tx, rx) = oneshot::channel(); let _ = self.manager().send(NetworkHandleMessage::GetTransactionsHandle(tx)); rx.await.unwrap() @@ -252,7 +250,7 @@ impl PeersInfo for NetworkHandle { } } -impl Peers for NetworkHandle { +impl Peers for NetworkHandle { fn add_trusted_peer_id(&self, peer: PeerId) { self.send_message(NetworkHandleMessage::AddTrustedPeerId(peer)); } @@ -467,7 +465,7 @@ pub(crate) enum NetworkHandleMessage, }, /// Sends a list of transaction hashes to the given peer. SendPooledTransactionHashes { @@ -505,7 +503,7 @@ pub(crate) enum NetworkHandleMessage>), /// Retrieves the `TransactionsHandle` via a oneshot sender. - GetTransactionsHandle(oneshot::Sender>), + GetTransactionsHandle(oneshot::Sender>>), /// Initiates a graceful shutdown of the network via a oneshot sender. Shutdown(oneshot::Sender<()>), /// Sets the network state between hibernation and active. diff --git a/crates/net/network/src/peers.rs b/crates/net/network/src/peers.rs index 4855ff5e743..d4b762e3e12 100644 --- a/crates/net/network/src/peers.rs +++ b/crates/net/network/src/peers.rs @@ -1,16 +1,13 @@ //! 
Peer related implementations -use std::{ - collections::{hash_map::Entry, HashMap, HashSet, VecDeque}, - fmt::Display, - io::{self}, - net::{IpAddr, SocketAddr}, - task::{Context, Poll}, - time::Duration, +use crate::{ + error::SessionError, + session::{Direction, PendingSessionHandshakeError}, + swarm::NetworkConnectionState, }; - use futures::StreamExt; use reth_eth_wire::{errors::EthStreamError, DisconnectReason}; +use reth_ethereum_forks::ForkId; use reth_net_banlist::BanList; use reth_network_api::test_utils::{PeerCommand, PeersHandle}; use reth_network_peers::{NodeRecord, PeerId}; @@ -22,7 +19,14 @@ use reth_network_types::{ ConnectionsConfig, Peer, PeerAddr, PeerConnectionState, PeerKind, PeersConfig, ReputationChangeKind, ReputationChangeOutcome, ReputationChangeWeights, }; -use reth_primitives::ForkId; +use std::{ + collections::{hash_map::Entry, HashMap, HashSet, VecDeque}, + fmt::Display, + io::{self}, + net::{IpAddr, SocketAddr}, + task::{Context, Poll}, + time::Duration, +}; use thiserror::Error; use tokio::{ sync::mpsc, @@ -31,12 +35,6 @@ use tokio::{ use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::{trace, warn}; -use crate::{ - error::SessionError, - session::{Direction, PendingSessionHandshakeError}, - swarm::NetworkConnectionState, -}; - /// Maintains the state of _all_ the peers known to the network. /// /// This is supposed to be owned by the network itself, but can be reached via the [`PeersHandle`]. diff --git a/crates/net/network/src/protocol.rs b/crates/net/network/src/protocol.rs index eeffd1c95f4..aa0749c2c7b 100644 --- a/crates/net/network/src/protocol.rs +++ b/crates/net/network/src/protocol.rs @@ -2,19 +2,18 @@ //! //! See also -use std::{ - fmt, - net::SocketAddr, - ops::{Deref, DerefMut}, - pin::Pin, -}; - use alloy_primitives::bytes::BytesMut; use futures::Stream; use reth_eth_wire::{ capability::SharedCapabilities, multiplex::ProtocolConnection, protocol::Protocol, }; use reth_network_api::{Direction, PeerId}; +use std::{ + fmt, + net::SocketAddr, + ops::{Deref, DerefMut}, + pin::Pin, +}; /// A trait that allows to offer additional RLPx-based application-level protocols when establishing /// a peer-to-peer connection. diff --git a/crates/net/network/src/session/active.rs b/crates/net/network/src/session/active.rs index f979a912cd4..76701f7e2ab 100644 --- a/crates/net/network/src/session/active.rs +++ b/crates/net/network/src/session/active.rs @@ -11,6 +11,14 @@ use std::{ time::{Duration, Instant}, }; +use crate::{ + message::{NewBlockMessage, PeerMessage, PeerResponse, PeerResponseResult}, + session::{ + conn::EthRlpxConnection, + handle::{ActiveSessionMessage, SessionCommand}, + SessionId, + }, +}; use alloy_primitives::Sealable; use futures::{stream::Fuse, SinkExt, StreamExt}; use metrics::Gauge; @@ -34,15 +42,6 @@ use tokio_stream::wrappers::ReceiverStream; use tokio_util::sync::PollSender; use tracing::{debug, trace}; -use crate::{ - message::{NewBlockMessage, PeerMessage, PeerResponse, PeerResponseResult}, - session::{ - conn::EthRlpxConnection, - handle::{ActiveSessionMessage, SessionCommand}, - SessionId, - }, -}; - // Constants for timeout updating. /// Minimum timeout value diff --git a/crates/net/network/src/session/conn.rs b/crates/net/network/src/session/conn.rs index 5329f01028b..45b83d1c487 100644 --- a/crates/net/network/src/session/conn.rs +++ b/crates/net/network/src/session/conn.rs @@ -1,10 +1,5 @@ //! 
Connection types for a session -use std::{ - pin::Pin, - task::{Context, Poll}, -}; - use futures::{Sink, Stream}; use reth_ecies::stream::ECIESStream; use reth_eth_wire::{ @@ -13,6 +8,10 @@ use reth_eth_wire::{ multiplex::{ProtocolProxy, RlpxSatelliteStream}, EthMessage, EthNetworkPrimitives, EthStream, EthVersion, NetworkPrimitives, P2PStream, }; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; use tokio::net::TcpStream; /// The type of the underlying peer network connection. diff --git a/crates/net/network/src/session/counter.rs b/crates/net/network/src/session/counter.rs index 0d8f764f206..052cf1e2570 100644 --- a/crates/net/network/src/session/counter.rs +++ b/crates/net/network/src/session/counter.rs @@ -1,8 +1,7 @@ +use super::ExceedsSessionLimit; use reth_network_api::Direction; use reth_network_types::SessionLimits; -use super::ExceedsSessionLimit; - /// Keeps track of all sessions. #[derive(Debug, Clone)] pub struct SessionCounter { diff --git a/crates/net/network/src/session/handle.rs b/crates/net/network/src/session/handle.rs index f80428630d9..d167dc0e6ec 100644 --- a/crates/net/network/src/session/handle.rs +++ b/crates/net/network/src/session/handle.rs @@ -1,7 +1,10 @@ //! Session handles. -use std::{io, net::SocketAddr, sync::Arc, time::Instant}; - +use crate::{ + message::PeerMessage, + session::{conn::EthRlpxConnection, Direction, SessionId}, + PendingSessionHandshakeError, +}; use reth_ecies::ECIESError; use reth_eth_wire::{ capability::CapabilityMessage, errors::EthStreamError, Capabilities, DisconnectReason, @@ -10,17 +13,12 @@ use reth_eth_wire::{ use reth_network_api::PeerInfo; use reth_network_peers::{NodeRecord, PeerId}; use reth_network_types::PeerKind; +use std::{io, net::SocketAddr, sync::Arc, time::Instant}; use tokio::sync::{ mpsc::{self, error::SendError}, oneshot, }; -use crate::{ - message::PeerMessage, - session::{conn::EthRlpxConnection, Direction, SessionId}, - PendingSessionHandshakeError, -}; - /// A handler attached to a peer session that's not authenticated yet, pending Handshake and hello /// message which exchanges the `capabilities` of the peer. 
/// diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs index a95f0e88910..a020c540e38 100644 --- a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -23,6 +23,12 @@ use std::{ time::{Duration, Instant}, }; +use crate::{ + message::PeerMessage, + metrics::SessionManagerMetrics, + protocol::{IntoRlpxSubProtocol, RlpxSubProtocolHandlers, RlpxSubProtocols}, + session::active::ActiveSession, +}; use counter::SessionCounter; use futures::{future::Either, io, FutureExt, StreamExt}; use reth_ecies::{stream::ECIESStream, ECIESError}; @@ -31,11 +37,11 @@ use reth_eth_wire::{ Capabilities, DisconnectReason, EthVersion, HelloMessageWithProtocols, NetworkPrimitives, Status, UnauthedEthStream, UnauthedP2PStream, }; +use reth_ethereum_forks::{ForkFilter, ForkId, ForkTransition, Head}; use reth_metrics::common::mpsc::MeteredPollSender; use reth_network_api::{PeerRequest, PeerRequestSender}; use reth_network_peers::PeerId; use reth_network_types::SessionsConfig; -use reth_primitives::{ForkFilter, ForkId, ForkTransition, Head}; use reth_tasks::TaskSpawner; use rustc_hash::FxHashMap; use secp256k1::SecretKey; @@ -48,13 +54,6 @@ use tokio_stream::wrappers::ReceiverStream; use tokio_util::sync::PollSender; use tracing::{debug, instrument, trace}; -use crate::{ - message::PeerMessage, - metrics::SessionManagerMetrics, - protocol::{IntoRlpxSubProtocol, RlpxSubProtocolHandlers, RlpxSubProtocols}, - session::active::ActiveSession, -}; - /// Internal identifier for active sessions. #[derive(Debug, Clone, Copy, PartialOrd, PartialEq, Eq, Hash)] pub struct SessionId(usize); @@ -808,7 +807,7 @@ pub(crate) async fn pending_session_with_timeout( F: Future, { if tokio::time::timeout(timeout, f).await.is_err() { - debug!(target: "net::session", ?remote_addr, ?direction, "pending session timed out"); + trace!(target: "net::session", ?remote_addr, ?direction, "pending session timed out"); let event = PendingSessionEvent::Disconnected { remote_addr, session_id, diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index c51f115c52f..473c76c260f 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -1,17 +1,13 @@ //! Keeps track of the state of the network. 
-use std::{ - collections::{HashMap, VecDeque}, - fmt, - net::{IpAddr, SocketAddr}, - ops::Deref, - sync::{ - atomic::{AtomicU64, AtomicUsize}, - Arc, - }, - task::{Context, Poll}, +use crate::{ + cache::LruCache, + discovery::Discovery, + fetch::{BlockResponseOutcome, FetchAction, StateFetcher}, + message::{BlockRequest, NewBlockMessage, PeerResponse, PeerResponseResult}, + peers::{PeerAction, PeersManager}, + FetchClient, }; - use alloy_consensus::BlockHeader; use alloy_primitives::B256; use rand::seq::SliceRandom; @@ -19,23 +15,25 @@ use reth_eth_wire::{ BlockHashNumber, Capabilities, DisconnectReason, EthNetworkPrimitives, NetworkPrimitives, NewBlockHashes, Status, }; +use reth_ethereum_forks::ForkId; use reth_network_api::{DiscoveredEvent, DiscoveryEvent, PeerRequest, PeerRequestSender}; use reth_network_peers::PeerId; use reth_network_types::{PeerAddr, PeerKind}; -use reth_primitives::ForkId; use reth_primitives_traits::Block; +use std::{ + collections::{HashMap, VecDeque}, + fmt, + net::{IpAddr, SocketAddr}, + ops::Deref, + sync::{ + atomic::{AtomicU64, AtomicUsize}, + Arc, + }, + task::{Context, Poll}, +}; use tokio::sync::oneshot; use tracing::{debug, trace}; -use crate::{ - cache::LruCache, - discovery::Discovery, - fetch::{BlockResponseOutcome, FetchAction, StateFetcher}, - message::{BlockRequest, NewBlockMessage, PeerResponse, PeerResponseResult}, - peers::{PeerAction, PeersManager}, - FetchClient, -}; - /// Cache limit of blocks to keep track of for a single peer. const PEER_BLOCK_CACHE_LIMIT: u32 = 512; diff --git a/crates/net/network/src/swarm.rs b/crates/net/network/src/swarm.rs index 655934f207a..47447783f42 100644 --- a/crates/net/network/src/swarm.rs +++ b/crates/net/network/src/swarm.rs @@ -1,11 +1,11 @@ -use std::{ - io, - net::SocketAddr, - pin::Pin, - sync::Arc, - task::{Context, Poll}, +use crate::{ + listener::{ConnectionListener, ListenerEvent}, + message::PeerMessage, + peers::InboundConnectionError, + protocol::IntoRlpxSubProtocol, + session::{Direction, PendingSessionHandshakeError, SessionEvent, SessionId, SessionManager}, + state::{NetworkState, StateAction}, }; - use futures::Stream; use reth_eth_wire::{ capability::CapabilityMessage, errors::EthStreamError, Capabilities, DisconnectReason, @@ -13,16 +13,14 @@ use reth_eth_wire::{ }; use reth_network_api::{PeerRequest, PeerRequestSender}; use reth_network_peers::PeerId; -use tracing::trace; - -use crate::{ - listener::{ConnectionListener, ListenerEvent}, - message::PeerMessage, - peers::InboundConnectionError, - protocol::IntoRlpxSubProtocol, - session::{Direction, PendingSessionHandshakeError, SessionEvent, SessionId, SessionManager}, - state::{NetworkState, StateAction}, +use std::{ + io, + net::SocketAddr, + pin::Pin, + sync::Arc, + task::{Context, Poll}, }; +use tracing::trace; #[cfg_attr(doc, aquamarine::aquamarine)] /// Contains the connectivity related state of the network. 
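// The per-peer bookkeeping in state.rs above leans on the crate's LRU
// wrappers from cache.rs, which sit on `schnellru::LruMap` with a `ByLength`
// limiter; `PEER_BLOCK_CACHE_LIMIT` (512) bounds how many announced blocks
// are remembered per peer. A standalone sketch against schnellru directly,
// with illustrative key/value types rather than reth's:
use schnellru::{ByLength, LruMap};

fn main() {
    // capacity-bounded map: once 512 entries exist, the least recently used
    // entry is evicted to make room for the next insert
    let mut seen_blocks: LruMap<u64, [u8; 32]> = LruMap::new(ByLength::new(512));

    seen_blocks.insert(1, [0u8; 32]);
    assert!(seen_blocks.get(&1).is_some());
    assert!(seen_blocks.get(&2).is_none());
}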
diff --git a/crates/net/network/src/test_utils/init.rs b/crates/net/network/src/test_utils/init.rs index 767f6818091..87ccbb5f9d7 100644 --- a/crates/net/network/src/test_utils/init.rs +++ b/crates/net/network/src/test_utils/init.rs @@ -1,7 +1,6 @@ -use std::{net::SocketAddr, time::Duration}; - use enr::{k256::ecdsa::SigningKey, Enr, EnrPublicKey}; use reth_network_peers::PeerId; +use std::{net::SocketAddr, time::Duration}; /// The timeout for tests that create a `GethInstance` pub const GETH_TIMEOUT: Duration = Duration::from_secs(60); diff --git a/crates/net/network/src/test_utils/testnet.rs b/crates/net/network/src/test_utils/testnet.rs index d92272a871e..a64084f2cf9 100644 --- a/crates/net/network/src/test_utils/testnet.rs +++ b/crates/net/network/src/test_utils/testnet.rs @@ -1,13 +1,13 @@ //! A network implementation for testing purposes. -use std::{ - fmt, - future::Future, - net::{Ipv4Addr, SocketAddr, SocketAddrV4}, - pin::Pin, - task::{Context, Poll}, +use crate::{ + builder::ETH_REQUEST_CHANNEL_CAPACITY, + error::NetworkError, + eth_requests::EthRequestHandler, + protocol::IntoRlpxSubProtocol, + transactions::{TransactionsHandle, TransactionsManager, TransactionsManagerConfig}, + NetworkConfig, NetworkConfigBuilder, NetworkHandle, NetworkManager, }; - use futures::{FutureExt, StreamExt}; use pin_project::pin_project; use reth_chainspec::{Hardforks, MAINNET}; @@ -27,6 +27,13 @@ use reth_transaction_pool::{ EthTransactionPool, TransactionPool, TransactionValidationTaskExecutor, }; use secp256k1::SecretKey; +use std::{ + fmt, + future::Future, + net::{Ipv4Addr, SocketAddr, SocketAddrV4}, + pin::Pin, + task::{Context, Poll}, +}; use tokio::{ sync::{ mpsc::{channel, unbounded_channel}, @@ -35,15 +42,6 @@ use tokio::{ task::JoinHandle, }; -use crate::{ - builder::ETH_REQUEST_CHANNEL_CAPACITY, - error::NetworkError, - eth_requests::EthRequestHandler, - protocol::IntoRlpxSubProtocol, - transactions::{TransactionsHandle, TransactionsManager, TransactionsManagerConfig}, - NetworkConfig, NetworkConfigBuilder, NetworkHandle, NetworkManager, -}; - /// A test network consisting of multiple peers. pub struct Testnet { /// All running peers in the network. diff --git a/crates/net/network/src/transactions/config.rs b/crates/net/network/src/transactions/config.rs index b838f7cfe71..db59ffac5cc 100644 --- a/crates/net/network/src/transactions/config.rs +++ b/crates/net/network/src/transactions/config.rs @@ -1,5 +1,3 @@ -use derive_more::Constructor; - use super::{ DEFAULT_MAX_COUNT_TRANSACTIONS_SEEN_BY_PEER, DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ, @@ -9,6 +7,7 @@ use crate::transactions::constants::tx_fetcher::{ DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH, DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS, DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS_PER_PEER, }; +use derive_more::Constructor; /// Configuration for managing transactions within the network. #[derive(Debug, Clone)] diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index 4c4119c85c0..0833f677409 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -25,13 +25,18 @@ //! before it's re-tried. Nonetheless, the capacity of the buffered hashes cache must be large //! enough to buffer many hashes during network failure, to allow for recovery. 
-use std::{ - collections::HashMap, - pin::Pin, - task::{ready, Context, Poll}, - time::Duration, +use super::{ + config::TransactionFetcherConfig, + constants::{tx_fetcher::*, SOFT_LIMIT_COUNT_HASHES_IN_GET_POOLED_TRANSACTIONS_REQUEST}, + MessageFilter, PeerMetadata, PooledTransactions, + SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE, +}; +use crate::{ + cache::{LruCache, LruMap}, + duration_metered_exec, + metrics::TransactionFetcherMetrics, + transactions::{validation, PartiallyFilterMessage}, }; - use alloy_primitives::TxHash; use derive_more::{Constructor, Deref}; use futures::{stream::FuturesUnordered, Future, FutureExt, Stream, StreamExt}; @@ -47,23 +52,16 @@ use reth_primitives::PooledTransactionsElement; use schnellru::ByLength; #[cfg(debug_assertions)] use smallvec::{smallvec, SmallVec}; +use std::{ + collections::HashMap, + pin::Pin, + task::{ready, Context, Poll}, + time::Duration, +}; use tokio::sync::{mpsc::error::TrySendError, oneshot, oneshot::error::RecvError}; use tracing::{debug, trace}; use validation::FilterOutcome; -use super::{ - config::TransactionFetcherConfig, - constants::{tx_fetcher::*, SOFT_LIMIT_COUNT_HASHES_IN_GET_POOLED_TRANSACTIONS_REQUEST}, - MessageFilter, PeerMetadata, PooledTransactions, - SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE, -}; -use crate::{ - cache::{LruCache, LruMap}, - duration_metered_exec, - metrics::TransactionFetcherMetrics, - transactions::{validation, PartiallyFilterMessage}, -}; - /// The type responsible for fetching missing transactions from peers. /// /// This will keep track of unique transaction hashes that are currently being fetched and submits diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 36abcd3d617..9628dbb4f1b 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -18,20 +18,19 @@ pub use validation::*; pub(crate) use fetcher::{FetchEvent, TransactionFetcher}; use self::constants::{tx_manager::*, DEFAULT_SOFT_LIMIT_BYTE_SIZE_TRANSACTIONS_BROADCAST_MESSAGE}; -use constants::SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE; - -use std::{ - collections::{hash_map::Entry, HashMap, HashSet}, - pin::Pin, - sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, +use crate::{ + budget::{ + DEFAULT_BUDGET_TRY_DRAIN_NETWORK_TRANSACTION_EVENTS, + DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS, DEFAULT_BUDGET_TRY_DRAIN_POOL_IMPORTS, + DEFAULT_BUDGET_TRY_DRAIN_STREAM, }, - task::{Context, Poll}, - time::{Duration, Instant}, + cache::LruCache, + duration_metered_exec, metered_poll_nested_stream_with_budget, + metrics::{TransactionsManagerMetrics, NETWORK_POOL_TRANSACTIONS_SCOPE}, + NetworkHandle, }; - use alloy_primitives::{TxHash, B256}; +use constants::SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE; use futures::{stream::FuturesUnordered, Future, StreamExt}; use reth_eth_wire::{ DedupPayload, EthNetworkPrimitives, EthVersion, GetPooledTransactions, HandleMempoolData, @@ -49,29 +48,28 @@ use reth_network_p2p::{ }; use reth_network_peers::PeerId; use reth_network_types::ReputationChangeKind; -use reth_primitives::{PooledTransactionsElement, TransactionSigned, TransactionSignedEcRecovered}; +use reth_primitives::{PooledTransactionsElement, TransactionSigned}; +use reth_primitives_traits::{SignedTransaction, TxType}; use reth_tokio_util::EventStream; use reth_transaction_pool::{ error::{PoolError, PoolResult}, GetPooledTransactionLimit, PoolTransaction, PropagateKind, 
PropagatedTransactions, TransactionPool, ValidPoolTransaction, }; +use std::{ + collections::{hash_map::Entry, HashMap, HashSet}, + pin::Pin, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + task::{Context, Poll}, + time::{Duration, Instant}, +}; use tokio::sync::{mpsc, oneshot, oneshot::error::RecvError}; use tokio_stream::wrappers::{ReceiverStream, UnboundedReceiverStream}; use tracing::{debug, trace}; -use crate::{ - budget::{ - DEFAULT_BUDGET_TRY_DRAIN_NETWORK_TRANSACTION_EVENTS, - DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS, DEFAULT_BUDGET_TRY_DRAIN_POOL_IMPORTS, - DEFAULT_BUDGET_TRY_DRAIN_STREAM, - }, - cache::LruCache, - duration_metered_exec, metered_poll_nested_stream_with_budget, - metrics::{TransactionsManagerMetrics, NETWORK_POOL_TRANSACTIONS_SCOPE}, - NetworkHandle, -}; - /// The future for importing transactions into the pool. /// /// Resolves with the result of each transaction import. @@ -85,42 +83,26 @@ pub type PoolImportFuture = Pin>> /// For example [`TransactionsHandle::get_peer_transaction_hashes`] returns the transaction hashes /// known by a specific peer. #[derive(Debug, Clone)] -pub struct TransactionsHandle { +pub struct TransactionsHandle { /// Command channel to the [`TransactionsManager`] - manager_tx: mpsc::UnboundedSender, + manager_tx: mpsc::UnboundedSender>, } /// Implementation of the `TransactionsHandle` API for use in testnet via type /// [`PeerHandle`](crate::test_utils::PeerHandle). -impl TransactionsHandle { - fn send(&self, cmd: TransactionsCommand) { +impl TransactionsHandle { + fn send(&self, cmd: TransactionsCommand) { let _ = self.manager_tx.send(cmd); } /// Fetch the [`PeerRequestSender`] for the given peer. - async fn peer_handle(&self, peer_id: PeerId) -> Result, RecvError> { - let (tx, rx) = oneshot::channel(); - self.send(TransactionsCommand::GetPeerSender { peer_id, peer_request_sender: tx }); - rx.await - } - - /// Requests the transactions directly from the given peer. - /// - /// Returns `None` if the peer is not connected. - /// - /// **Note**: this returns the response from the peer as received. - pub async fn get_pooled_transactions_from( + async fn peer_handle( &self, peer_id: PeerId, - hashes: Vec, - ) -> Result>, RequestError> { - let Some(peer) = self.peer_handle(peer_id).await? else { return Ok(None) }; - + ) -> Result>>, RecvError> { let (tx, rx) = oneshot::channel(); - let request = PeerRequest::GetPooledTransactions { request: hashes.into(), response: tx }; - peer.try_send(request).ok(); - - rx.await?.map(|res| Some(res.0)) + self.send(TransactionsCommand::GetPeerSender { peer_id, peer_request_sender: tx }); + rx.await } /// Manually propagate the transaction that belongs to the hash. @@ -180,6 +162,25 @@ impl TransactionsHandle { let res = self.get_transaction_hashes(vec![peer]).await?; Ok(res.into_values().next().unwrap_or_default()) } + + /// Requests the transactions directly from the given peer. + /// + /// Returns `None` if the peer is not connected. + /// + /// **Note**: this returns the response from the peer as received. + pub async fn get_pooled_transactions_from( + &self, + peer_id: PeerId, + hashes: Vec, + ) -> Result>, RequestError> { + let Some(peer) = self.peer_handle(peer_id).await? else { return Ok(None) }; + + let (tx, rx) = oneshot::channel(); + let request = PeerRequest::GetPooledTransactions { request: hashes.into(), response: tx }; + peer.try_send(request).ok(); + + rx.await?.map(|res| Some(res.0)) + } } /// Manages transactions on top of the p2p network. 
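// The `TransactionsHandle`/`TransactionsManager` split shown in the hunks
// below is the usual tokio command-channel shape: the handle owns only an
// unbounded sender of commands, and request/response commands (such as
// `TransactionsCommand::GetPeerSender` above) embed a oneshot sender for the
// reply. A self-contained sketch of that shape with an invented command, not
// the crate's real `TransactionsCommand`:
use tokio::sync::{mpsc, oneshot};

enum Command {
    /// A command carrying a reply channel, like `GetPeerSender` does.
    PeerCount(oneshot::Sender<usize>),
}

#[derive(Clone)]
struct Handle {
    manager_tx: mpsc::UnboundedSender<Command>,
}

impl Handle {
    /// Returns `None` if the manager task has shut down.
    async fn peer_count(&self) -> Option<usize> {
        let (tx, rx) = oneshot::channel();
        self.manager_tx.send(Command::PeerCount(tx)).ok()?;
        rx.await.ok()
    }
}

#[tokio::main]
async fn main() {
    let (manager_tx, mut command_rx) = mpsc::unbounded_channel();
    let handle = Handle { manager_tx };

    // the manager task drains commands and answers through the embedded oneshot
    tokio::spawn(async move {
        while let Some(Command::PeerCount(reply)) = command_rx.recv().await {
            let _ = reply.send(42);
        }
    });

    assert_eq!(handle.peer_count().await, Some(42));
}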
@@ -238,12 +239,12 @@ pub struct TransactionsManager, + command_tx: mpsc::UnboundedSender>, /// Incoming commands from [`TransactionsHandle`]. /// /// This will only receive commands if a user manually sends a command to the manager through /// the [`TransactionsHandle`] to interact with this type directly. - command_rx: UnboundedReceiverStream, + command_rx: UnboundedReceiverStream>, /// A stream that yields new __pending__ transactions. /// /// A transaction is considered __pending__ if it is executable on the current state of the @@ -313,22 +314,106 @@ impl TransactionsManager { } } -// === impl TransactionsManager === - -impl TransactionsManager -where - Pool: TransactionPool, -{ +impl TransactionsManager { /// Returns a new handle that can send commands to this type. - pub fn handle(&self) -> TransactionsHandle { + pub fn handle(&self) -> TransactionsHandle { TransactionsHandle { manager_tx: self.command_tx.clone() } } -} -impl TransactionsManager -where - Pool: TransactionPool + 'static, -{ + /// Returns `true` if [`TransactionsManager`] has capacity to request pending hashes. Returns + /// `false` if [`TransactionsManager`] is operating close to full capacity. + fn has_capacity_for_fetching_pending_hashes(&self) -> bool { + self.pending_pool_imports_info + .has_capacity(self.pending_pool_imports_info.max_pending_pool_imports) && + self.transaction_fetcher.has_capacity_for_fetching_pending_hashes() + } + + fn report_peer_bad_transactions(&self, peer_id: PeerId) { + self.report_peer(peer_id, ReputationChangeKind::BadTransactions); + self.metrics.reported_bad_transactions.increment(1); + } + + fn report_peer(&self, peer_id: PeerId, kind: ReputationChangeKind) { + trace!(target: "net::tx", ?peer_id, ?kind, "reporting reputation change"); + self.network.reputation_change(peer_id, kind); + } + + fn report_already_seen(&self, peer_id: PeerId) { + trace!(target: "net::tx", ?peer_id, "Penalizing peer for already seen transaction"); + self.network.reputation_change(peer_id, ReputationChangeKind::AlreadySeenTransaction); + } + + /// Clear the transaction + fn on_good_import(&mut self, hash: TxHash) { + self.transactions_by_peers.remove(&hash); + } + + /// Penalize the peers that intentionally sent the bad transaction, and cache it to avoid + /// fetching or importing it again. + /// + /// Errors that count as bad transactions are: + /// + /// - intrinsic gas too low + /// - exceeds gas limit + /// - gas uint overflow + /// - exceeds max init code size + /// - oversized data + /// - signer account has bytecode + /// - chain id mismatch + /// - old legacy chain id + /// - tx type not supported + /// + /// (and additionally for blobs txns...) + /// + /// - no blobs + /// - too many blobs + /// - invalid kzg proof + /// - kzg error + /// - not blob transaction (tx type mismatch) + /// - wrong versioned kzg commitment hash + fn on_bad_import(&mut self, err: PoolError) { + let peers = self.transactions_by_peers.remove(&err.hash); + + // if we're _currently_ syncing, we ignore a bad transaction + if !err.is_bad_transaction() || self.network.is_syncing() { + return + } + // otherwise we penalize the peer that sent the bad transaction, with the assumption that + // the peer should have known that this transaction is bad (e.g. 
violating consensus rules) + if let Some(peers) = peers { + for peer_id in peers { + self.report_peer_bad_transactions(peer_id); + } + } + self.metrics.bad_imports.increment(1); + self.bad_imports.insert(err.hash); + } + + /// Runs an operation to fetch hashes that are cached in [`TransactionFetcher`]. + fn on_fetch_hashes_pending_fetch(&mut self) { + // try drain transaction hashes pending fetch + let info = &self.pending_pool_imports_info; + let max_pending_pool_imports = info.max_pending_pool_imports; + let has_capacity_wrt_pending_pool_imports = + |divisor| info.has_capacity(max_pending_pool_imports / divisor); + + self.transaction_fetcher + .on_fetch_pending_hashes(&self.peers, has_capacity_wrt_pending_pool_imports); + } + + fn on_request_error(&self, peer_id: PeerId, req_err: RequestError) { + let kind = match req_err { + RequestError::UnsupportedCapability => ReputationChangeKind::BadProtocol, + RequestError::Timeout => ReputationChangeKind::Timeout, + RequestError::ChannelClosed | RequestError::ConnectionDropped => { + // peer is already disconnected + return + } + RequestError::BadResponse => return self.report_peer_bad_transactions(peer_id), + }; + self.report_peer(peer_id, kind); + } + #[inline] fn update_poll_metrics(&self, start: Instant, poll_durations: TxManagerPollDurations) { let metrics = &self.metrics; @@ -354,50 +439,34 @@ where metrics.acc_duration_fetch_pending_hashes.set(acc_pending_fetch.as_secs_f64()); metrics.acc_duration_poll_commands.set(acc_cmds.as_secs_f64()); } +} - /// Request handler for an incoming request for transactions - fn on_get_pooled_transactions( - &mut self, - peer_id: PeerId, - request: GetPooledTransactions, - response: oneshot::Sender>, - ) { - if let Some(peer) = self.peers.get_mut(&peer_id) { - if self.network.tx_gossip_disabled() { - let _ = response.send(Ok(PooledTransactions::default())); - return +impl TransactionsManager +where + Pool: TransactionPool, + N: NetworkPrimitives, +{ + /// Processes a batch import results. + fn on_batch_import_result(&mut self, batch_results: Vec>) { + for res in batch_results { + match res { + Ok(hash) => { + self.on_good_import(hash); + } + Err(err) => { + self.on_bad_import(err); + } } - let transactions = self.pool.get_pooled_transaction_elements( - request.0, - GetPooledTransactionLimit::ResponseSizeSoftLimit( - self.transaction_fetcher.info.soft_limit_byte_size_pooled_transactions_response, - ), - ); - - trace!(target: "net::tx::propagation", sent_txs=?transactions.iter().map(|tx| *tx.hash()), "Sending requested transactions to peer"); - - // we sent a response at which point we assume that the peer is aware of the - // transactions - peer.seen_transactions.extend(transactions.iter().map(|tx| *tx.hash())); - - let resp = PooledTransactions(transactions); - let _ = response.send(Ok(resp)); } } - /// Invoked when transactions in the local mempool are considered __pending__. - /// - /// When a transaction in the local mempool is moved to the pending pool, we propagate them to - /// connected peers over network using the `Transactions` and `NewPooledTransactionHashes` - /// messages. The Transactions message relays complete transaction objects and is typically - /// sent to a small, random fraction of connected peers. - /// - /// All other peers receive a notification of the transaction hash and can request the - /// complete transaction object if it is unknown to them. 
The dissemination of complete - /// transactions to a fraction of peers usually ensures that all nodes receive the transaction - /// and won't need to request it. - fn on_new_pending_transactions(&mut self, hashes: Vec) { - // Nothing to propagate while initially syncing + /// Request handler for an incoming `NewPooledTransactionHashes` + fn on_new_pooled_transaction_hashes( + &mut self, + peer_id: PeerId, + msg: NewPooledTransactionHashes, + ) { + // If the node is initially syncing, ignore transactions if self.network.is_initially_syncing() { return } @@ -405,113 +474,240 @@ where return } - trace!(target: "net::tx", num_hashes=?hashes.len(), "Start propagating transactions"); - - self.propagate_all(hashes); - } + // get handle to peer's session, if the session is still active + let Some(peer) = self.peers.get_mut(&peer_id) else { + trace!( + peer_id = format!("{peer_id:#}"), + ?msg, + "discarding announcement from inactive peer" + ); - /// Propagates the given transactions to the peers - /// - /// This fetches all transaction from the pool, including the 4844 blob transactions but - /// __without__ their sidecar, because 4844 transactions are only ever announced as hashes. - fn propagate_all(&mut self, hashes: Vec) { - let propagated = self.propagate_transactions( - self.pool.get_all(hashes).into_iter().map(PropagateTransaction::new).collect(), - PropagationMode::Basic, - ); + return + }; + let client = peer.client_version.clone(); - // notify pool so events get fired - self.pool.on_propagated(propagated); - } + // keep track of the transactions the peer knows + let mut count_txns_already_seen_by_peer = 0; + for tx in msg.iter_hashes().copied() { + if !peer.seen_transactions.insert(tx) { + count_txns_already_seen_by_peer += 1; + } + } + if count_txns_already_seen_by_peer > 0 { + // this may occur if transactions are sent or announced to a peer, at the same time as + // the peer sends/announces those hashes to us. this is because, marking + // txns as seen by a peer is done optimistically upon sending them to the + // peer. + self.metrics.messages_with_hashes_already_seen_by_peer.increment(1); + self.metrics + .occurrences_hash_already_seen_by_peer + .increment(count_txns_already_seen_by_peer); - /// Propagate the transactions to all connected peers either as full objects or hashes. - /// - /// The message for new pooled hashes depends on the negotiated version of the stream. - /// See [`NewPooledTransactionHashes`] - /// - /// Note: EIP-4844 are disallowed from being broadcast in full and are only ever sent as hashes, see also . - fn propagate_transactions( - &mut self, - to_propagate: Vec, - propagation_mode: PropagationMode, - ) -> PropagatedTransactions { - let mut propagated = PropagatedTransactions::default(); - if self.network.tx_gossip_disabled() { - return propagated + trace!(target: "net::tx", + %count_txns_already_seen_by_peer, + peer_id=format!("{peer_id:#}"), + ?client, + "Peer sent hashes that have already been marked as seen by peer" + ); + + self.report_already_seen(peer_id); } - // send full transactions to a set of the connected peers based on the configured mode - let max_num_full = self.config.propagation_mode.full_peer_count(self.peers.len()); + // 1. 
filter out spam + let (validation_outcome, mut partially_valid_msg) = + self.transaction_fetcher.filter_valid_message.partially_filter_valid_entries(msg); - // Note: Assuming ~random~ order due to random state of the peers map hasher - for (peer_idx, (peer_id, peer)) in self.peers.iter_mut().enumerate() { - // determine whether to send full tx objects or hashes. - let mut builder = if peer_idx > max_num_full { - PropagateTransactionsBuilder::pooled(peer.version) - } else { - PropagateTransactionsBuilder::full(peer.version) - }; + if validation_outcome == FilterOutcome::ReportPeer { + self.report_peer(peer_id, ReputationChangeKind::BadAnnouncement); + } - if propagation_mode.is_forced() { - builder.extend(to_propagate.iter()); - } else { - // Iterate through the transactions to propagate and fill the hashes and full - // transaction lists, before deciding whether or not to send full transactions to - // the peer. - for tx in &to_propagate { - // Only proceed if the transaction is not in the peer's list of seen - // transactions - if !peer.seen_transactions.contains(&tx.hash()) { - builder.push(tx); - } - } - } + // 2. filter out transactions pending import to pool + partially_valid_msg.retain_by_hash(|hash| !self.transactions_by_peers.contains_key(hash)); - if builder.is_empty() { - trace!(target: "net::tx", ?peer_id, "Nothing to propagate to peer; has seen all transactions"); - continue - } + // 3. filter out known hashes + // + // known txns have already been successfully fetched or received over gossip. + // + // most hashes will be filtered out here since this the mempool protocol is a gossip + // protocol, healthy peers will send many of the same hashes. + // + let hashes_count_pre_pool_filter = partially_valid_msg.len(); + self.pool.retain_unknown(&mut partially_valid_msg); + if hashes_count_pre_pool_filter > partially_valid_msg.len() { + let already_known_hashes_count = + hashes_count_pre_pool_filter - partially_valid_msg.len(); + self.metrics + .occurrences_hashes_already_in_pool + .increment(already_known_hashes_count as u64); + } - let PropagateTransactions { pooled, full } = builder.build(); + if partially_valid_msg.is_empty() { + // nothing to request + return + } - // send hashes if any - if let Some(mut new_pooled_hashes) = pooled { - // enforce tx soft limit per message for the (unlikely) event the number of - // hashes exceeds it - new_pooled_hashes - .truncate(SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE); + // 4. filter out invalid entries (spam) + // + // validates messages with respect to the given network, e.g. 
allowed tx types + // + let (validation_outcome, mut valid_announcement_data) = if partially_valid_msg + .msg_version() + .expect("partially valid announcement should have version") + .is_eth68() + { + // validate eth68 announcement data + self.transaction_fetcher + .filter_valid_message + .filter_valid_entries_68(partially_valid_msg) + } else { + // validate eth66 announcement data + self.transaction_fetcher + .filter_valid_message + .filter_valid_entries_66(partially_valid_msg) + }; - for hash in new_pooled_hashes.iter_hashes().copied() { - propagated.0.entry(hash).or_default().push(PropagateKind::Hash(*peer_id)); - // mark transaction as seen by peer - peer.seen_transactions.insert(hash); - } + if validation_outcome == FilterOutcome::ReportPeer { + self.report_peer(peer_id, ReputationChangeKind::BadAnnouncement); + } - trace!(target: "net::tx", ?peer_id, num_txs=?new_pooled_hashes.len(), "Propagating tx hashes to peer"); + if valid_announcement_data.is_empty() { + // no valid announcement data + return + } - // send hashes of transactions - self.network.send_transactions_hashes(*peer_id, new_pooled_hashes); - } + // 5. filter out already seen unknown hashes + // + // seen hashes are already in the tx fetcher, pending fetch. + // + // for any seen hashes add the peer as fallback. unseen hashes are loaded into the tx + // fetcher, hence they should be valid at this point. + let bad_imports = &self.bad_imports; + self.transaction_fetcher.filter_unseen_and_pending_hashes( + &mut valid_announcement_data, + |hash| bad_imports.contains(hash), + &peer_id, + |peer_id| self.peers.contains_key(&peer_id), + &client, + ); - // send full transactions, if any - if let Some(new_full_transactions) = full { - for tx in &new_full_transactions { - propagated.0.entry(tx.hash()).or_default().push(PropagateKind::Full(*peer_id)); - // mark transaction as seen by peer - peer.seen_transactions.insert(tx.hash()); - } + if valid_announcement_data.is_empty() { + // nothing to request + return + } - trace!(target: "net::tx", ?peer_id, num_txs=?new_full_transactions.len(), "Propagating full transactions to peer"); + trace!(target: "net::tx::propagation", + peer_id=format!("{peer_id:#}"), + hashes_len=valid_announcement_data.iter().count(), + hashes=?valid_announcement_data.keys().collect::>(), + msg_version=%valid_announcement_data.msg_version(), + client_version=%client, + "received previously unseen and pending hashes in announcement from peer" + ); - // send full transactions - self.network.send_transactions(*peer_id, new_full_transactions); - } + // only send request for hashes to idle peer, otherwise buffer hashes storing peer as + // fallback + if !self.transaction_fetcher.is_idle(&peer_id) { + // load message version before announcement data is destructed in packing + let msg_version = valid_announcement_data.msg_version(); + let (hashes, _version) = valid_announcement_data.into_request_hashes(); + + trace!(target: "net::tx", + peer_id=format!("{peer_id:#}"), + hashes=?*hashes, + %msg_version, + %client, + "buffering hashes announced by busy peer" + ); + + self.transaction_fetcher.buffer_hashes(hashes, Some(peer_id)); + + return } - // Update propagated transactions metrics - self.metrics.propagated_transactions.increment(propagated.0.len() as u64); + // load message version before announcement data type is destructed in packing + let msg_version = valid_announcement_data.msg_version(); + // + // demand recommended soft limit on response, however the peer may enforce an arbitrary + // limit on the response 
(2MB) + // + // request buffer is shrunk via call to pack request! + let init_capacity_req = + self.transaction_fetcher.approx_capacity_get_pooled_transactions_req(msg_version); + let mut hashes_to_request = RequestTxHashes::with_capacity(init_capacity_req); + let surplus_hashes = + self.transaction_fetcher.pack_request(&mut hashes_to_request, valid_announcement_data); - propagated + if !surplus_hashes.is_empty() { + trace!(target: "net::tx", + peer_id=format!("{peer_id:#}"), + surplus_hashes=?*surplus_hashes, + %msg_version, + %client, + "some hashes in announcement from peer didn't fit in `GetPooledTransactions` request, buffering surplus hashes" + ); + + self.transaction_fetcher.buffer_hashes(surplus_hashes, Some(peer_id)); + } + + trace!(target: "net::tx", + peer_id=format!("{peer_id:#}"), + hashes=?*hashes_to_request, + %msg_version, + %client, + "sending hashes in `GetPooledTransactions` request to peer's session" + ); + + // request the missing transactions + // + // get handle to peer's session again, at this point we know it exists + let Some(peer) = self.peers.get_mut(&peer_id) else { return }; + if let Some(failed_to_request_hashes) = + self.transaction_fetcher.request_transactions_from_peer(hashes_to_request, peer) + { + let conn_eth_version = peer.version; + + trace!(target: "net::tx", + peer_id=format!("{peer_id:#}"), + failed_to_request_hashes=?*failed_to_request_hashes, + %conn_eth_version, + %client, + "sending `GetPooledTransactions` request to peer's session failed, buffering hashes" + ); + self.transaction_fetcher.buffer_hashes(failed_to_request_hashes, Some(peer_id)); + } + } +} + +impl TransactionsManager +where + Pool: TransactionPool, + N: NetworkPrimitives, + <::Transaction as PoolTransaction>::Consensus: + Into, +{ + /// Invoked when transactions in the local mempool are considered __pending__. + /// + /// When a transaction in the local mempool is moved to the pending pool, we propagate them to + /// connected peers over network using the `Transactions` and `NewPooledTransactionHashes` + /// messages. The Transactions message relays complete transaction objects and is typically + /// sent to a small, random fraction of connected peers. + /// + /// All other peers receive a notification of the transaction hash and can request the + /// complete transaction object if it is unknown to them. The dissemination of complete + /// transactions to a fraction of peers usually ensures that all nodes receive the transaction + /// and won't need to request it. + fn on_new_pending_transactions(&mut self, hashes: Vec) { + // Nothing to propagate while initially syncing + if self.network.is_initially_syncing() { + return + } + if self.network.tx_gossip_disabled() { + return + } + + trace!(target: "net::tx", num_hashes=?hashes.len(), "Start propagating transactions"); + + self.propagate_all(hashes); } /// Propagate the full transactions to a specific peer. 
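// Every propagation path rewritten below maintains one invariant: a hash is
// added to a peer's `seen_transactions` the moment it is sent or announced to
// that peer (optimistically, before the send completes), and peers whose set
// already contains the hash are skipped. A minimal sketch of that dedup, with
// a plain `HashSet` standing in for the bounded cache the crate actually uses:
use std::collections::HashSet;

struct Peer {
    seen_transactions: HashSet<[u8; 32]>,
}

impl Peer {
    /// Returns the hashes still worth sending, marking each one as seen
    /// optimistically, i.e. before any network send actually happens.
    fn select_unseen(&mut self, hashes: &[[u8; 32]]) -> Vec<[u8; 32]> {
        // `insert` returns true only for hashes not yet in the set
        hashes.iter().copied().filter(|h| self.seen_transactions.insert(*h)).collect()
    }
}

fn main() {
    let mut peer = Peer { seen_transactions: HashSet::new() };
    assert_eq!(peer.select_unseen(&[[1; 32], [2; 32]]).len(), 2);
    // a repeat propagation of the same hashes is fully deduplicated
    assert!(peer.select_unseen(&[[1; 32], [2; 32]]).is_empty());
}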
@@ -540,7 +736,7 @@ where // Iterate through the transactions to propagate and fill the hashes and full // transaction for tx in to_propagate { - if !peer.seen_transactions.contains(&tx.hash()) { + if !peer.seen_transactions.contains(tx.tx_hash()) { // Only include if the peer hasn't seen the transaction full_transactions.push(&tx); } @@ -569,9 +765,9 @@ where // send full transactions, if any if let Some(new_full_transactions) = full { for tx in &new_full_transactions { - propagated.0.entry(tx.hash()).or_default().push(PropagateKind::Full(peer_id)); + propagated.0.entry(*tx.tx_hash()).or_default().push(PropagateKind::Full(peer_id)); // mark transaction as seen by peer - peer.seen_transactions.insert(tx.hash()); + peer.seen_transactions.insert(*tx.tx_hash()); } // send full transactions @@ -603,8 +799,12 @@ where return }; - let to_propagate: Vec = - self.pool.get_all(hashes).into_iter().map(PropagateTransaction::new).collect(); + let to_propagate = self + .pool + .get_all(hashes) + .into_iter() + .map(PropagateTransaction::new) + .collect::>(); let mut propagated = PropagatedTransactions::default(); @@ -615,7 +815,7 @@ where hashes.extend(to_propagate) } else { for tx in to_propagate { - if !peer.seen_transactions.contains(&tx.hash()) { + if !peer.seen_transactions.contains(tx.tx_hash()) { // Include if the peer hasn't seen it hashes.push(&tx); } @@ -648,220 +848,147 @@ where self.pool.on_propagated(propagated); } - /// Request handler for an incoming `NewPooledTransactionHashes` - fn on_new_pooled_transaction_hashes( + /// Propagate the transactions to all connected peers either as full objects or hashes. + /// + /// The message for new pooled hashes depends on the negotiated version of the stream. + /// See [`NewPooledTransactionHashes`] + /// + /// Note: EIP-4844 are disallowed from being broadcast in full and are only ever sent as hashes, see also . + fn propagate_transactions( &mut self, - peer_id: PeerId, - msg: NewPooledTransactionHashes, - ) { - // If the node is initially syncing, ignore transactions - if self.network.is_initially_syncing() { - return - } + to_propagate: Vec>, + propagation_mode: PropagationMode, + ) -> PropagatedTransactions { + let mut propagated = PropagatedTransactions::default(); if self.network.tx_gossip_disabled() { - return + return propagated } - // get handle to peer's session, if the session is still active - let Some(peer) = self.peers.get_mut(&peer_id) else { - trace!( - peer_id = format!("{peer_id:#}"), - ?msg, - "discarding announcement from inactive peer" - ); + // send full transactions to a set of the connected peers based on the configured mode + let max_num_full = self.config.propagation_mode.full_peer_count(self.peers.len()); - return - }; - let client = peer.client_version.clone(); + // Note: Assuming ~random~ order due to random state of the peers map hasher + for (peer_idx, (peer_id, peer)) in self.peers.iter_mut().enumerate() { + // determine whether to send full tx objects or hashes. 
+ let mut builder = if peer_idx > max_num_full { + PropagateTransactionsBuilder::pooled(peer.version) + } else { + PropagateTransactionsBuilder::full(peer.version) + }; - // keep track of the transactions the peer knows - let mut count_txns_already_seen_by_peer = 0; - for tx in msg.iter_hashes().copied() { - if !peer.seen_transactions.insert(tx) { - count_txns_already_seen_by_peer += 1; + if propagation_mode.is_forced() { + builder.extend(to_propagate.iter()); + } else { + // Iterate through the transactions to propagate and fill the hashes and full + // transaction lists, before deciding whether or not to send full transactions to + // the peer. + for tx in &to_propagate { + // Only proceed if the transaction is not in the peer's list of seen + // transactions + if !peer.seen_transactions.contains(tx.tx_hash()) { + builder.push(tx); + } + } } - } - if count_txns_already_seen_by_peer > 0 { - // this may occur if transactions are sent or announced to a peer, at the same time as - // the peer sends/announces those hashes to us. this is because, marking - // txns as seen by a peer is done optimistically upon sending them to the - // peer. - self.metrics.messages_with_hashes_already_seen_by_peer.increment(1); - self.metrics - .occurrences_hash_already_seen_by_peer - .increment(count_txns_already_seen_by_peer); - trace!(target: "net::tx", - %count_txns_already_seen_by_peer, - peer_id=format!("{peer_id:#}"), - ?client, - "Peer sent hashes that have already been marked as seen by peer" - ); - - self.report_already_seen(peer_id); - } - - // 1. filter out spam - let (validation_outcome, mut partially_valid_msg) = - self.transaction_fetcher.filter_valid_message.partially_filter_valid_entries(msg); - - if validation_outcome == FilterOutcome::ReportPeer { - self.report_peer(peer_id, ReputationChangeKind::BadAnnouncement); - } - - // 2. filter out transactions pending import to pool - partially_valid_msg.retain_by_hash(|hash| !self.transactions_by_peers.contains_key(hash)); - - // 3. filter out known hashes - // - // known txns have already been successfully fetched or received over gossip. - // - // most hashes will be filtered out here since this the mempool protocol is a gossip - // protocol, healthy peers will send many of the same hashes. - // - let hashes_count_pre_pool_filter = partially_valid_msg.len(); - self.pool.retain_unknown(&mut partially_valid_msg); - if hashes_count_pre_pool_filter > partially_valid_msg.len() { - let already_known_hashes_count = - hashes_count_pre_pool_filter - partially_valid_msg.len(); - self.metrics - .occurrences_hashes_already_in_pool - .increment(already_known_hashes_count as u64); - } - - if partially_valid_msg.is_empty() { - // nothing to request - return - } - - // 4. filter out invalid entries (spam) - // - // validates messages with respect to the given network, e.g. 
allowed tx types - // - let (validation_outcome, mut valid_announcement_data) = if partially_valid_msg - .msg_version() - .expect("partially valid announcement should have version") - .is_eth68() - { - // validate eth68 announcement data - self.transaction_fetcher - .filter_valid_message - .filter_valid_entries_68(partially_valid_msg) - } else { - // validate eth66 announcement data - self.transaction_fetcher - .filter_valid_message - .filter_valid_entries_66(partially_valid_msg) - }; - - if validation_outcome == FilterOutcome::ReportPeer { - self.report_peer(peer_id, ReputationChangeKind::BadAnnouncement); - } - - if valid_announcement_data.is_empty() { - // no valid announcement data - return - } - - // 5. filter out already seen unknown hashes - // - // seen hashes are already in the tx fetcher, pending fetch. - // - // for any seen hashes add the peer as fallback. unseen hashes are loaded into the tx - // fetcher, hence they should be valid at this point. - let bad_imports = &self.bad_imports; - self.transaction_fetcher.filter_unseen_and_pending_hashes( - &mut valid_announcement_data, - |hash| bad_imports.contains(hash), - &peer_id, - |peer_id| self.peers.contains_key(&peer_id), - &client, - ); - - if valid_announcement_data.is_empty() { - // nothing to request - return - } - - trace!(target: "net::tx::propagation", - peer_id=format!("{peer_id:#}"), - hashes_len=valid_announcement_data.iter().count(), - hashes=?valid_announcement_data.keys().collect::>(), - msg_version=%valid_announcement_data.msg_version(), - client_version=%client, - "received previously unseen and pending hashes in announcement from peer" - ); + if builder.is_empty() { + trace!(target: "net::tx", ?peer_id, "Nothing to propagate to peer; has seen all transactions"); + continue + } - // only send request for hashes to idle peer, otherwise buffer hashes storing peer as - // fallback - if !self.transaction_fetcher.is_idle(&peer_id) { - // load message version before announcement data is destructed in packing - let msg_version = valid_announcement_data.msg_version(); - let (hashes, _version) = valid_announcement_data.into_request_hashes(); + let PropagateTransactions { pooled, full } = builder.build(); - trace!(target: "net::tx", - peer_id=format!("{peer_id:#}"), - hashes=?*hashes, - %msg_version, - %client, - "buffering hashes announced by busy peer" - ); + // send hashes if any + if let Some(mut new_pooled_hashes) = pooled { + // enforce tx soft limit per message for the (unlikely) event the number of + // hashes exceeds it + new_pooled_hashes + .truncate(SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE); - self.transaction_fetcher.buffer_hashes(hashes, Some(peer_id)); + for hash in new_pooled_hashes.iter_hashes().copied() { + propagated.0.entry(hash).or_default().push(PropagateKind::Hash(*peer_id)); + // mark transaction as seen by peer + peer.seen_transactions.insert(hash); + } - return - } + trace!(target: "net::tx", ?peer_id, num_txs=?new_pooled_hashes.len(), "Propagating tx hashes to peer"); - // load message version before announcement data type is destructed in packing - let msg_version = valid_announcement_data.msg_version(); - // - // demand recommended soft limit on response, however the peer may enforce an arbitrary - // limit on the response (2MB) - // - // request buffer is shrunk via call to pack request! 
-        let init_capacity_req =
-            self.transaction_fetcher.approx_capacity_get_pooled_transactions_req(msg_version);
-        let mut hashes_to_request = RequestTxHashes::with_capacity(init_capacity_req);
-        let surplus_hashes =
-            self.transaction_fetcher.pack_request(&mut hashes_to_request, valid_announcement_data);
+                // send hashes of transactions
+                self.network.send_transactions_hashes(*peer_id, new_pooled_hashes);
+            }
 
-        if !surplus_hashes.is_empty() {
-            trace!(target: "net::tx",
-                peer_id=format!("{peer_id:#}"),
-                surplus_hashes=?*surplus_hashes,
-                %msg_version,
-                %client,
-                "some hashes in announcement from peer didn't fit in `GetPooledTransactions` request, buffering surplus hashes"
-            );
+            // send full transactions, if any
+            if let Some(new_full_transactions) = full {
+                for tx in &new_full_transactions {
+                    propagated
+                        .0
+                        .entry(*tx.tx_hash())
+                        .or_default()
+                        .push(PropagateKind::Full(*peer_id));
+                    // mark transaction as seen by peer
+                    peer.seen_transactions.insert(*tx.tx_hash());
+                }
 
-            self.transaction_fetcher.buffer_hashes(surplus_hashes, Some(peer_id));
+                trace!(target: "net::tx", ?peer_id, num_txs=?new_full_transactions.len(), "Propagating full transactions to peer");
+
+                // send full transactions
+                self.network.send_transactions(*peer_id, new_full_transactions);
+            }
        }
 
-        trace!(target: "net::tx",
-            peer_id=format!("{peer_id:#}"),
-            hashes=?*hashes_to_request,
-            %msg_version,
-            %client,
-            "sending hashes in `GetPooledTransactions` request to peer's session"
+        // Update propagated transactions metrics
+        self.metrics.propagated_transactions.increment(propagated.0.len() as u64);
+
+        propagated
+    }
+
+    /// Propagates the given transactions to the peers.
+    ///
+    /// This fetches all transactions from the pool, including the 4844 blob transactions but
+    /// __without__ their sidecar, because 4844 transactions are only ever announced as hashes.
+ fn propagate_all(&mut self, hashes: Vec) { + let propagated = self.propagate_transactions( + self.pool.get_all(hashes).into_iter().map(PropagateTransaction::new).collect(), + PropagationMode::Basic, ); - // request the missing transactions - // - // get handle to peer's session again, at this point we know it exists - let Some(peer) = self.peers.get_mut(&peer_id) else { return }; - if let Some(failed_to_request_hashes) = - self.transaction_fetcher.request_transactions_from_peer(hashes_to_request, peer) - { - let conn_eth_version = peer.version; + // notify pool so events get fired + self.pool.on_propagated(propagated); + } +} - trace!(target: "net::tx", - peer_id=format!("{peer_id:#}"), - failed_to_request_hashes=?*failed_to_request_hashes, - %conn_eth_version, - %client, - "sending `GetPooledTransactions` request to peer's session failed, buffering hashes" +impl TransactionsManager +where + Pool: TransactionPool + 'static, + <::Transaction as PoolTransaction>::Consensus: Into, +{ + /// Request handler for an incoming request for transactions + fn on_get_pooled_transactions( + &mut self, + peer_id: PeerId, + request: GetPooledTransactions, + response: oneshot::Sender>, + ) { + if let Some(peer) = self.peers.get_mut(&peer_id) { + if self.network.tx_gossip_disabled() { + let _ = response.send(Ok(PooledTransactions::default())); + return + } + let transactions = self.pool.get_pooled_transaction_elements( + request.0, + GetPooledTransactionLimit::ResponseSizeSoftLimit( + self.transaction_fetcher.info.soft_limit_byte_size_pooled_transactions_response, + ), ); - self.transaction_fetcher.buffer_hashes(failed_to_request_hashes, Some(peer_id)); + + trace!(target: "net::tx::propagation", sent_txs=?transactions.iter().map(|tx| *tx.hash()), "Sending requested transactions to peer"); + + // we sent a response at which point we assume that the peer is aware of the + // transactions + peer.seen_transactions.extend(transactions.iter().map(|tx| *tx.hash())); + + let resp = PooledTransactions(transactions); + let _ = response.send(Ok(resp)); } } @@ -1136,20 +1263,6 @@ where } } - /// Processes a batch import results. - fn on_batch_import_result(&mut self, batch_results: Vec>) { - for res in batch_results { - match res { - Ok(hash) => { - self.on_good_import(hash); - } - Err(err) => { - self.on_bad_import(err); - } - } - } - } - /// Processes a [`FetchEvent`]. fn on_fetch_event(&mut self, fetch_event: FetchEvent) { match fetch_event { @@ -1165,100 +1278,6 @@ where } } } - - /// Runs an operation to fetch hashes that are cached in [`TransactionFetcher`]. 
- fn on_fetch_hashes_pending_fetch(&mut self) { - // try drain transaction hashes pending fetch - let info = &self.pending_pool_imports_info; - let max_pending_pool_imports = info.max_pending_pool_imports; - let has_capacity_wrt_pending_pool_imports = - |divisor| info.has_capacity(max_pending_pool_imports / divisor); - - self.transaction_fetcher - .on_fetch_pending_hashes(&self.peers, has_capacity_wrt_pending_pool_imports); - } - - fn report_peer_bad_transactions(&self, peer_id: PeerId) { - self.report_peer(peer_id, ReputationChangeKind::BadTransactions); - self.metrics.reported_bad_transactions.increment(1); - } - - fn report_peer(&self, peer_id: PeerId, kind: ReputationChangeKind) { - trace!(target: "net::tx", ?peer_id, ?kind, "reporting reputation change"); - self.network.reputation_change(peer_id, kind); - } - - fn on_request_error(&self, peer_id: PeerId, req_err: RequestError) { - let kind = match req_err { - RequestError::UnsupportedCapability => ReputationChangeKind::BadProtocol, - RequestError::Timeout => ReputationChangeKind::Timeout, - RequestError::ChannelClosed | RequestError::ConnectionDropped => { - // peer is already disconnected - return - } - RequestError::BadResponse => return self.report_peer_bad_transactions(peer_id), - }; - self.report_peer(peer_id, kind); - } - - fn report_already_seen(&self, peer_id: PeerId) { - trace!(target: "net::tx", ?peer_id, "Penalizing peer for already seen transaction"); - self.network.reputation_change(peer_id, ReputationChangeKind::AlreadySeenTransaction); - } - - /// Clear the transaction - fn on_good_import(&mut self, hash: TxHash) { - self.transactions_by_peers.remove(&hash); - } - - /// Penalize the peers that intentionally sent the bad transaction, and cache it to avoid - /// fetching or importing it again. - /// - /// Errors that count as bad transactions are: - /// - /// - intrinsic gas too low - /// - exceeds gas limit - /// - gas uint overflow - /// - exceeds max init code size - /// - oversized data - /// - signer account has bytecode - /// - chain id mismatch - /// - old legacy chain id - /// - tx type not supported - /// - /// (and additionally for blobs txns...) - /// - /// - no blobs - /// - too many blobs - /// - invalid kzg proof - /// - kzg error - /// - not blob transaction (tx type mismatch) - /// - wrong versioned kzg commitment hash - fn on_bad_import(&mut self, err: PoolError) { - let peers = self.transactions_by_peers.remove(&err.hash); - - // if we're _currently_ syncing, we ignore a bad transaction - if !err.is_bad_transaction() || self.network.is_syncing() { - return - } - // otherwise we penalize the peer that sent the bad transaction, with the assumption that - // the peer should have known that this transaction is bad (e.g. violating consensus rules) - if let Some(peers) = peers { - for peer_id in peers { - self.report_peer_bad_transactions(peer_id); - } - } - self.metrics.bad_imports.increment(1); - self.bad_imports.insert(err.hash); - } - - /// Returns `true` if [`TransactionsManager`] has capacity to request pending hashes. Returns - /// `false` if [`TransactionsManager`] is operating close to full capacity. - fn has_capacity_for_fetching_pending_hashes(&self) -> bool { - self.pending_pool_imports_info - .has_capacity(self.pending_pool_imports_info.max_pending_pool_imports) && - self.transaction_fetcher.has_capacity_for_fetching_pending_hashes() - } } /// An endless future. Preemption ensure that future is non-blocking, nonetheless. 
See @@ -1271,6 +1290,7 @@ where impl Future for TransactionsManager where Pool: TransactionPool + Unpin + 'static, + <::Transaction as PoolTransaction>::Consensus: Into, { type Output = (); @@ -1449,40 +1469,37 @@ impl PropagationMode { /// A transaction that's about to be propagated to multiple peers. #[derive(Debug, Clone)] -struct PropagateTransaction { +struct PropagateTransaction { size: usize, - transaction: Arc, + transaction: Arc, } -// === impl PropagateTransaction === - -impl PropagateTransaction { - fn hash(&self) -> TxHash { - self.transaction.hash() - } - +impl PropagateTransaction { /// Create a new instance from a pooled transaction - fn new(tx: Arc>) -> Self + fn new

<P>(tx: Arc<ValidPoolTransaction<P>>) -> Self
     where
-        T: PoolTransaction<Consensus: Into<TransactionSignedEcRecovered>>,
+        P: PoolTransaction<Consensus: Into<T>>,
     {
         let size = tx.encoded_length();
-        let recovered: TransactionSignedEcRecovered =
-            tx.transaction.clone().into_consensus().into();
-        let transaction = Arc::new(recovered.into_signed());
+        let transaction = tx.transaction.clone().into_consensus().into();
+        let transaction = Arc::new(transaction);
         Self { size, transaction }
     }
+
+    fn tx_hash(&self) -> &TxHash {
+        self.transaction.tx_hash()
+    }
 }
 
 /// Helper type to construct the appropriate message to send to the peer based on whether the peer
 /// should receive them in full or as pooled
 #[derive(Debug, Clone)]
-enum PropagateTransactionsBuilder {
+enum PropagateTransactionsBuilder<T> {
     Pooled(PooledTransactionsHashesBuilder),
-    Full(FullTransactionsBuilder),
+    Full(FullTransactionsBuilder<T>),
 }
 
-impl PropagateTransactionsBuilder {
+impl<T> PropagateTransactionsBuilder<T> {
     /// Create a builder for pooled transactions
     fn pooled(version: EthVersion) -> Self {
         Self::Pooled(PooledTransactionsHashesBuilder::new(version))
@@ -1493,21 +1510,6 @@ impl PropagateTransactionsBuilder {
         Self::Full(FullTransactionsBuilder::new(version))
     }
 
-    /// Appends all transactions
-    fn extend<'a>(&mut self, txs: impl IntoIterator<Item = &'a PropagateTransaction>) {
-        for tx in txs {
-            self.push(tx);
-        }
-    }
-
-    /// Appends a transaction to the list.
-    fn push(&mut self, transaction: &PropagateTransaction) {
-        match self {
-            Self::Pooled(builder) => builder.push(transaction),
-            Self::Full(builder) => builder.push(transaction),
-        }
-    }
-
     /// Returns true if no transactions are recorded.
     fn is_empty(&self) -> bool {
         match self {
@@ -1517,7 +1519,7 @@
     }
 
     /// Consumes the type and returns the built messages that should be sent to the peer.
-    fn build(self) -> PropagateTransactions {
+    fn build(self) -> PropagateTransactions<T> {
         match self {
             Self::Pooled(pooled) => {
                 PropagateTransactions { pooled: Some(pooled.build()), full: None }
@@ -1527,12 +1529,29 @@
     }
 }
 
+impl<T: SignedTransaction> PropagateTransactionsBuilder<T> {
+    /// Appends all transactions
+    fn extend<'a>(&mut self, txs: impl IntoIterator<Item = &'a PropagateTransaction<T>>) {
+        for tx in txs {
+            self.push(tx);
+        }
+    }
+
+    /// Appends a transaction to the list.
+    fn push(&mut self, transaction: &PropagateTransaction<T>) {
+        match self {
+            Self::Pooled(builder) => builder.push(transaction),
+            Self::Full(builder) => builder.push(transaction),
+        }
+    }
+}
+
 /// Represents how the transactions should be sent to a peer if any.
-struct PropagateTransactions {
+struct PropagateTransactions<T> {
     /// The pooled transaction hashes to send.
     pooled: Option<NewPooledTransactionHashes>,
     /// The transactions to send in full.
-    full: Option<Vec<Arc<TransactionSigned>>>,
+    full: Option<Vec<Arc<T>>>,
 }
 
 /// Helper type for constructing the full transaction message that enforces the
@@ -1540,18 +1559,16 @@
 /// and enforces other propagation rules for EIP-4844 and tracks those transactions that can't be
 /// broadcasted in full.
 #[derive(Debug, Clone)]
-struct FullTransactionsBuilder {
+struct FullTransactionsBuilder<T> {
     /// The soft limit to enforce for a single broadcast message of full transactions.
     total_size: usize,
     /// All transactions to be broadcasted.
- transactions: Vec>, + transactions: Vec>, /// Transactions that didn't fit into the broadcast message pooled: PooledTransactionsHashesBuilder, } -// === impl FullTransactionsBuilder === - -impl FullTransactionsBuilder { +impl FullTransactionsBuilder { /// Create a builder for the negotiated version of the peer's session fn new(version: EthVersion) -> Self { Self { @@ -1561,8 +1578,22 @@ impl FullTransactionsBuilder { } } + /// Returns whether or not any transactions are in the [`FullTransactionsBuilder`]. + fn is_empty(&self) -> bool { + self.transactions.is_empty() && self.pooled.is_empty() + } + + /// Returns the messages that should be propagated to the peer. + fn build(self) -> PropagateTransactions { + let pooled = Some(self.pooled.build()).filter(|pooled| !pooled.is_empty()); + let full = Some(self.transactions).filter(|full| !full.is_empty()); + PropagateTransactions { pooled, full } + } +} + +impl FullTransactionsBuilder { /// Appends all transactions. - fn extend(&mut self, txs: impl IntoIterator) { + fn extend(&mut self, txs: impl IntoIterator>) { for tx in txs { self.push(&tx) } @@ -1576,7 +1607,8 @@ impl FullTransactionsBuilder { /// /// If the transaction is unsuitable for broadcast or would exceed the softlimit, it is appended /// to list of pooled transactions, (e.g. 4844 transactions). - fn push(&mut self, transaction: &PropagateTransaction) { + /// See also [`TxType::is_broadcastable_in_full`]. + fn push(&mut self, transaction: &PropagateTransaction) { // Do not send full 4844 transaction hashes to peers. // // Nodes MUST NOT automatically broadcast blob transactions to their peers. @@ -1585,7 +1617,7 @@ impl FullTransactionsBuilder { // via `GetPooledTransactions`. // // From: - if transaction.transaction.is_eip4844() { + if !transaction.transaction.tx_type().is_broadcastable_in_full() { self.pooled.push(transaction); return } @@ -1602,18 +1634,6 @@ impl FullTransactionsBuilder { self.total_size = new_size; self.transactions.push(Arc::clone(&transaction.transaction)); } - - /// Returns whether or not any transactions are in the [`FullTransactionsBuilder`]. - fn is_empty(&self) -> bool { - self.transactions.is_empty() && self.pooled.is_empty() - } - - /// Returns the messages that should be propagated to the peer. - fn build(self) -> PropagateTransactions { - let pooled = Some(self.pooled.build()).filter(|pooled| !pooled.is_empty()); - let full = Some(self.transactions).filter(|full| !full.is_empty()); - PropagateTransactions { pooled, full } - } } /// A helper type to create the pooled transactions message based on the negotiated version of the @@ -1648,17 +1668,20 @@ impl PooledTransactionsHashesBuilder { } /// Appends all hashes - fn extend(&mut self, txs: impl IntoIterator) { + fn extend( + &mut self, + txs: impl IntoIterator>, + ) { for tx in txs { self.push(&tx); } } - fn push(&mut self, tx: &PropagateTransaction) { + fn push(&mut self, tx: &PropagateTransaction) { match self { - Self::Eth66(msg) => msg.0.push(tx.hash()), + Self::Eth66(msg) => msg.0.push(*tx.tx_hash()), Self::Eth68(msg) => { - msg.hashes.push(tx.hash()); + msg.hashes.push(*tx.tx_hash()); msg.sizes.push(tx.size); msg.types.push(tx.transaction.tx_type().into()); } @@ -1732,7 +1755,7 @@ impl PeerMetadata { /// Commands to send to the [`TransactionsManager`] #[derive(Debug)] -enum TransactionsCommand { +enum TransactionsCommand { /// Propagate a transaction hash to the network. PropagateHash(B256), /// Propagate transaction hashes to a specific peer. 
@@ -1751,13 +1774,13 @@ enum TransactionsCommand { /// Requests a clone of the sender sender channel to the peer. GetPeerSender { peer_id: PeerId, - peer_request_sender: oneshot::Sender>, + peer_request_sender: oneshot::Sender>>>, }, } /// All events related to transactions emitted by the network. #[derive(Debug)] -pub enum NetworkTransactionEvent { +pub enum NetworkTransactionEvent { /// Represents the event of receiving a list of transactions from a peer. /// /// This indicates transactions that were broadcasted to us from the peer. @@ -1765,7 +1788,7 @@ pub enum NetworkTransactionEvent { /// The ID of the peer from which the transactions were received. peer_id: PeerId, /// The received transactions. - msg: Transactions, + msg: Transactions, }, /// Represents the event of receiving a list of transaction hashes from a peer. IncomingPooledTransactionHashes { @@ -1781,10 +1804,10 @@ pub enum NetworkTransactionEvent { /// The received `GetPooledTransactions` request. request: GetPooledTransactions, /// The sender for responding to the request with a result of `PooledTransactions`. - response: oneshot::Sender>, + response: oneshot::Sender>>, }, /// Represents the event of receiving a `GetTransactionsHandle` request. - GetTransactionsHandle(oneshot::Sender>), + GetTransactionsHandle(oneshot::Sender>>), } /// Tracks stats about the [`TransactionsManager`]. @@ -2155,7 +2178,7 @@ mod tests { .await; assert!(!pool.is_empty()); - assert!(pool.get(&signed_tx.hash).is_some()); + assert!(pool.get(signed_tx.hash_ref()).is_some()); handle.terminate().await; } @@ -2230,7 +2253,7 @@ mod tests { .add_transaction(reth_transaction_pool::TransactionOrigin::External, tx.clone()) .await; - let request = GetPooledTransactions(vec![tx.get_hash()]); + let request = GetPooledTransactions(vec![*tx.get_hash()]); let (send, receive) = oneshot::channel::>(); @@ -2365,7 +2388,8 @@ mod tests { #[test] fn test_transaction_builder_empty() { - let mut builder = PropagateTransactionsBuilder::pooled(EthVersion::Eth68); + let mut builder = + PropagateTransactionsBuilder::::pooled(EthVersion::Eth68); assert!(builder.is_empty()); let mut factory = MockTransactionFactory::default(); @@ -2381,7 +2405,8 @@ mod tests { #[test] fn test_transaction_builder_large() { - let mut builder = PropagateTransactionsBuilder::full(EthVersion::Eth68); + let mut builder = + PropagateTransactionsBuilder::::full(EthVersion::Eth68); assert!(builder.is_empty()); let mut factory = MockTransactionFactory::default(); @@ -2409,7 +2434,8 @@ mod tests { #[test] fn test_transaction_builder_eip4844() { - let mut builder = PropagateTransactionsBuilder::full(EthVersion::Eth68); + let mut builder = + PropagateTransactionsBuilder::::full(EthVersion::Eth68); assert!(builder.is_empty()); let mut factory = MockTransactionFactory::default(); diff --git a/crates/net/network/src/transactions/validation.rs b/crates/net/network/src/transactions/validation.rs index 7bfe07761a2..1575d9f3374 100644 --- a/crates/net/network/src/transactions/validation.rs +++ b/crates/net/network/src/transactions/validation.rs @@ -2,8 +2,6 @@ //! and [`NewPooledTransactionHashes68`](reth_eth_wire::NewPooledTransactionHashes68) //! announcements. Validation and filtering of announcements is network dependent. 
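As the module doc above notes, announcement validation is network dependent: an eth68 announcement carries `(types, sizes, hashes)` triples that can be sanity-checked per entry before anything is fetched. A hedged, self-contained sketch of that idea; the accepted type-byte range and the `MAX_MESSAGE_SIZE` value below are illustrative stand-ins for the crate's actual, richer rules:

/// Illustrative stand-in for reth_eth_wire::MAX_MESSAGE_SIZE.
const MAX_MESSAGE_SIZE: usize = 10 * 1024 * 1024;

#[derive(Debug, PartialEq)]
enum Outcome {
    Ok,
    ReportPeer,
}

/// Validates a single (type, size) entry of an eth68 announcement: an unknown
/// transaction type byte or an impossible size marks the announcement as spam,
/// and the sending peer gets reported with a reputation change.
fn validate_eth68_entry(ty: u8, size: usize) -> Outcome {
    // 0x00..=0x04 would cover legacy, EIP-2930, EIP-1559, EIP-4844 and
    // EIP-7702 on a mainnet-like network; other networks accept other types.
    let known_type = matches!(ty, 0x00..=0x04);
    // a zero-sized or larger-than-one-message transaction cannot be real
    let sane_size = size > 0 && size <= MAX_MESSAGE_SIZE;
    if known_type && sane_size {
        Outcome::Ok
    } else {
        Outcome::ReportPeer
    }
}

fn main() {
    assert_eq!(validate_eth68_entry(0x02, 120), Outcome::Ok);
    assert_eq!(validate_eth68_entry(0x7f, 120), Outcome::ReportPeer);
}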
-use std::{fmt, fmt::Display, mem}; - use crate::metrics::{AnnouncedTxTypesMetrics, TxTypesCounter}; use alloy_primitives::{Signature, TxHash}; use derive_more::{Deref, DerefMut}; @@ -12,6 +10,7 @@ use reth_eth_wire::{ MAX_MESSAGE_SIZE, }; use reth_primitives::TxType; +use std::{fmt, fmt::Display, mem}; use tracing::trace; /// The size of a decoded signature in bytes. diff --git a/crates/net/network/tests/it/big_pooled_txs_req.rs b/crates/net/network/tests/it/big_pooled_txs_req.rs index 4d65e3f63ba..328229e87e1 100644 --- a/crates/net/network/tests/it/big_pooled_txs_req.rs +++ b/crates/net/network/tests/it/big_pooled_txs_req.rs @@ -26,16 +26,13 @@ async fn test_large_tx_req() { // replace rng txhash with real txhash let mut tx = MockTransaction::eip1559(); - let ts = TransactionSigned { - hash: Default::default(), - signature: Signature::test_signature(), - transaction: tx.clone().into(), - }; + let ts = + TransactionSigned::new_unhashed(tx.clone().into(), Signature::test_signature()); tx.set_hash(ts.recalculate_hash()); tx }) .collect(); - let txs_hashes: Vec = txs.iter().map(|tx| tx.get_hash()).collect(); + let txs_hashes: Vec = txs.iter().map(|tx| *tx.get_hash()).collect(); // setup testnet let mut net = Testnet::create_with(2, MockEthProvider::default()).await; diff --git a/crates/net/network/tests/it/requests.rs b/crates/net/network/tests/it/requests.rs index 54e1f4e12b4..0dd38c959de 100644 --- a/crates/net/network/tests/it/requests.rs +++ b/crates/net/network/tests/it/requests.rs @@ -33,7 +33,7 @@ pub fn rng_transaction(rng: &mut impl rand::RngCore) -> TransactionSigned { }); let signature = Signature::new(U256::default(), U256::default(), true); - TransactionSigned::from_transaction_and_signature(request, signature) + TransactionSigned::new_unhashed(request, signature) } #[tokio::test(flavor = "multi_thread")] diff --git a/crates/net/network/tests/it/txgossip.rs b/crates/net/network/tests/it/txgossip.rs index 2e2ee4a031a..ebde61ef8ea 100644 --- a/crates/net/network/tests/it/txgossip.rs +++ b/crates/net/network/tests/it/txgossip.rs @@ -95,7 +95,7 @@ async fn test_4844_tx_gossip_penalization() { let peer0_reputation_after = peer1.peer_handle().peer_by_id(*peer0.peer_id()).await.unwrap().reputation(); assert_ne!(peer0_reputation_before, peer0_reputation_after); - assert_eq!(received, txs[1].transaction().hash); + assert_eq!(received, txs[1].transaction().hash()); // this will return an [`Empty`] error because blob txs are disallowed to be broadcasted assert!(peer1_tx_listener.try_recv().is_err()); @@ -132,10 +132,7 @@ async fn test_sending_invalid_transactions() { value: Default::default(), input: Default::default(), }; - let tx = TransactionSigned::from_transaction_and_signature( - tx.into(), - Signature::test_signature(), - ); + let tx = TransactionSigned::new_unhashed(tx.into(), Signature::test_signature()); peer0.network().send_transactions(*peer1.peer_id(), vec![Arc::new(tx)]); } diff --git a/crates/net/p2p/src/bodies/downloader.rs b/crates/net/p2p/src/bodies/downloader.rs index f335b21438b..7008c08e522 100644 --- a/crates/net/p2p/src/bodies/downloader.rs +++ b/crates/net/p2p/src/bodies/downloader.rs @@ -2,7 +2,7 @@ use super::response::BlockResponse; use crate::error::DownloadResult; use alloy_primitives::BlockNumber; use futures::Stream; -use std::ops::RangeInclusive; +use std::{fmt::Debug, ops::RangeInclusive}; /// Body downloader return type. 
pub type BodyDownloaderResult = DownloadResult>>; @@ -16,7 +16,7 @@ pub trait BodyDownloader: Send + Sync + Stream> + Unpin { /// The type of the body that is being downloaded. - type Body: Send + Sync + Unpin + 'static; + type Body: Debug + Send + Sync + Unpin + 'static; /// Method for setting the download range. fn set_download_range(&mut self, range: RangeInclusive) -> DownloadResult<()>; diff --git a/crates/net/p2p/src/headers/client.rs b/crates/net/p2p/src/headers/client.rs index 3e8f9296e07..4be6208c4a2 100644 --- a/crates/net/p2p/src/headers/client.rs +++ b/crates/net/p2p/src/headers/client.rs @@ -50,7 +50,8 @@ impl HeadersRequest { } /// The headers future type -pub type HeadersFut = Pin>> + Send + Sync>>; +pub type HeadersFut = + Pin>> + Send + Sync>>; /// The block headers downloader client #[auto_impl::auto_impl(&, Arc, Box)] diff --git a/crates/net/p2p/src/headers/downloader.rs b/crates/net/p2p/src/headers/downloader.rs index f02d9461fc1..eca03bdb4e7 100644 --- a/crates/net/p2p/src/headers/downloader.rs +++ b/crates/net/p2p/src/headers/downloader.rs @@ -4,9 +4,11 @@ use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; use alloy_primitives::B256; use futures::Stream; -use reth_consensus::Consensus; +use reth_consensus::HeaderValidator; use reth_primitives::SealedHeader; use reth_primitives_traits::BlockWithParent; +use std::fmt::Debug; + /// A downloader capable of fetching and yielding block headers. /// /// A downloader represents a distinct strategy for submitting requests to download block headers, @@ -21,7 +23,7 @@ pub trait HeaderDownloader: + Unpin { /// The header type being downloaded. - type Header: Send + Sync + Unpin + 'static; + type Header: Debug + Send + Sync + Unpin + 'static; /// Updates the gap to sync which ranges from local head to the sync target /// @@ -83,7 +85,7 @@ impl SyncTarget { /// /// Returns Ok(false) if the pub fn validate_header_download( - consensus: &dyn Consensus, + consensus: &dyn HeaderValidator, header: &SealedHeader, parent: &SealedHeader, ) -> DownloadResult<()> { diff --git a/crates/net/p2p/src/test_utils/headers.rs b/crates/net/p2p/src/test_utils/headers.rs index bc5262abef4..5809ad6bdd4 100644 --- a/crates/net/p2p/src/test_utils/headers.rs +++ b/crates/net/p2p/src/test_utils/headers.rs @@ -147,7 +147,7 @@ impl Stream for TestDownload { let empty: SealedHeader = SealedHeader::default(); if let Err(error) = - Consensus::<_>::validate_header_against_parent(&this.consensus, &empty, &empty) + >::validate_header_against_parent(&this.consensus, &empty, &empty) { this.done = true; return Poll::Ready(Some(Err(DownloadError::HeaderValidation { diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 89892ed5985..65ae704fe83 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -22,8 +22,8 @@ use reth_network::{ NetworkHandle, NetworkManager, }; use reth_node_api::{ - FullNodeTypes, FullNodeTypesAdapter, NodeAddOns, NodeTypes, NodeTypesWithDBAdapter, - NodeTypesWithEngine, + FullNodePrimitives, FullNodeTypes, FullNodeTypesAdapter, NodeAddOns, NodeTypes, + NodeTypesWithDBAdapter, NodeTypesWithEngine, }; use reth_node_core::{ cli::config::{PayloadBuilderConfig, RethTransactionPoolConfig}, @@ -31,7 +31,10 @@ use reth_node_core::{ node_config::NodeConfig, primitives::Head, }; -use reth_provider::{providers::BlockchainProvider, ChainSpecProvider, FullProvider}; +use reth_provider::{ + providers::{BlockchainProvider, NodeTypesForProvider}, + 
ChainSpecProvider, FullProvider, +}; use reth_tasks::TaskExecutor; use reth_transaction_pool::{PoolConfig, TransactionPool}; use revm_primitives::EnvKzgSettings; @@ -240,7 +243,7 @@ where /// Configures the types of the node. pub fn with_types(self) -> NodeBuilderWithTypes> where - T: NodeTypesWithEngine, + T: NodeTypesWithEngine + NodeTypesForProvider, { self.with_types_and_provider() } @@ -250,7 +253,7 @@ where self, ) -> NodeBuilderWithTypes, P>> where - T: NodeTypesWithEngine, + T: NodeTypesWithEngine + NodeTypesForProvider, P: FullProvider>, { NodeBuilderWithTypes::new(self.config, self.database) @@ -264,7 +267,7 @@ where node: N, ) -> NodeBuilderWithComponents, N::ComponentsBuilder, N::AddOns> where - N: Node, ChainSpec = ChainSpec>, + N: Node, ChainSpec = ChainSpec> + NodeTypesForProvider, { self.with_types().with_components(node.components_builder()).with_add_ons(node.add_ons()) } @@ -301,7 +304,7 @@ where /// Configures the types of the node. pub fn with_types(self) -> WithLaunchContext>> where - T: NodeTypesWithEngine, + T: NodeTypesWithEngine + NodeTypesForProvider, { WithLaunchContext { builder: self.builder.with_types(), task_executor: self.task_executor } } @@ -313,7 +316,7 @@ where NodeBuilderWithTypes, P>>, > where - T: NodeTypesWithEngine, + T: NodeTypesWithEngine + NodeTypesForProvider, P: FullProvider>, { WithLaunchContext { @@ -332,7 +335,7 @@ where NodeBuilderWithComponents, N::ComponentsBuilder, N::AddOns>, > where - N: Node, ChainSpec = ChainSpec>, + N: Node, ChainSpec = ChainSpec> + NodeTypesForProvider, { self.with_types().with_components(node.components_builder()).with_add_ons(node.add_ons()) } @@ -355,13 +358,14 @@ where >, > where - N: Node, ChainSpec = ChainSpec>, + N: Node, ChainSpec = ChainSpec> + NodeTypesForProvider, N::AddOns: RethRpcAddOns< NodeAdapter< RethFullAdapter, >>::Components, >, >, + N::Primitives: FullNodePrimitives, { self.node(node).launch().await } @@ -549,9 +553,10 @@ where impl WithLaunchContext, CB, AO>> where DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, - T: NodeTypesWithEngine, + T: NodeTypesWithEngine + NodeTypesForProvider, CB: NodeComponentsBuilder>, AO: RethRpcAddOns, CB::Components>>, + T::Primitives: FullNodePrimitives, { /// Launches the node with the [`DefaultNodeLauncher`] that sets up engine API consensus and rpc pub async fn launch( diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 972fdc640df..47ec68ff0d7 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -2,17 +2,23 @@ use std::{sync::Arc, thread::available_parallelism}; +use crate::{ + components::{NodeComponents, NodeComponentsBuilder}, + hooks::OnComponentInitializedHook, + BuilderContext, NodeAdapter, +}; use alloy_primitives::{BlockNumber, B256}; use eyre::{Context, OptionExt}; use rayon::ThreadPoolBuilder; use reth_beacon_consensus::EthBeaconConsensus; use reth_blockchain_tree::{ - BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, + externals::TreeNodeTypes, BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, + TreeExternals, }; use reth_chainspec::{Chain, EthChainSpec, EthereumHardforks}; use reth_config::{config::EtlConfig, PruneConfig}; use reth_consensus::Consensus; -use reth_db_api::database::Database; +use reth_db_api::{database::Database, database_metrics::DatabaseMetrics}; use reth_db_common::init::{init_genesis, InitDatabaseError}; use 
reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; use reth_engine_local::MiningMode; @@ -21,7 +27,7 @@ use reth_evm::noop::NoopBlockExecutorProvider; use reth_fs_util as fs; use reth_invalid_block_hooks::InvalidBlockWitnessHook; use reth_network_p2p::headers::client::HeadersClient; -use reth_node_api::{FullNodeTypes, NodeTypes, NodeTypesWithDB}; +use reth_node_api::{FullNodePrimitives, FullNodeTypes, NodeTypes, NodeTypesWithDB}; use reth_node_core::{ args::InvalidBlockHookType, dirs::{ChainPath, DataDirPath}, @@ -34,6 +40,7 @@ use reth_node_core::{ use reth_node_metrics::{ chain::ChainSpecInfo, hooks::Hooks, + recorder::install_prometheus_recorder, server::{MetricServer, MetricServerConfig}, version::VersionInfo, }; @@ -58,12 +65,6 @@ use tokio::sync::{ oneshot, watch, }; -use crate::{ - components::{NodeComponents, NodeComponentsBuilder}, - hooks::OnComponentInitializedHook, - BuilderContext, NodeAdapter, -}; - /// Allows to set a tree viewer for a configured blockchain provider. // TODO: remove this helper trait once the engine revamp is done, the new // blockchain provider won't require a TreeViewer. @@ -404,9 +405,11 @@ where /// Returns the [`ProviderFactory`] for the attached storage after executing a consistent check /// between the database and static files. **It may execute a pipeline unwind if it fails this /// check.** - pub async fn create_provider_factory>( - &self, - ) -> eyre::Result> { + pub async fn create_provider_factory(&self) -> eyre::Result> + where + N: ProviderNodeTypes, + N::Primitives: FullNodePrimitives, + { let factory = ProviderFactory::new( self.right().clone(), self.chain_spec(), @@ -467,9 +470,13 @@ where } /// Creates a new [`ProviderFactory`] and attaches it to the launch context. - pub async fn with_provider_factory>( + pub async fn with_provider_factory( self, - ) -> eyre::Result, ProviderFactory>>> { + ) -> eyre::Result, ProviderFactory>>> + where + N: ProviderNodeTypes, + N::Primitives: FullNodePrimitives, + { let factory = self.create_provider_factory().await?; let ctx = LaunchContextWith { inner: self.inner, @@ -482,7 +489,7 @@ where impl LaunchContextWith, ProviderFactory>> where - T: NodeTypesWithDB, + T: ProviderNodeTypes, { /// Returns access to the underlying database. pub const fn database(&self) -> &T::DB { @@ -509,6 +516,9 @@ where /// Starts the prometheus endpoint. pub async fn start_prometheus_endpoint(&self) -> eyre::Result<()> { + // ensure recorder runs upkeep periodically + install_prometheus_recorder().spawn_upkeep(); + let listen_addr = self.node_config().metrics; if let Some(addr) = listen_addr { info!(target: "reth::cli", "Starting metrics endpoint at {}", addr); @@ -524,7 +534,20 @@ where }, ChainSpecInfo { name: self.left().config.chain.chain().to_string() }, self.task_executor().clone(), - Hooks::new(self.database().clone(), self.static_file_provider()), + Hooks::builder() + .with_hook({ + let db = self.database().clone(); + move || db.report_metrics() + }) + .with_hook({ + let sfp = self.static_file_provider(); + move || { + if let Err(error) = sfp.report_metrics() { + error!(%error, "Failed to report metrics for the static file provider"); + } + } + }) + .build(), ); MetricServer::new(config).serve().await?; @@ -620,7 +643,7 @@ impl Attached::ChainSpec>, WithMeteredProviders>, > where - T: FullNodeTypes, + T: FullNodeTypes, { /// Returns access to the underlying database. 
pub const fn database(&self) -> &::DB { @@ -745,10 +768,7 @@ impl Attached::ChainSpec>, WithComponents>, > where - T: FullNodeTypes< - Provider: WithTree, - Types: NodeTypes, - >, + T: FullNodeTypes, CB: NodeComponentsBuilder, { /// Returns the configured `ProviderFactory`. @@ -819,7 +839,10 @@ where /// This checks for OP-Mainnet and ensures we have all the necessary data to progress (past /// bedrock height) fn ensure_chain_specific_db_checks(&self) -> ProviderResult<()> { - if self.chain_id() == Chain::optimism_mainnet() { + if self.chain_spec().is_optimism() && + !self.is_dev() && + self.chain_id() == Chain::optimism_mainnet() + { let latest = self.blockchain_db().last_block_number()?; // bedrock height if latest < 105235063 { @@ -907,7 +930,7 @@ impl where T: FullNodeTypes< Provider: WithTree + StateProviderFactory + ChainSpecProvider, - Types: NodeTypes, + Types: ProviderNodeTypes, >, CB: NodeComponentsBuilder, { diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index 86ab0b9a3d7..ef1edc899eb 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -17,10 +17,10 @@ use reth_engine_tree::{ use reth_engine_util::EngineMessageStreamExt; use reth_exex::ExExManagerHandle; use reth_network::{NetworkSyncUpdater, SyncState}; -use reth_network_api::{BlockDownloaderProvider, NetworkEventListenerProvider}; +use reth_network_api::BlockDownloaderProvider; use reth_node_api::{ - BuiltPayload, FullNodeTypes, NodeTypesWithEngine, PayloadAttributesBuilder, PayloadBuilder, - PayloadTypes, + BuiltPayload, FullNodePrimitives, FullNodeTypes, NodeTypesWithEngine, PayloadAttributesBuilder, + PayloadBuilder, PayloadTypes, }; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, @@ -77,6 +77,7 @@ where LocalPayloadAttributesBuilder: PayloadAttributesBuilder< <::Engine as PayloadTypes>::PayloadAttributes, >, + Types::Primitives: FullNodePrimitives, { type Node = NodeHandle, AO>; @@ -254,7 +255,6 @@ where info!(target: "reth::cli", "Consensus engine initialized"); let events = stream_select!( - ctx.components().network().event_listener().map(Into::into), beacon_engine_handle.event_listener().map(Into::into), pipeline_events.map(Into::into), if ctx.node_config().debug.tip.is_none() && !ctx.is_dev() { diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index 4f9e850c97f..a1819948ee4 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -17,18 +17,18 @@ use reth_beacon_consensus::{ BeaconConsensusEngine, }; use reth_blockchain_tree::{noop::NoopBlockchainTree, BlockchainTreeConfig}; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_chainspec::EthChainSpec; use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider, RpcBlockProvider}; use reth_engine_util::EngineMessageStreamExt; use reth_exex::ExExManagerHandle; -use reth_network::{BlockDownloaderProvider, NetworkEventListenerProvider}; -use reth_node_api::{AddOnsContext, FullNodeTypes, NodeTypesWithDB, NodeTypesWithEngine}; +use reth_network::BlockDownloaderProvider; +use reth_node_api::{AddOnsContext, FullNodePrimitives, FullNodeTypes, NodeTypesWithEngine}; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, exit::NodeExitFuture, }; use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; -use reth_provider::providers::BlockchainProvider; +use reth_provider::providers::{BlockchainProvider, ProviderNodeTypes}; use reth_rpc::eth::RpcNodeCore; use 
reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, info}; @@ -98,10 +98,11 @@ impl DefaultNodeLauncher { impl LaunchNode> for DefaultNodeLauncher where - Types: NodeTypesWithDB + NodeTypesWithEngine, + Types: ProviderNodeTypes + NodeTypesWithEngine, T: FullNodeTypes, Types = Types>, CB: NodeComponentsBuilder, AO: RethRpcAddOns>, + Types::Primitives: FullNodePrimitives, { type Node = NodeHandle, AO>; @@ -260,8 +261,6 @@ where info!(target: "reth::cli", "Consensus engine initialized"); let events = stream_select!( - ctx.components().network().event_listener().map(Into::into), - beacon_engine_handle.event_listener().map(Into::into), pipeline_events.map(Into::into), if ctx.node_config().debug.tip.is_none() && !ctx.is_dev() { Either::Left( diff --git a/crates/node/builder/src/node.rs b/crates/node/builder/src/node.rs index 62c710ea802..ce7d12fee3d 100644 --- a/crates/node/builder/src/node.rs +++ b/crates/node/builder/src/node.rs @@ -71,6 +71,8 @@ where type ChainSpec = ::ChainSpec; type StateCommitment = ::StateCommitment; + + type Storage = ::Storage; } impl NodeTypesWithEngine for AnyNode diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index adee942748c..fda8b66f8d7 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -399,7 +399,7 @@ where } } -impl NodeAddOns for RpcAddOns +impl RpcAddOns where N: FullNodeComponents< Types: ProviderNodeTypes, @@ -408,9 +408,16 @@ where EthApi: EthApiTypes + FullEthApiServer + AddDevSigners + Unpin + 'static, EV: EngineValidatorBuilder, { - type Handle = RpcHandle; - - async fn launch_add_ons(self, ctx: AddOnsContext<'_, N>) -> eyre::Result { + /// Launches the RPC servers with the given context and an additional hook for extending + /// modules. + pub async fn launch_add_ons_with( + self, + ctx: AddOnsContext<'_, N>, + ext: F, + ) -> eyre::Result> + where + F: FnOnce(&mut TransportRpcModules) -> eyre::Result<()>, + { let Self { eth_api_builder, engine_validator_builder, hooks, _pd: _ } = self; let engine_validator = engine_validator_builder.build(&ctx).await?; @@ -467,6 +474,7 @@ where let RpcHooks { on_rpc_started, extend_rpc_modules } = hooks; + ext(ctx.modules)?; extend_rpc_modules.extend_rpc_modules(ctx)?; let server_config = config.rpc.rpc_server_config(); @@ -513,6 +521,22 @@ where } } +impl NodeAddOns for RpcAddOns +where + N: FullNodeComponents< + Types: ProviderNodeTypes, + PayloadBuilder: PayloadBuilder::Engine>, + >, + EthApi: EthApiTypes + FullEthApiServer + AddDevSigners + Unpin + 'static, + EV: EngineValidatorBuilder, +{ + type Handle = RpcHandle; + + async fn launch_add_ons(self, ctx: AddOnsContext<'_, N>) -> eyre::Result { + self.launch_add_ons_with(ctx, |_| Ok(())).await + } +} + /// Helper trait implemented for add-ons producing [`RpcHandle`]. Used by common node launcher /// implementations. 
pub trait RethRpcAddOns: diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index db188402ca8..3258ba8fe54 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -14,6 +14,7 @@ use reth_exex::ExExManagerHandle; use reth_network_p2p::{ bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader, EthBlockClient, }; +use reth_node_api::{FullNodePrimitives, NodePrimitives}; use reth_provider::{providers::ProviderNodeTypes, ProviderFactory}; use reth_stages::{prelude::DefaultStages, stages::ExecutionStage, Pipeline, StageSet}; use reth_static_file::StaticFileProducer; @@ -26,7 +27,7 @@ use tokio::sync::watch; pub fn build_networked_pipeline( config: &StageConfig, client: Client, - consensus: Arc, + consensus: Arc>, provider_factory: ProviderFactory, task_executor: &TaskExecutor, metrics_tx: reth_stages::MetricEventsSender, @@ -40,10 +41,11 @@ where N: ProviderNodeTypes, Client: EthBlockClient + 'static, Executor: BlockExecutorProvider, + N::Primitives: FullNodePrimitives, { // building network downloaders using the fetch client let header_downloader = ReverseHeadersDownloaderBuilder::new(config.headers) - .build(client.clone(), Arc::clone(&consensus)) + .build(client.clone(), consensus.clone().as_header_validator()) .into_task_with(task_executor); let body_downloader = BodiesDownloaderBuilder::new(config.bodies) @@ -85,8 +87,11 @@ pub fn build_pipeline( where N: ProviderNodeTypes, H: HeaderDownloader

+ 'static, - B: BodyDownloader + 'static, + B: BodyDownloader< + Body = <::Block as reth_node_api::Block>::Body, + > + 'static, Executor: BlockExecutorProvider, + N::Primitives: FullNodePrimitives, { let mut builder = Pipeline::::builder(); diff --git a/crates/node/events/Cargo.toml b/crates/node/events/Cargo.toml index 4b4d912a27b..03f3ab17288 100644 --- a/crates/node/events/Cargo.toml +++ b/crates/node/events/Cargo.toml @@ -16,7 +16,7 @@ reth-storage-api.workspace = true reth-beacon-consensus.workspace = true reth-network-api.workspace = true reth-stages.workspace = true -reth-prune.workspace = true +reth-prune-types.workspace = true reth-static-file-types.workspace = true reth-primitives-traits.workspace = true reth-engine-primitives.workspace = true diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index 285e28d0f2e..edd85501ec0 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -7,9 +7,9 @@ use alloy_rpc_types_engine::ForkchoiceState; use futures::Stream; use reth_beacon_consensus::{BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress}; use reth_engine_primitives::ForkchoiceStatus; -use reth_network_api::{NetworkEvent, PeersInfo}; +use reth_network_api::PeersInfo; use reth_primitives_traits::{format_gas, format_gas_throughput}; -use reth_prune::PrunerEvent; +use reth_prune_types::PrunerEvent; use reth_stages::{EntitiesCheckpoint, ExecOutput, PipelineEvent, StageCheckpoint, StageId}; use reth_static_file_types::StaticFileProducerEvent; use std::{ @@ -211,12 +211,6 @@ impl NodeState { } } - fn handle_network_event(&self, _: NetworkEvent) { - // NOTE(onbjerg): This used to log established/disconnecting sessions, but this is already - // logged in the networking component. I kept this stub in case we want to catch other - // networking events later on. - } - fn handle_consensus_engine_event(&mut self, event: BeaconConsensusEngineEvent) { match event { BeaconConsensusEngineEvent::ForkchoiceUpdated(state, status) => { @@ -307,7 +301,11 @@ impl NodeState { info!(tip_block_number, "Pruner started"); } PrunerEvent::Finished { tip_block_number, elapsed, stats } => { - info!(tip_block_number, ?elapsed, ?stats, "Pruner finished"); + let stats = format!( + "[{}]", + stats.iter().map(|item| item.to_string()).collect::>().join(", ") + ); + info!(tip_block_number, ?elapsed, %stats, "Pruner finished"); } } } @@ -354,8 +352,6 @@ struct CurrentStage { /// A node event. #[derive(Debug)] pub enum NodeEvent { - /// A network event. - Network(NetworkEvent), /// A sync pipeline event. Pipeline(PipelineEvent), /// A consensus engine event. 
@@ -371,12 +367,6 @@ pub enum NodeEvent { Other(String), } -impl From for NodeEvent { - fn from(event: NetworkEvent) -> Self { - Self::Network(event) - } -} - impl From for NodeEvent { fn from(event: PipelineEvent) -> Self { Self::Pipeline(event) @@ -523,9 +513,6 @@ where while let Poll::Ready(Some(event)) = this.events.as_mut().poll_next(cx) { match event { - NodeEvent::Network(event) => { - this.state.handle_network_event(event); - } NodeEvent::Pipeline(event) => { this.state.handle_pipeline_event(event); } diff --git a/crates/node/metrics/Cargo.toml b/crates/node/metrics/Cargo.toml index a823db9b467..3d79d11db7d 100644 --- a/crates/node/metrics/Cargo.toml +++ b/crates/node/metrics/Cargo.toml @@ -8,9 +8,6 @@ homepage.workspace = true repository.workspace = true [dependencies] -reth-db-api.workspace = true -reth-primitives-traits.workspace = true -reth-provider.workspace = true reth-metrics.workspace = true reth-tasks.workspace = true @@ -21,7 +18,7 @@ metrics-util.workspace = true tokio.workspace = true -jsonrpsee = { workspace = true, features = ["server"] } +jsonrpsee-server.workspace = true http.workspace = true tower.workspace = true @@ -37,7 +34,6 @@ procfs = "0.16.0" [dev-dependencies] reqwest.workspace = true socket2 = { version = "0.5", default-features = false } -reth-provider = { workspace = true, features = ["test-utils"] } [lints] workspace = true diff --git a/crates/node/metrics/src/hooks.rs b/crates/node/metrics/src/hooks.rs index 21d12614f62..3b6d23a3900 100644 --- a/crates/node/metrics/src/hooks.rs +++ b/crates/node/metrics/src/hooks.rs @@ -1,20 +1,59 @@ use metrics_process::Collector; -use reth_db_api::database_metrics::DatabaseMetrics; -use reth_primitives_traits::NodePrimitives; -use reth_provider::providers::StaticFileProvider; -use std::{ - fmt::{self}, - sync::Arc, -}; +use std::{fmt, sync::Arc}; -pub(crate) trait Hook: Fn() + Send + Sync {} -impl Hook for T {} +/// The simple alias for function types that are `'static`, `Send`, and `Sync`. +pub trait Hook: Fn() + Send + Sync + 'static {} +impl Hook for T {} -impl fmt::Debug for Hooks { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let hooks_len = self.inner.len(); - f.debug_struct("Hooks") - .field("inner", &format!("Arc>>, len: {}", hooks_len)) +/// A builder-like type to create a new [`Hooks`] instance. +pub struct HooksBuilder { + hooks: Vec>>, +} + +impl HooksBuilder { + /// Registers a [`Hook`]. + pub fn with_hook(self, hook: impl Hook) -> Self { + self.with_boxed_hook(Box::new(hook)) + } + + /// Registers a [`Hook`] by calling the provided closure. + pub fn install_hook(self, f: F) -> Self + where + F: FnOnce() -> H, + H: Hook, + { + self.with_hook(f()) + } + + /// Registers a [`Hook`]. + #[inline] + pub fn with_boxed_hook(mut self, hook: Box>) -> Self { + self.hooks.push(hook); + self + } + + /// Builds the [`Hooks`] collection from the registered hooks. 
+ pub fn build(self) -> Hooks { + Hooks { inner: Arc::new(self.hooks) } + } +} + +impl Default for HooksBuilder { + fn default() -> Self { + Self { + hooks: vec![ + Box::new(|| Collector::default().collect()), + Box::new(collect_memory_stats), + Box::new(collect_io_stats), + ], + } + } +} + +impl std::fmt::Debug for HooksBuilder { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("HooksBuilder") + .field("hooks", &format_args!("Vec>, len: {}", self.hooks.len())) .finish() } } @@ -26,24 +65,10 @@ pub struct Hooks { } impl Hooks { - /// Create a new set of hooks - pub fn new(db: Metrics, static_file_provider: StaticFileProvider) -> Self - where - Metrics: DatabaseMetrics + 'static + Send + Sync, - N: NodePrimitives, - { - let hooks: Vec>> = vec![ - Box::new(move || db.report_metrics()), - Box::new(move || { - let _ = static_file_provider.report_metrics().map_err( - |error| tracing::error!(%error, "Failed to report static file provider metrics"), - ); - }), - Box::new(move || Collector::default().collect()), - Box::new(collect_memory_stats), - Box::new(collect_io_stats), - ]; - Self { inner: Arc::new(hooks) } + /// Creates a new [`HooksBuilder`] instance. + #[inline] + pub fn builder() -> HooksBuilder { + HooksBuilder::default() } pub(crate) fn iter(&self) -> impl Iterator>> { @@ -51,6 +76,15 @@ impl Hooks { } } +impl fmt::Debug for Hooks { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let hooks_len = self.inner.len(); + f.debug_struct("Hooks") + .field("inner", &format_args!("Arc>>, len: {}", hooks_len)) + .finish() + } +} + #[cfg(all(feature = "jemalloc", unix))] fn collect_memory_stats() { use metrics::gauge; diff --git a/crates/node/metrics/src/recorder.rs b/crates/node/metrics/src/recorder.rs index a7421ab355c..e62b98c81cd 100644 --- a/crates/node/metrics/src/recorder.rs +++ b/crates/node/metrics/src/recorder.rs @@ -3,25 +3,78 @@ use eyre::WrapErr; use metrics_exporter_prometheus::{PrometheusBuilder, PrometheusHandle}; use metrics_util::layers::{PrefixLayer, Stack}; -use std::sync::LazyLock; +use std::sync::{atomic::AtomicBool, LazyLock}; /// Installs the Prometheus recorder as the global recorder. -pub fn install_prometheus_recorder() -> &'static PrometheusHandle { +/// +/// Note: This must be installed before any metrics are `described`. +/// +/// Caution: This only configures the global recorder and does not spawn the exporter. +/// Callers must run [`PrometheusRecorder::spawn_upkeep`] manually. +pub fn install_prometheus_recorder() -> &'static PrometheusRecorder { &PROMETHEUS_RECORDER_HANDLE } /// The default Prometheus recorder handle. We use a global static to ensure that it is only /// installed once. -static PROMETHEUS_RECORDER_HANDLE: LazyLock = +static PROMETHEUS_RECORDER_HANDLE: LazyLock = LazyLock::new(|| PrometheusRecorder::install().unwrap()); -/// Prometheus recorder installer +/// A handle to the Prometheus recorder. +/// +/// This is intended to be used as the global recorder. +/// Callers must ensure that [`PrometheusRecorder::spawn_upkeep`] is called once. #[derive(Debug)] -pub struct PrometheusRecorder; +pub struct PrometheusRecorder { + handle: PrometheusHandle, + upkeep: AtomicBool, +} impl PrometheusRecorder { + const fn new(handle: PrometheusHandle) -> Self { + Self { handle, upkeep: AtomicBool::new(false) } + } + + /// Returns a reference to the [`PrometheusHandle`]. + pub const fn handle(&self) -> &PrometheusHandle { + &self.handle + } + + /// Spawns the upkeep task if there hasn't been one spawned already. 
+ /// + /// ## Panics + /// + /// This method must be called from within an existing Tokio runtime or it will panic. + /// + /// See also [`PrometheusHandle::run_upkeep`] + pub fn spawn_upkeep(&self) { + if self + .upkeep + .compare_exchange( + false, + true, + std::sync::atomic::Ordering::SeqCst, + std::sync::atomic::Ordering::Acquire, + ) + .is_err() + { + return; + } + + let handle = self.handle.clone(); + tokio::spawn(async move { + loop { + tokio::time::sleep(std::time::Duration::from_secs(5)).await; + handle.run_upkeep(); + } + }); + } + /// Installs Prometheus as the metrics recorder. - pub fn install() -> eyre::Result { + /// + /// Caution: This only configures the global recorder and does not spawn the exporter. + /// Callers must run [`Self::spawn_upkeep`] manually. + pub fn install() -> eyre::Result { let recorder = PrometheusBuilder::new().build_recorder(); let handle = recorder.handle(); @@ -31,7 +84,7 @@ impl PrometheusRecorder { .install() .wrap_err("Couldn't set metrics recorder.")?; - Ok(handle) + Ok(Self::new(handle)) } } @@ -52,7 +105,7 @@ mod tests { process.describe(); process.collect(); - let metrics = PROMETHEUS_RECORDER_HANDLE.render(); + let metrics = PROMETHEUS_RECORDER_HANDLE.handle.render(); assert!(metrics.contains("process_cpu_seconds_total"), "{metrics:?}"); } } diff --git a/crates/node/metrics/src/server.rs b/crates/node/metrics/src/server.rs index 87521349d4d..313b578f800 100644 --- a/crates/node/metrics/src/server.rs +++ b/crates/node/metrics/src/server.rs @@ -103,7 +103,7 @@ impl MetricServer { let hook = hook.clone(); let service = tower::service_fn(move |_| { (hook)(); - let metrics = handle.render(); + let metrics = handle.handle().render(); let mut response = Response::new(metrics); response .headers_mut() @@ -113,12 +113,12 @@ impl MetricServer { let mut shutdown = signal.clone().ignore_guard(); tokio::task::spawn(async move { - if let Err(error) = - jsonrpsee::server::serve_with_graceful_shutdown(io, service, &mut shutdown) + let _ = + jsonrpsee_server::serve_with_graceful_shutdown(io, service, &mut shutdown) .await - { - tracing::debug!(%error, "failed to serve request") - } + .inspect_err( + |error| tracing::debug!(%error, "failed to serve request"), + ); }); } }); @@ -206,7 +206,6 @@ const fn describe_io_stats() {} mod tests { use super::*; use reqwest::Client; - use reth_provider::{test_utils::create_test_provider_factory, StaticFileProviderFactory}; use reth_tasks::TaskManager; use socket2::{Domain, Socket, Type}; use std::net::{SocketAddr, TcpListener}; @@ -236,8 +235,7 @@ mod tests { let tasks = TaskManager::current(); let executor = tasks.executor(); - let factory = create_test_provider_factory(); - let hooks = Hooks::new(factory.db_ref().clone(), factory.static_file_provider()); + let hooks = Hooks::builder().build(); let listen_addr = get_random_available_addr(); let config = @@ -252,7 +250,7 @@ mod tests { // Check the response body let body = response.text().await.unwrap(); - assert!(body.contains("reth_db_table_size")); - assert!(body.contains("reth_jemalloc_metadata")); + assert!(body.contains("reth_process_cpu_seconds_total")); + assert!(body.contains("reth_process_start_time_seconds")); } } diff --git a/crates/node/types/src/lib.rs b/crates/node/types/src/lib.rs index f8770a3c014..a23b9bfe414 100644 --- a/crates/node/types/src/lib.rs +++ b/crates/node/types/src/lib.rs @@ -9,12 +9,11 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(feature = "std"), no_std)] +use core::{fmt::Debug, marker::PhantomData}; pub use 
diff --git a/crates/node/types/src/lib.rs b/crates/node/types/src/lib.rs
index f8770a3c014..a23b9bfe414 100644
--- a/crates/node/types/src/lib.rs
+++ b/crates/node/types/src/lib.rs
@@ -9,12 +9,11 @@
 #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
 #![cfg_attr(not(feature = "std"), no_std)]
 
+use core::{fmt::Debug, marker::PhantomData};
 pub use reth_primitives_traits::{
     Block, BlockBody, FullBlock, FullNodePrimitives, FullReceipt, FullSignedTx, NodePrimitives,
 };
 
-use core::marker::PhantomData;
-
 use reth_chainspec::EthChainSpec;
 use reth_db_api::{
     database_metrics::{DatabaseMetadata, DatabaseMetrics},
@@ -35,6 +34,8 @@ pub trait NodeTypes: Send + Sync + Unpin + 'static {
     type ChainSpec: EthChainSpec;
     /// The type used to perform state commitment operations.
     type StateCommitment: StateCommitment;
+    /// The type responsible for writing chain primitives to storage.
+    type Storage: Default + Send + Sync + Unpin + Debug + 'static;
 }
 
 /// The type that configures an Ethereum-like node with an engine for consensus.
@@ -86,6 +87,7 @@ where
     type Primitives = Types::Primitives;
     type ChainSpec = Types::ChainSpec;
     type StateCommitment = Types::StateCommitment;
+    type Storage = Types::Storage;
 }
 
 impl<Types, DB> NodeTypesWithEngine for NodeTypesWithDBAdapter<Types, DB>
@@ -105,86 +107,137 @@ where
 }
 
 /// A [`NodeTypes`] type builder.
-#[derive(Default, Debug)]
-pub struct AnyNodeTypes<P = (), C = (), S = ()>(PhantomData<P>, PhantomData<C>, PhantomData<S>);
+#[derive(Debug)]
+pub struct AnyNodeTypes<P = (), C = (), SC = (), S = ()>(
+    PhantomData<P>,
+    PhantomData<C>,
+    PhantomData<SC>,
+    PhantomData<S>,
+);
+
+impl<P, C, SC, S> Default for AnyNodeTypes<P, C, SC, S> {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl<P, C, SC, S> AnyNodeTypes<P, C, SC, S> {
+    /// Creates a new instance of [`AnyNodeTypes`].
+    pub const fn new() -> Self {
+        Self(PhantomData, PhantomData, PhantomData, PhantomData)
+    }
 
-impl<P, C, S> AnyNodeTypes<P, C, S> {
     /// Sets the `Primitives` associated type.
-    pub const fn primitives<T>(self) -> AnyNodeTypes<T, C, S> {
-        AnyNodeTypes::<T, C, S>(PhantomData::<T>, PhantomData::<C>, PhantomData::<S>)
+    pub const fn primitives<T>(self) -> AnyNodeTypes<T, C, SC, S> {
+        AnyNodeTypes::new()
     }
 
     /// Sets the `ChainSpec` associated type.
-    pub const fn chain_spec<T>(self) -> AnyNodeTypes<P, T, S> {
-        AnyNodeTypes::<P, T, S>(PhantomData::<P>, PhantomData::<T>, PhantomData::<S>)
+    pub const fn chain_spec<T>(self) -> AnyNodeTypes<P, T, SC, S> {
+        AnyNodeTypes::new()
     }
 
     /// Sets the `StateCommitment` associated type.
-    pub const fn state_commitment<T>(self) -> AnyNodeTypes<P, C, T> {
-        AnyNodeTypes::<P, C, T>(PhantomData::<P>, PhantomData::<C>, PhantomData::<T>)
+    pub const fn state_commitment<T>(self) -> AnyNodeTypes<P, C, T, S> {
+        AnyNodeTypes::new()
+    }
+
+    /// Sets the `Storage` associated type.
+    pub const fn storage<T>(self) -> AnyNodeTypes<P, C, SC, T> {
+        AnyNodeTypes::new()
     }
 }
 
-impl<P, C, S> NodeTypes for AnyNodeTypes<P, C, S>
+impl<P, C, SC, S> NodeTypes for AnyNodeTypes<P, C, SC, S>
 where
     P: NodePrimitives + Send + Sync + Unpin + 'static,
     C: EthChainSpec + 'static,
-    S: StateCommitment,
+    SC: StateCommitment,
+    S: Default + Send + Sync + Unpin + Debug + 'static,
 {
     type Primitives = P;
     type ChainSpec = C;
-    type StateCommitment = S;
+    type StateCommitment = SC;
+    type Storage = S;
 }
 
 /// A [`NodeTypesWithEngine`] type builder.
-#[derive(Default, Debug)]
-pub struct AnyNodeTypesWithEngine<P = (), E = (), C = (), S = ()> {
+#[derive(Debug)]
+pub struct AnyNodeTypesWithEngine<P = (), E = (), C = (), SC = (), S = ()> {
     /// Embedding the basic node types.
-    base: AnyNodeTypes<P, C, S>,
+    _base: AnyNodeTypes<P, C, SC, S>,
     /// Phantom data for the engine.
     _engine: PhantomData<E>,
 }
 
-impl<P, E, C, S> AnyNodeTypesWithEngine<P, E, C, S> {
+impl<P, E, C, SC, S> Default for AnyNodeTypesWithEngine<P, E, C, SC, S> {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl<P, E, C, SC, S> AnyNodeTypesWithEngine<P, E, C, SC, S> {
+    /// Creates a new instance of [`AnyNodeTypesWithEngine`].
+    pub const fn new() -> Self {
+        Self { _base: AnyNodeTypes::new(), _engine: PhantomData }
+    }
+
     /// Sets the `Primitives` associated type.
-    pub const fn primitives<T>(self) -> AnyNodeTypesWithEngine<T, E, C, S> {
-        AnyNodeTypesWithEngine { base: self.base.primitives::<T>(), _engine: PhantomData }
+    pub const fn primitives<T>(self) -> AnyNodeTypesWithEngine<T, E, C, SC, S> {
+        AnyNodeTypesWithEngine::new()
     }
 
     /// Sets the `Engine` associated type.
-    pub const fn engine<T>(self) -> AnyNodeTypesWithEngine<P, T, C, S> {
-        AnyNodeTypesWithEngine { base: self.base, _engine: PhantomData::<T> }
+    pub const fn engine<T>(self) -> AnyNodeTypesWithEngine<P, T, C, SC, S> {
+        AnyNodeTypesWithEngine::new()
    }
 
     /// Sets the `ChainSpec` associated type.
-    pub const fn chain_spec<T>(self) -> AnyNodeTypesWithEngine<P, E, T, S> {
-        AnyNodeTypesWithEngine { base: self.base.chain_spec::<T>(), _engine: PhantomData }
+    pub const fn chain_spec<T>(self) -> AnyNodeTypesWithEngine<P, E, T, SC, S> {
+        AnyNodeTypesWithEngine::new()
     }
 
     /// Sets the `StateCommitment` associated type.
-    pub const fn state_commitment<T>(self) -> AnyNodeTypesWithEngine<P, E, C, T> {
-        AnyNodeTypesWithEngine { base: self.base.state_commitment::<T>(), _engine: PhantomData }
+    pub const fn state_commitment<T>(self) -> AnyNodeTypesWithEngine<P, E, C, T, S> {
+        AnyNodeTypesWithEngine::new()
+    }
+
+    /// Sets the `Storage` associated type.
+    pub const fn storage<T>(self) -> AnyNodeTypesWithEngine<P, E, C, SC, T> {
+        AnyNodeTypesWithEngine::new()
     }
 }
 
-impl<P, E, C, S> NodeTypes for AnyNodeTypesWithEngine<P, E, C, S>
+impl<P, E, C, SC, S> NodeTypes for AnyNodeTypesWithEngine<P, E, C, SC, S>
 where
     P: NodePrimitives + Send + Sync + Unpin + 'static,
     E: EngineTypes + Send + Sync + Unpin,
     C: EthChainSpec + 'static,
-    S: StateCommitment,
+    SC: StateCommitment,
+    S: Default + Send + Sync + Unpin + Debug + 'static,
 {
     type Primitives = P;
     type ChainSpec = C;
-    type StateCommitment = S;
+    type StateCommitment = SC;
+    type Storage = S;
 }
 
-impl<P, E, C, S> NodeTypesWithEngine for AnyNodeTypesWithEngine<P, E, C, S>
+impl<P, E, C, SC, S> NodeTypesWithEngine for AnyNodeTypesWithEngine<P, E, C, SC, S>
 where
     P: NodePrimitives + Send + Sync + Unpin + 'static,
     E: EngineTypes + Send + Sync + Unpin,
     C: EthChainSpec + 'static,
-    S: StateCommitment,
+    SC: StateCommitment,
+    S: Default + Send + Sync + Unpin + Debug + 'static,
 {
     type Engine = E;
 }
+
+/// Helper adapter type for accessing [`NodePrimitives::BlockHeader`] on [`NodeTypes`].
+pub type HeaderTy<N> = <<N as NodeTypes>::Primitives as NodePrimitives>::BlockHeader;
+
+/// Helper adapter type for accessing [`NodePrimitives::BlockBody`] on [`NodeTypes`].
+pub type BodyTy<N> = <<N as NodeTypes>::Primitives as NodePrimitives>::BlockBody;
+
+/// Helper adapter type for accessing [`NodePrimitives::SignedTx`] on [`NodeTypes`].
+pub type TxTy<N> = <<N as NodeTypes>::Primitives as NodePrimitives>::SignedTx;
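The `AnyNodeTypes` builder above is a phantom-type (type-state) builder: every setter consumes the zero-sized value and rebinds one type parameter at compile time. A toy two-slot sketch of the pattern, with illustrative names rather than the reth types:

```rust
use core::marker::PhantomData;

/// Toy builder: each setter rebinds one type parameter and returns a fresh
/// zero-sized value, so the whole chain is usable in const contexts.
#[derive(Debug)]
struct AnyTypes<P = (), C = ()>(PhantomData<P>, PhantomData<C>);

impl<P, C> AnyTypes<P, C> {
    const fn new() -> Self {
        Self(PhantomData, PhantomData)
    }

    const fn primitives<T>(self) -> AnyTypes<T, C> {
        AnyTypes::new()
    }

    const fn chain_spec<T>(self) -> AnyTypes<P, T> {
        AnyTypes::new()
    }
}

struct MyPrimitives;
struct MySpec;

// The value itself carries no data; only the final type matters.
const TYPES: AnyTypes<MyPrimitives, MySpec> =
    AnyTypes::<(), ()>::new().primitives::<MyPrimitives>().chain_spec::<MySpec>();

fn main() {
    let _ = TYPES;
}
```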
diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml
index 8337980d46f..efbea77454c 100644
--- a/crates/optimism/bin/Cargo.toml
+++ b/crates/optimism/bin/Cargo.toml
@@ -49,7 +49,8 @@ optimism = [
 scroll = []
 
 dev = [
-    "reth-optimism-cli/dev"
+    "reth-optimism-cli/dev",
+    "reth-optimism-primitives/arbitrary",
 ]
 
 min-error-logs = ["tracing/release_max_level_error"]
diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml
index dc99e47f539..5eb0f0e99e0 100644
--- a/crates/optimism/cli/Cargo.toml
+++ b/crates/optimism/cli/Cargo.toml
@@ -119,6 +119,7 @@ serde = [
     "alloy-primitives/serde",
     "op-alloy-consensus?/serde",
     "reth-execution-types/serde",
-    "reth-provider/serde"
+    "reth-provider/serde",
+    "reth-optimism-primitives/serde",
 ]
 scroll = []
diff --git a/crates/optimism/cli/src/commands/build_pipeline.rs b/crates/optimism/cli/src/commands/build_pipeline.rs
index 88dc0989717..8ebefdcc0b4 100644
--- a/crates/optimism/cli/src/commands/build_pipeline.rs
+++ b/crates/optimism/cli/src/commands/build_pipeline.rs
@@ -1,5 +1,6 @@
 use alloy_primitives::B256;
 use futures_util::{Stream, StreamExt};
+use reth_cli_commands::common::CliNodeTypes;
 use reth_config::Config;
 use reth_consensus::Consensus;
 use reth_downloaders::{
@@ -38,7 +39,7 @@ pub(crate) async fn build_import_pipeline<N, C>(
     disable_exec: bool,
 ) -> eyre::Result<(Pipeline<N>, impl Stream<Item = NodeEvent>)>
 where
-    N: ProviderNodeTypes,
+    N: CliNodeTypes + ProviderNodeTypes,
     C: Consensus + 'static,
 {
     if !file_client.has_canonical_blocks() {
diff --git a/crates/optimism/cli/src/ovm_file_codec.rs b/crates/optimism/cli/src/ovm_file_codec.rs
index b29d30093ec..3d746d6d1e0 100644
--- a/crates/optimism/cli/src/ovm_file_codec.rs
+++ b/crates/optimism/cli/src/ovm_file_codec.rs
@@ -250,6 +250,7 @@ impl Encodable2718 for TransactionSigned {
             Transaction::Deposit(deposit_tx) => deposit_tx.eip2718_encoded_length(),
         }
     }
+
     fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) {
         self.transaction.eip2718_encode(&self.signature, out)
     }
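For context on the `Encodable2718` hunk above: an EIP-2718 envelope is a one-byte type id followed by the opaque payload, and legacy transactions are encoded bare. A toy sketch of that framing, not reth's implementation (`0x7e` is the real OP deposit type id):

```rust
/// Toy EIP-2718 envelope: legacy transactions are encoded as-is, typed
/// transactions are prefixed with their one-byte type id.
enum Envelope {
    Legacy(Vec<u8>),    // pre-2718 payloads carry no type byte
    Typed(u8, Vec<u8>), // (type id, opaque payload)
}

impl Envelope {
    fn encode_2718_len(&self) -> usize {
        match self {
            Self::Legacy(payload) => payload.len(),
            Self::Typed(_, payload) => 1 + payload.len(),
        }
    }

    fn encode_2718(&self, out: &mut Vec<u8>) {
        match self {
            Self::Legacy(payload) => out.extend_from_slice(payload),
            Self::Typed(ty, payload) => {
                out.push(*ty);
                out.extend_from_slice(payload);
            }
        }
    }
}

fn main() {
    let tx = Envelope::Typed(0x7e, vec![0xaa, 0xbb]); // 0x7e: OP deposit type id
    let mut buf = Vec::with_capacity(tx.encode_2718_len());
    tx.encode_2718(&mut buf);
    assert_eq!(buf, [0x7e, 0xaa, 0xbb]);
}
```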
diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs
index 72f67dcb450..e8b7959dd27 100644
--- a/crates/optimism/consensus/src/lib.rs
+++ b/crates/optimism/consensus/src/lib.rs
@@ -12,7 +12,7 @@
 use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH};
 use alloy_primitives::{B64, U256};
 use reth_chainspec::EthereumHardforks;
-use reth_consensus::{Consensus, ConsensusError, PostExecutionInput};
+use reth_consensus::{Consensus, ConsensusError, HeaderValidator, PostExecutionInput};
 use reth_consensus_common::validation::{
     validate_against_parent_4844, validate_against_parent_eip1559_base_fee,
     validate_against_parent_hash_number, validate_against_parent_timestamp,
@@ -47,6 +47,50 @@ impl OpBeaconConsensus {
 }
 
 impl Consensus for OpBeaconConsensus {
+    fn validate_body_against_header(
+        &self,
+        body: &BlockBody,
+        header: &SealedHeader,
+    ) -> Result<(), ConsensusError> {
+        validate_body_against_header(body, header)
+    }
+
+    fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> {
+        // Check ommers hash
+        let ommers_hash = reth_primitives::proofs::calculate_ommers_root(&block.body.ommers);
+        if block.header.ommers_hash != ommers_hash {
+            return Err(ConsensusError::BodyOmmersHashDiff(
+                GotExpected { got: ommers_hash, expected: block.header.ommers_hash }.into(),
+            ))
+        }
+
+        // Check transaction root
+        if let Err(error) = block.ensure_transaction_root_valid() {
+            return Err(ConsensusError::BodyTransactionRootDiff(error.into()))
+        }
+
+        // EIP-4895: Beacon chain push withdrawals as operations
+        if self.chain_spec.is_shanghai_active_at_timestamp(block.timestamp) {
+            validate_shanghai_withdrawals(block)?;
+        }
+
+        if self.chain_spec.is_cancun_active_at_timestamp(block.timestamp) {
+            validate_cancun_gas(block)?;
+        }
+
+        Ok(())
+    }
+
+    fn validate_block_post_execution(
+        &self,
+        block: &BlockWithSenders,
+        input: PostExecutionInput<'_>,
+    ) -> Result<(), ConsensusError> {
+        validate_block_post_execution(block, &self.chain_spec, input.receipts)
+    }
+}
+
+impl HeaderValidator for OpBeaconConsensus {
     fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> {
         validate_header_gas(header)?;
         validate_header_base_fee(header, &self.chain_spec)
@@ -118,46 +162,4 @@ impl Consensus for OpBeaconConsensus {
 
         Ok(())
     }
-
-    fn validate_body_against_header(
-        &self,
-        body: &BlockBody,
-        header: &SealedHeader,
-    ) -> Result<(), ConsensusError> {
-        validate_body_against_header(body, header)
-    }
-
-    fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> {
-        // Check ommers hash
-        let ommers_hash = reth_primitives::proofs::calculate_ommers_root(&block.body.ommers);
-        if block.header.ommers_hash != ommers_hash {
-            return Err(ConsensusError::BodyOmmersHashDiff(
-                GotExpected { got: ommers_hash, expected: block.header.ommers_hash }.into(),
-            ))
-        }
-
-        // Check transaction root
-        if let Err(error) = block.ensure_transaction_root_valid() {
-            return Err(ConsensusError::BodyTransactionRootDiff(error.into()))
-        }
-
-        // EIP-4895: Beacon chain push withdrawals as operations
-        if self.chain_spec.is_shanghai_active_at_timestamp(block.timestamp) {
-            validate_shanghai_withdrawals(block)?;
-        }
-
-        if self.chain_spec.is_cancun_active_at_timestamp(block.timestamp) {
-            validate_cancun_gas(block)?;
-        }
-
-        Ok(())
-    }
-
-    fn validate_block_post_execution(
-        &self,
-        block: &BlockWithSenders,
-        input: PostExecutionInput<'_>,
-    ) -> Result<(), ConsensusError> {
-        validate_block_post_execution(block, &self.chain_spec, input.receipts)
-    }
 }
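The consensus change above splits header-only checks out of `Consensus` into the new `HeaderValidator` trait, with `Consensus` building on top of it. A minimal sketch of that trait split, using toy types and deliberately simplistic rules:

```rust
/// Toy version of the split: header checks live on their own trait so that
/// components that only see headers don't need a full `Consensus` impl.
struct SealedHeader {
    gas_used: u64,
    gas_limit: u64,
}

struct SealedBlock {
    header: SealedHeader,
    tx_count: usize,
}

#[derive(Debug)]
enum ConsensusError {
    GasOverLimit,
    EmptyBody,
}

trait HeaderValidator {
    fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError>;
}

/// Full consensus builds on top of header validation.
trait Consensus: HeaderValidator {
    fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError>;
}

struct ToyBeacon;

impl HeaderValidator for ToyBeacon {
    fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> {
        (header.gas_used <= header.gas_limit).then_some(()).ok_or(ConsensusError::GasOverLimit)
    }
}

impl Consensus for ToyBeacon {
    fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> {
        // header checks first, then body-level checks
        self.validate_header(&block.header)?;
        (block.tx_count > 0).then_some(()).ok_or(ConsensusError::EmptyBody)
    }
}

fn main() {
    let block =
        SealedBlock { header: SealedHeader { gas_used: 10, gas_limit: 30_000_000 }, tx_count: 1 };
    assert!(ToyBeacon.validate_block_pre_execution(&block).is_ok());
}
```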
diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml
index cb1715a08fd..987b3d3ac4e 100644
--- a/crates/optimism/evm/Cargo.toml
+++ b/crates/optimism/evm/Cargo.toml
@@ -48,6 +48,7 @@ reth-primitives = { workspace = true, features = ["test-utils"] }
 reth-optimism-chainspec.workspace = true
 alloy-genesis.workspace = true
 alloy-consensus.workspace = true
+reth-optimism-primitives = { workspace = true, features = ["arbitrary"] }
 
 [features]
 default = ["std"]
diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs
index b4c2e16f593..042b8e29193 100644
--- a/crates/optimism/evm/src/execute.rs
+++ b/crates/optimism/evm/src/execute.rs
@@ -15,7 +15,7 @@ use reth_evm::{
     },
     state_change::post_block_balance_increments,
     system_calls::{OnStateHook, SystemCaller},
-    ConfigureEvm,
+    ConfigureEvm, TxEnvOverrides,
 };
 use reth_optimism_chainspec::OpChainSpec;
 use reth_optimism_consensus::validate_block_post_execution;
@@ -78,6 +78,8 @@ where
     chain_spec: Arc<OpChainSpec>,
     /// How to create an EVM.
     evm_config: EvmConfig,
+    /// Optional overrides for the transactions environment.
+    tx_env_overrides: Option<Box<dyn TxEnvOverrides>>,
     /// Current state for block execution.
     state: State<DB>,
     /// Utility to call system smart contracts.
@@ -91,7 +93,7 @@ where
     /// Creates a new [`OpExecutionStrategy`]
     pub fn new(state: State<DB>, chain_spec: Arc<OpChainSpec>, evm_config: EvmConfig) -> Self {
         let system_caller = SystemCaller::new(evm_config.clone(), chain_spec.clone());
-        Self { state, chain_spec, evm_config, system_caller }
+        Self { state, chain_spec, evm_config, system_caller, tx_env_overrides: None }
     }
 }
 
@@ -119,6 +121,10 @@ where
 {
     type Error = BlockExecutionError;
 
+    fn init(&mut self, tx_env_overrides: Box<dyn TxEnvOverrides>) {
+        self.tx_env_overrides = Some(tx_env_overrides);
+    }
+
     fn apply_pre_execution_changes(
         &mut self,
         block: &BlockWithSenders,
@@ -197,6 +203,10 @@
             self.evm_config.fill_tx_env(evm.tx_mut(), transaction, *sender);
 
+            if let Some(tx_env_overrides) = &mut self.tx_env_overrides {
+                tx_env_overrides.apply(evm.tx_mut());
+            }
+
             // Execute transaction.
             let result_and_state = evm.transact().map_err(move |err| {
                 let new_err = err.map_db_err(|e| e.into());
@@ -367,7 +377,7 @@ mod tests {
 
         let chain_spec = Arc::new(OpChainSpecBuilder::base_mainnet().regolith_activated().build());
 
-        let tx = TransactionSigned::from_transaction_and_signature(
+        let tx = TransactionSigned::new_unhashed(
             Transaction::Eip1559(TxEip1559 {
                 chain_id: chain_spec.chain.id(),
                 nonce: 0,
@@ -378,7 +388,7 @@
             Signature::test_signature(),
         );
 
-        let tx_deposit = TransactionSigned::from_transaction_and_signature(
+        let tx_deposit = TransactionSigned::new_unhashed(
             Transaction::Deposit(op_alloy_consensus::TxDeposit {
                 from: addr,
                 to: addr.into(),
@@ -451,7 +461,7 @@
 
         let chain_spec = Arc::new(OpChainSpecBuilder::base_mainnet().canyon_activated().build());
 
-        let tx = TransactionSigned::from_transaction_and_signature(
+        let tx = TransactionSigned::new_unhashed(
             Transaction::Eip1559(TxEip1559 {
                 chain_id: chain_spec.chain.id(),
                 nonce: 0,
@@ -462,7 +472,7 @@
             Signature::test_signature(),
        );
 
-        let tx_deposit = TransactionSigned::from_transaction_and_signature(
+        let tx_deposit = TransactionSigned::new_unhashed(
             Transaction::Deposit(op_alloy_consensus::TxDeposit {
                 from: addr,
                 to: addr.into(),
diff --git a/crates/optimism/evm/src/l1.rs b/crates/optimism/evm/src/l1.rs
index 9d3e76fb442..ef8c3f3b3db 100644
--- a/crates/optimism/evm/src/l1.rs
+++ b/crates/optimism/evm/src/l1.rs
@@ -2,6 +2,7 @@
 use crate::OpBlockExecutionError;
 use alloc::{string::ToString, sync::Arc};
+use alloy_consensus::Transaction;
 use alloy_primitives::{address, b256, hex, Address, Bytes, B256, U256};
 use reth_chainspec::ChainSpec;
 use reth_execution_errors::BlockExecutionError;
diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs
index 2d5884fa902..78aa706eaf6 100644
--- a/crates/optimism/evm/src/lib.rs
+++ b/crates/optimism/evm/src/lib.rs
@@ -53,7 +53,7 @@ impl OpEvmConfig {
     }
 
     /// Returns the chain spec associated with this configuration.
-    pub fn chain_spec(&self) -> &OpChainSpec {
+    pub const fn chain_spec(&self) -> &Arc<OpChainSpec> {
         &self.chain_spec
     }
 }
@@ -214,6 +214,7 @@ mod tests {
         AccountRevertInit, BundleStateInit, Chain, ExecutionOutcome, RevertsInit,
     };
     use reth_optimism_chainspec::BASE_MAINNET;
+    use reth_optimism_primitives::OpPrimitives;
     use reth_primitives::{Account, Log, Receipt, Receipts, SealedBlockWithSenders, TxType};
     use reth_revm::{
@@ -553,7 +554,7 @@
     #[test]
     fn receipts_by_block_hash() {
         // Create a default SealedBlockWithSenders object
-        let block = SealedBlockWithSenders::default();
+        let block: SealedBlockWithSenders = Default::default();
 
         // Define block hashes for block1 and block2
         let block1_hash = B256::new([0x01; 32]);
@@ -605,7 +606,8 @@
         // Create a Chain object with a BTreeMap of blocks mapped to their block numbers,
         // including block1_hash and block2_hash, and the execution_outcome
-        let chain = Chain::new([block1, block2], execution_outcome.clone(), None);
+        let chain: Chain<OpPrimitives> =
+            Chain::new([block1, block2], execution_outcome.clone(), None);
 
         // Assert that the proper receipt vector is returned for block1_hash
         assert_eq!(chain.receipts_by_block_hash(block1_hash), Some(vec![&receipt1]));
diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml
index bce0eb3a16c..30378755973 100644
--- a/crates/optimism/node/Cargo.toml
+++ b/crates/optimism/node/Cargo.toml
@@ -13,6 +13,7 @@ workspace = true
 [dependencies]
 # reth
 reth-chainspec.workspace = true
+reth-db.workspace = true
 reth-engine-local.workspace = true
 reth-primitives.workspace = true
 reth-payload-builder.workspace = true
@@ -29,6 +30,7 @@ reth-evm.workspace = true
 reth-revm = { workspace = true, features = ["std"] }
 reth-beacon-consensus.workspace = true
 reth-trie-db.workspace = true
+reth-rpc-server-types.workspace = true
 
 # op-reth
 reth-optimism-payload-builder.workspace = true
@@ -37,6 +39,7 @@ reth-optimism-rpc.workspace = true
 reth-optimism-chainspec.workspace = true
 reth-optimism-consensus.workspace = true
 reth-optimism-forks.workspace = true
+reth-optimism-primitives.workspace = true
 
 # revm with required optimism features
 revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] }
@@ -116,5 +119,10 @@ test-utils = [
     "reth-trie-db/test-utils",
     "revm/test-utils",
     "reth-optimism-node/test-utils",
+    "reth-optimism-primitives/arbitrary",
+]
+reth-codec = [
+    "reth-primitives/reth-codec",
+    "reth-optimism-primitives/reth-codec",
 ]
 scroll = []
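On the `chain_spec` accessor change above: returning `&Arc<OpChainSpec>` rather than `&OpChainSpec` lets a caller either borrow through the `Arc` or clone it for shared ownership at the cost of a refcount bump. A std-only illustration with a toy `ChainSpec`:

```rust
use std::sync::Arc;

struct ChainSpec; // toy stand-in

struct EvmConfig {
    chain_spec: Arc<ChainSpec>,
}

impl EvmConfig {
    /// Exposing the `Arc` itself keeps both call styles available to the caller.
    const fn chain_spec(&self) -> &Arc<ChainSpec> {
        &self.chain_spec
    }
}

fn main() {
    let config = EvmConfig { chain_spec: Arc::new(ChainSpec) };
    let _borrowed: &ChainSpec = config.chain_spec(); // deref-coerces to &ChainSpec
    let _owned: Arc<ChainSpec> = config.chain_spec().clone(); // cheap refcount bump
}
```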
diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs
index 0c2186c7268..82b2ce2ebc2 100644
--- a/crates/optimism/node/src/node.rs
+++ b/crates/optimism/node/src/node.rs
@@ -5,10 +5,11 @@ use std::sync::Arc;
 use alloy_consensus::Header;
 use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig};
 use reth_chainspec::{EthChainSpec, Hardforks};
+use reth_db::transaction::{DbTx, DbTxMut};
 use reth_evm::{execute::BasicBlockExecutorProvider, ConfigureEvm};
 use reth_network::{NetworkConfig, NetworkHandle, NetworkManager, PeersInfo};
 use reth_node_api::{
-    AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, NodePrimitives, PayloadBuilder,
+    AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, PayloadBuilder,
 };
 use reth_node_builder::{
     components::{
@@ -23,10 +24,18 @@ use reth_optimism_chainspec::OpChainSpec;
 use reth_optimism_consensus::OpBeaconConsensus;
 use reth_optimism_evm::{OpEvmConfig, OpExecutionStrategyFactory};
 use reth_optimism_payload_builder::builder::OpPayloadTransactions;
-use reth_optimism_rpc::OpEthApi;
+use reth_optimism_primitives::OpPrimitives;
+use reth_optimism_rpc::{
+    witness::{DebugExecutionWitnessApiServer, OpDebugWitnessApi},
+    OpEthApi,
+};
 use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService};
-use reth_primitives::{Block, Receipt, TransactionSigned, TxType};
-use reth_provider::CanonStateSubscriptions;
+use reth_primitives::BlockBody;
+use reth_provider::{
+    providers::ChainStorage, BlockBodyWriter, CanonStateSubscriptions, DBProvider, EthStorage,
+    ProviderResult,
+};
+use reth_rpc_server_types::RethRpcModule;
 use reth_tracing::tracing::{debug, info};
 use reth_transaction_pool::{
     blobstore::DiskFileBlobStore, CoinbaseTipOrdering, TransactionPool,
@@ -41,17 +50,39 @@ use crate::{
     OpEngineTypes,
 };
 
-/// Optimism primitive types.
+/// Storage implementation for Optimism.
 #[derive(Debug, Default, Clone)]
-pub struct OpPrimitives;
+pub struct OpStorage(EthStorage);
+
+impl<Provider: DBProvider<Tx: DbTxMut>> BlockBodyWriter<Provider, BlockBody> for OpStorage {
+    fn write_block_bodies(
+        &self,
+        provider: &Provider,
+        bodies: Vec<(u64, Option<BlockBody>)>,
+    ) -> ProviderResult<()> {
+        self.0.write_block_bodies(provider, bodies)
+    }
 
-impl NodePrimitives for OpPrimitives {
-    type Block = Block;
-    type SignedTx = TransactionSigned;
-    type TxType = TxType;
-    type Receipt = Receipt;
+    fn remove_block_bodies_above(
+        &self,
+        provider: &Provider,
+        block: alloy_primitives::BlockNumber,
+    ) -> ProviderResult<()> {
+        self.0.remove_block_bodies_above(provider, block)
+    }
 }
 
+impl ChainStorage<OpPrimitives> for OpStorage {
+    fn writer<TX, Types>(
+        &self,
+    ) -> impl reth_provider::ChainStorageWriter<reth_provider::DatabaseProvider<TX, Types>, OpPrimitives>
+    where
+        TX: DbTxMut + DbTx + 'static,
+        Types: NodeTypes<Primitives = OpPrimitives>,
+    {
+        self
+    }
+}
+
 /// Type configuration for a regular Optimism node.
 #[derive(Debug, Default, Clone)]
 #[non_exhaustive]
@@ -98,7 +129,14 @@ impl OpNode {
 
 impl<N> Node<N> for OpNode
 where
-    N: FullNodeTypes<Types: NodeTypesWithEngine<Engine = OpEngineTypes, ChainSpec = OpChainSpec>>,
+    N: FullNodeTypes<
+        Types: NodeTypesWithEngine<
+            Engine = OpEngineTypes,
+            ChainSpec = OpChainSpec,
+            Primitives = OpPrimitives,
+            Storage = OpStorage,
+        >,
+    >,
 {
     type ComponentsBuilder = ComponentsBuilder<
         N,
@@ -126,6 +164,7 @@ impl NodeTypes for OpNode {
     type Primitives = OpPrimitives;
     type ChainSpec = OpChainSpec;
     type StateCommitment = MerklePatriciaTrie;
+    type Storage = OpStorage;
 }
 
 impl NodeTypesWithEngine for OpNode {
@@ -152,7 +191,7 @@ impl<N> OpAddOns<N> {
 impl<N> NodeAddOns<N> for OpAddOns<N>
 where
     N: FullNodeComponents<
-        Types: NodeTypes<ChainSpec = OpChainSpec, Primitives = OpPrimitives>,
+        Types: NodeTypes<ChainSpec = OpChainSpec, Primitives = OpPrimitives, Storage = OpStorage>,
         PayloadBuilder: PayloadBuilder<PayloadType = <N::Types as NodeTypesWithEngine>::Engine>,
     >,
     OpEngineValidator: EngineValidator<<N::Types as NodeTypesWithEngine>::Engine>,
@@ -163,14 +202,24 @@
         self,
         ctx: reth_node_api::AddOnsContext<'_, N>,
     ) -> eyre::Result<Self::Handle> {
-        self.0.launch_add_ons(ctx).await
+        // install additional OP specific rpc methods
+        let debug_ext =
+            OpDebugWitnessApi::new(ctx.node.provider().clone(), ctx.node.evm_config().clone());
+
+        self.0
+            .launch_add_ons_with(ctx, move |modules| {
+                debug!(target: "reth::cli", "Installing debug payload witness rpc endpoint");
+                modules.merge_if_module_configured(RethRpcModule::Debug, debug_ext.into_rpc())?;
+                Ok(())
+            })
+            .await
     }
 }
 
 impl<N> RethRpcAddOns<N> for OpAddOns<N>
 where
     N: FullNodeComponents<
-        Types: NodeTypes<ChainSpec = OpChainSpec, Primitives = OpPrimitives>,
+        Types: NodeTypes<ChainSpec = OpChainSpec, Primitives = OpPrimitives, Storage = OpStorage>,
         PayloadBuilder: PayloadBuilder<PayloadType = <N::Types as NodeTypesWithEngine>::Engine>,
     >,
     OpEngineValidator: EngineValidator<<N::Types as NodeTypesWithEngine>::Engine>,
@@ -231,6 +280,7 @@ where
         let validator = TransactionValidationTaskExecutor::eth_builder(Arc::new(
             ctx.chain_spec().inner.clone(),
         ))
+        .no_eip4844()
        .with_head_timestamp(ctx.head().timestamp)
        .kzg_settings(ctx.kzg_settings()?)
.with_additional_tasks( diff --git a/crates/optimism/node/src/txpool.rs b/crates/optimism/node/src/txpool.rs index 0edfeec7322..a5616569c86 100644 --- a/crates/optimism/node/src/txpool.rs +++ b/crates/optimism/node/src/txpool.rs @@ -3,7 +3,9 @@ use alloy_eips::eip2718::Encodable2718; use parking_lot::RwLock; use reth_chainspec::ChainSpec; use reth_optimism_evm::RethL1BlockInfo; -use reth_primitives::{Block, GotExpected, InvalidTransactionError, SealedBlock}; +use reth_primitives::{ + Block, GotExpected, InvalidTransactionError, SealedBlock, TransactionSigned, +}; use reth_provider::{BlockReaderIdExt, StateProviderFactory}; use reth_revm::L1BlockInfo; use reth_transaction_pool::{ @@ -140,7 +142,8 @@ where let l1_block_info = self.block_info.l1_block_info.read().clone(); let mut encoded = Vec::with_capacity(valid_tx.transaction().encoded_length()); - valid_tx.transaction().clone().into_consensus().into().encode_2718(&mut encoded); + let tx: TransactionSigned = valid_tx.transaction().clone().into_consensus().into(); + tx.encode_2718(&mut encoded); let cost_addition = match l1_block_info.l1_tx_data_fee( &self.chain_spec(), @@ -262,7 +265,7 @@ mod tests { input: Default::default(), }); let signature = Signature::test_signature(); - let signed_tx = TransactionSigned::from_transaction_and_signature(deposit_tx, signature); + let signed_tx = TransactionSigned::new_unhashed(deposit_tx, signature); let signed_recovered = TransactionSignedEcRecovered::from_signed_transaction(signed_tx, signer); let len = signed_recovered.encode_2718_len(); diff --git a/crates/optimism/node/tests/it/priority.rs b/crates/optimism/node/tests/it/priority.rs index f1260d2da01..c1df9180ce3 100644 --- a/crates/optimism/node/tests/it/priority.rs +++ b/crates/optimism/node/tests/it/priority.rs @@ -63,10 +63,7 @@ impl OpPayloadTransactions for CustomTxPriority { }; let signature = sender.sign_transaction_sync(&mut end_of_block_tx).unwrap(); let end_of_block_tx = TransactionSignedEcRecovered::from_signed_transaction( - TransactionSigned::from_transaction_and_signature( - Transaction::Eip1559(end_of_block_tx), - signature, - ), + TransactionSigned::new_unhashed(Transaction::Eip1559(end_of_block_tx), signature), sender.address(), ); diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 3644d8f71a5..132c2649272 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -3,13 +3,13 @@ use std::{fmt::Display, sync::Arc}; use alloy_consensus::{Header, Transaction, EMPTY_OMMER_ROOT_HASH}; -use alloy_eips::merge::BEACON_NONCE; -use alloy_primitives::{Address, Bytes, U256}; +use alloy_eips::{eip4895::Withdrawals, merge::BEACON_NONCE}; +use alloy_primitives::{Address, Bytes, B256, U256}; use alloy_rpc_types_debug::ExecutionWitness; use alloy_rpc_types_engine::PayloadId; use reth_basic_payload_builder::*; use reth_chain_state::ExecutedBlock; -use reth_chainspec::ChainSpecProvider; +use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; use reth_evm::{system_calls::SystemCaller, ConfigureEvm, NextBlockEnvAttributes}; use reth_execution_types::ExecutionOutcome; use reth_optimism_chainspec::OpChainSpec; @@ -318,13 +318,13 @@ where } } - let withdrawals_outcome = ctx.commit_withdrawals(state)?; + let withdrawals_root = ctx.commit_withdrawals(state)?; // merge all transitions into bundle state, this would apply the withdrawal balance changes // and 4788 contract call state.merge_transitions(BundleRetention::Reverts); - Ok(BuildOutcomeKind::Better { 
payload: ExecutedPayload { info, withdrawals_outcome } })
+        Ok(BuildOutcomeKind::Better { payload: ExecutedPayload { info, withdrawals_root } })
     }
 
     /// Builds the payload on top of the state.
@@ -338,10 +338,7 @@ where
         DB: Database<Error = ProviderError> + AsRef<P>,
         P: StateRootProvider,
     {
-        let ExecutedPayload {
-            info,
-            withdrawals_outcome: WithdrawalsOutcome { withdrawals, withdrawals_root },
-        } = match self.execute(&mut state, &ctx)? {
+        let ExecutedPayload { info, withdrawals_root } = match self.execute(&mut state, &ctx)? {
             BuildOutcomeKind::Better { payload } | BuildOutcomeKind::Freeze(payload) => payload,
             BuildOutcomeKind::Cancelled => return Ok(BuildOutcomeKind::Cancelled),
             BuildOutcomeKind::Aborted { fees } => return Ok(BuildOutcomeKind::Aborted { fees }),
@@ -350,7 +347,7 @@
         let block_number = ctx.block_number();
         let execution_outcome = ExecutionOutcome::new(
             state.take_bundle(),
-            vec![info.receipts.clone()].into(),
+            vec![info.receipts].into(),
             block_number,
             Vec::new(),
         );
@@ -419,12 +416,12 @@
             body: BlockBody {
                 transactions: info.executed_transactions,
                 ommers: vec![],
-                withdrawals,
+                withdrawals: ctx.withdrawals().cloned(),
             },
         };
 
         let sealed_block = Arc::new(block.seal_slow());
-        debug!(target: "payload_builder", ?sealed_block, "sealed built block");
+        debug!(target: "payload_builder", id=%ctx.attributes().payload_id(), sealed_block_header = ?sealed_block.header, "sealed built block");
 
         // create the executed block data
         let executed = ExecutedBlock {
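The builder hunks above collapse the old `WithdrawalsOutcome` struct into a plain `Option<B256>`. A toy sketch of the three states the option now encodes; the root computation and the empty-root constant are placeholders, not the real EIP-4895 values:

```rust
/// Toy stand-ins (the real code uses `alloy_primitives::B256` and a proper
/// withdrawals trie root).
type B256 = [u8; 32];

const EMPTY_WITHDRAWALS: B256 = [0; 32]; // placeholder, not the real empty root

fn calculate_withdrawals_root(withdrawals: &[u64]) -> B256 {
    // stand-in for the real trie-root computation
    let mut root = [0u8; 32];
    root[0] = withdrawals.len() as u8;
    root
}

/// Collapsing the outcome struct into `Option<B256>` covers all three cases:
/// - pre-Shanghai: `None` (the header carries no withdrawals root)
/// - Shanghai, no withdrawals: `Some(EMPTY_WITHDRAWALS)`
/// - Shanghai, with withdrawals: `Some(root)`
fn commit_withdrawals(shanghai_active: bool, withdrawals: &[u64]) -> Option<B256> {
    if !shanghai_active {
        return None;
    }
    if withdrawals.is_empty() {
        return Some(EMPTY_WITHDRAWALS);
    }
    Some(calculate_withdrawals_root(withdrawals))
}

fn main() {
    assert_eq!(commit_withdrawals(false, &[1]), None);
    assert_eq!(commit_withdrawals(true, &[]), Some(EMPTY_WITHDRAWALS));
    assert!(commit_withdrawals(true, &[1, 2]).is_some());
}
```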
@@ -501,8 +498,8 @@ impl OpPayloadTransactions for () {
 pub struct ExecutedPayload {
     /// Tracked execution info
     pub info: ExecutionInfo,
-    /// Outcome after committing withdrawals.
-    pub withdrawals_outcome: WithdrawalsOutcome,
+    /// Withdrawal hash.
+    pub withdrawals_root: Option<B256>,
 }
 
 /// This acts as the container for executed transactions and its byproducts (receipts, gas used)
@@ -563,6 +560,13 @@ impl<EvmConfig> OpPayloadBuilderCtx<EvmConfig> {
         &self.config.attributes
     }
 
+    /// Returns the withdrawals if shanghai is active.
+    pub fn withdrawals(&self) -> Option<&Withdrawals> {
+        self.chain_spec
+            .is_shanghai_active_at_timestamp(self.attributes().timestamp())
+            .then(|| &self.attributes().payload_attributes.withdrawals)
+    }
+
     /// Returns the block gas limit to target.
     pub fn block_gas_limit(&self) -> u64 {
         self.attributes()
@@ -652,10 +656,7 @@ impl<EvmConfig> OpPayloadBuilderCtx<EvmConfig> {
     }
 
     /// Commits the withdrawals from the payload attributes to the state.
-    pub fn commit_withdrawals<DB>(
-        &self,
-        db: &mut State<DB>,
-    ) -> Result<WithdrawalsOutcome, ProviderError>
+    pub fn commit_withdrawals<DB>(&self, db: &mut State<DB>) -> Result<Option<B256>, ProviderError>
     where
         DB: Database,
     {
         commit_withdrawals(
             db,
             &self.chain_spec,
             self.attributes().payload_attributes.timestamp,
-            self.attributes().payload_attributes.withdrawals.clone(),
+            &self.attributes().payload_attributes.withdrawals,
         )
     }
 
diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs
index 36f11ee628b..1a951abadca 100644
--- a/crates/optimism/payload/src/payload.rs
+++ b/crates/optimism/payload/src/payload.rs
@@ -7,7 +7,7 @@ use alloy_eips::{
 use alloy_primitives::{keccak256, Address, Bytes, B256, B64, U256};
 use alloy_rlp::Encodable;
 use alloy_rpc_types_engine::{ExecutionPayloadEnvelopeV2, ExecutionPayloadV1, PayloadId};
-use op_alloy_consensus::eip1559::{decode_holocene_extra_data, EIP1559ParamError};
+use op_alloy_consensus::{decode_holocene_extra_data, EIP1559ParamError};
 
 /// Re-export for use in downstream arguments.
pub use op_alloy_rpc_types_engine::OpPayloadAttributes; use op_alloy_rpc_types_engine::{OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4}; diff --git a/crates/optimism/primitives/Cargo.toml b/crates/optimism/primitives/Cargo.toml index 216e559a201..e7200c40ed8 100644 --- a/crates/optimism/primitives/Cargo.toml +++ b/crates/optimism/primitives/Cargo.toml @@ -12,21 +12,70 @@ description = "OP primitive types" workspace = true [dependencies] +# reth +reth-node-types.workspace = true +reth-primitives.workspace = true +reth-primitives-traits.workspace = true +reth-codecs = { workspace = true, optional = true, features = ["optimism"] } + +# ethereum alloy-primitives.workspace = true alloy-consensus.workspace = true -op-alloy-consensus.workspace = true alloy-eips.workspace = true alloy-rlp.workspace = true -derive_more.workspace = true + +# op +op-alloy-consensus.workspace = true + +# codec bytes.workspace = true -reth-primitives-traits.workspace = true -reth-codecs = { workspace = true, optional = true } -reth-primitives = { workspace = true, features = ["reth-codec"], optional = true } +serde = { workspace = true, optional = true } -[features] -default = ["reth-codec"] -reth-codec = ["dep:reth-codecs", "dep:reth-primitives"] +# misc +derive_more.workspace = true + +# test +arbitrary = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] reth-codecs = { workspace = true, features = ["test-utils"] } -rstest.workspace = true \ No newline at end of file +rstest.workspace = true +arbitrary.workspace = true + +[features] +default = ["std", "reth-codec"] +std = [ + "reth-primitives-traits/std", + "reth-primitives/std", + "reth-node-types/std", + "reth-codecs/std", + "alloy-consensus/std", + "alloy-eips/std", + "alloy-primitives/std", + "serde/std", +] +reth-codec = [ + "dep:reth-codecs", + "reth-primitives/reth-codec", + "reth-primitives-traits/reth-codec", +] +serde = [ + "dep:serde", + "reth-primitives-traits/serde", + "alloy-primitives/serde", + "alloy-consensus/serde", + "alloy-eips/serde", + "bytes/serde", + "reth-codecs?/serde", + "op-alloy-consensus/serde", +] +arbitrary = [ + "dep:arbitrary", + "reth-primitives-traits/arbitrary", + "reth-primitives/arbitrary", + "reth-codecs?/arbitrary", + "op-alloy-consensus/arbitrary", + "alloy-consensus/arbitrary", + "alloy-eips/arbitrary", + "alloy-primitives/arbitrary", +] diff --git a/crates/optimism/primitives/src/lib.rs b/crates/optimism/primitives/src/lib.rs index a0745e7ac7d..0f4608a8ebe 100644 --- a/crates/optimism/primitives/src/lib.rs +++ b/crates/optimism/primitives/src/lib.rs @@ -6,6 +6,26 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] pub mod bedrock; -pub mod tx_type; +pub mod transaction; + +pub use transaction::{tx_type::OpTxType, OpTransaction}; + +use alloy_consensus::Header; +use reth_node_types::NodePrimitives; +use reth_primitives::{Block, BlockBody, Receipt, TransactionSigned}; + +/// Optimism primitive types. 
+#[derive(Debug, Default, Clone, PartialEq, Eq)]
+pub struct OpPrimitives;
+
+impl NodePrimitives for OpPrimitives {
+    type Block = Block;
+    type BlockHeader = Header;
+    type BlockBody = BlockBody;
+    type SignedTx = TransactionSigned;
+    type TxType = OpTxType;
+    type Receipt = Receipt;
+}
diff --git a/crates/optimism/primitives/src/transaction/mod.rs b/crates/optimism/primitives/src/transaction/mod.rs
new file mode 100644
index 00000000000..070b3d984e0
--- /dev/null
+++ b/crates/optimism/primitives/src/transaction/mod.rs
@@ -0,0 +1,173 @@
+//! Wrapper of [`OpTypedTransaction`] that implements the reth database encoding [`Compact`].
+
+pub mod tx_type;
+
+use alloy_primitives::{bytes, Bytes, TxKind, Uint, B256};
+
+use alloy_consensus::{constants::EIP7702_TX_TYPE_ID, TxLegacy};
+use alloy_eips::{eip2930::AccessList, eip7702::SignedAuthorization};
+use derive_more::{Deref, From};
+use op_alloy_consensus::{OpTypedTransaction, DEPOSIT_TX_TYPE_ID};
+use reth_codecs::Compact;
+use reth_primitives::transaction::{
+    COMPACT_EXTENDED_IDENTIFIER_FLAG, COMPACT_IDENTIFIER_EIP1559, COMPACT_IDENTIFIER_EIP2930,
+    COMPACT_IDENTIFIER_LEGACY,
+};
+use reth_primitives_traits::InMemorySize;
+
+#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
+#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
+#[derive(Debug, Clone, PartialEq, Eq, Deref, Hash, From)]
+/// Optimism transaction.
+pub struct OpTransaction(OpTypedTransaction);
+
+impl Default for OpTransaction {
+    fn default() -> Self {
+        Self(OpTypedTransaction::Legacy(TxLegacy::default()))
+    }
+}
+
+impl Compact for OpTransaction {
+    fn to_compact<B>(&self, out: &mut B) -> usize
+    where
+        B: bytes::BufMut + AsMut<[u8]>,
+    {
+        match &self.0 {
+            OpTypedTransaction::Legacy(tx) => tx.to_compact(out),
+            OpTypedTransaction::Eip2930(tx) => tx.to_compact(out),
+            OpTypedTransaction::Eip1559(tx) => tx.to_compact(out),
+            OpTypedTransaction::Eip7702(tx) => tx.to_compact(out),
+            OpTypedTransaction::Deposit(tx) => tx.to_compact(out),
+        }
+    }
+
+    fn from_compact(mut buf: &[u8], identifier: usize) -> (Self, &[u8]) {
+        use bytes::Buf;
+
+        match identifier {
+            COMPACT_IDENTIFIER_LEGACY => {
+                let (tx, buf) = TxLegacy::from_compact(buf, buf.len());
+                (Self(OpTypedTransaction::Legacy(tx)), buf)
+            }
+            COMPACT_IDENTIFIER_EIP2930 => {
+                let (tx, buf) =
+                    alloy_consensus::transaction::TxEip2930::from_compact(buf, buf.len());
+                (Self(OpTypedTransaction::Eip2930(tx)), buf)
+            }
+            COMPACT_IDENTIFIER_EIP1559 => {
+                let (tx, buf) =
+                    alloy_consensus::transaction::TxEip1559::from_compact(buf, buf.len());
+                (Self(OpTypedTransaction::Eip1559(tx)), buf)
+            }
+            COMPACT_EXTENDED_IDENTIFIER_FLAG => {
+                // An identifier of 3 indicates that the transaction type did not fit into
+                // the backwards compatible 2 bit identifier, their transaction types are
+                // larger than 2 bits (eg. 4844 and Deposit Transactions). In this case,
+                // we need to read the concrete transaction type from the buffer by
+                // reading the full 8 bits (single byte) and match on this transaction type.
+                let identifier = buf.get_u8();
+                match identifier {
+                    EIP7702_TX_TYPE_ID => {
+                        let (tx, buf) =
+                            alloy_consensus::transaction::TxEip7702::from_compact(buf, buf.len());
+                        (Self(OpTypedTransaction::Eip7702(tx)), buf)
+                    }
+                    DEPOSIT_TX_TYPE_ID => {
+                        let (tx, buf) = op_alloy_consensus::TxDeposit::from_compact(buf, buf.len());
+                        (Self(OpTypedTransaction::Deposit(tx)), buf)
+                    }
+                    _ => unreachable!(
+                        "Junk data in database: unknown Transaction variant: {identifier}"
+                    ),
+                }
+            }
+            _ => unreachable!("Junk data in database: unknown Transaction variant: {identifier}"),
+        }
+    }
+}
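The `Compact` impl above uses reth's split identifier scheme: variants that fit the legacy 2-bit identifier are encoded directly, while the rest are flagged as extended (identifier 3) and write their concrete type byte into the payload. A self-contained toy round trip of that scheme; only the two constants mirror the real values:

```rust
const COMPACT_EXTENDED_IDENTIFIER_FLAG: usize = 3;
const DEPOSIT_TX_TYPE_ID: u8 = 0x7e;

enum ToyTx {
    Legacy(Vec<u8>),
    Deposit(Vec<u8>),
}

impl ToyTx {
    /// Returns the identifier and appends the payload (and, for extended
    /// variants, the concrete type byte first) to `out`.
    fn to_compact(&self, out: &mut Vec<u8>) -> usize {
        match self {
            Self::Legacy(data) => {
                out.extend_from_slice(data);
                0 // fits the 2-bit identifier
            }
            Self::Deposit(data) => {
                out.push(DEPOSIT_TX_TYPE_ID);
                out.extend_from_slice(data);
                COMPACT_EXTENDED_IDENTIFIER_FLAG
            }
        }
    }

    fn from_compact(mut buf: &[u8], identifier: usize) -> Self {
        match identifier {
            0 => Self::Legacy(buf.to_vec()),
            COMPACT_EXTENDED_IDENTIFIER_FLAG => {
                // extended variants carry their real type byte in the payload
                let (ty, rest) = buf.split_first().expect("missing extended type byte");
                buf = rest;
                match *ty {
                    DEPOSIT_TX_TYPE_ID => Self::Deposit(buf.to_vec()),
                    other => unreachable!("unknown extended tx type: {other}"),
                }
            }
            other => unreachable!("unknown identifier: {other}"),
        }
    }
}

fn main() {
    let tx = ToyTx::Deposit(vec![1, 2, 3]);
    let mut buf = Vec::new();
    let id = tx.to_compact(&mut buf);
    assert!(matches!(ToyTx::from_compact(&buf, id), ToyTx::Deposit(d) if d == [1, 2, 3]));
}
```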
+
+impl alloy_consensus::Transaction for OpTransaction {
+    fn chain_id(&self) -> Option<u64> {
+        self.0.chain_id()
+    }
+
+    fn nonce(&self) -> u64 {
+        self.0.nonce()
+    }
+
+    fn gas_limit(&self) -> u64 {
+        self.0.gas_limit()
+    }
+
+    fn gas_price(&self) -> Option<u128> {
+        self.0.gas_price()
+    }
+
+    fn max_fee_per_gas(&self) -> u128 {
+        self.0.max_fee_per_gas()
+    }
+
+    fn max_priority_fee_per_gas(&self) -> Option<u128> {
+        self.0.max_priority_fee_per_gas()
+    }
+
+    fn max_fee_per_blob_gas(&self) -> Option<u128> {
+        self.0.max_fee_per_blob_gas()
+    }
+
+    fn priority_fee_or_price(&self) -> u128 {
+        self.0.priority_fee_or_price()
+    }
+
+    fn kind(&self) -> TxKind {
+        self.0.kind()
+    }
+
+    fn value(&self) -> Uint<256, 4> {
+        self.0.value()
+    }
+
+    fn input(&self) -> &Bytes {
+        self.0.input()
+    }
+
+    fn ty(&self) -> u8 {
+        self.0.ty()
+    }
+
+    fn access_list(&self) -> Option<&AccessList> {
+        self.0.access_list()
+    }
+
+    fn blob_versioned_hashes(&self) -> Option<&[B256]> {
+        self.0.blob_versioned_hashes()
+    }
+
+    fn authorization_list(&self) -> Option<&[SignedAuthorization]> {
+        self.0.authorization_list()
+    }
+
+    fn is_dynamic_fee(&self) -> bool {
+        self.0.is_dynamic_fee()
+    }
+
+    fn effective_gas_price(&self, base_fee: Option<u64>) -> u128 {
+        self.0.effective_gas_price(base_fee)
+    }
+
+    fn effective_tip_per_gas(&self, base_fee: u64) -> Option<u128> {
+        self.0.effective_tip_per_gas(base_fee)
+    }
+}
+
+impl InMemorySize for OpTransaction {
+    fn size(&self) -> usize {
+        match &self.0 {
+            OpTypedTransaction::Legacy(tx) => tx.size(),
+            OpTypedTransaction::Eip2930(tx) => tx.size(),
+            OpTypedTransaction::Eip1559(tx) => tx.size(),
+            OpTypedTransaction::Eip7702(tx) => tx.size(),
+            OpTypedTransaction::Deposit(tx) => tx.size(),
+        }
+    }
+}
diff --git a/crates/optimism/primitives/src/tx_type.rs b/crates/optimism/primitives/src/transaction/tx_type.rs
similarity index 86%
rename from crates/optimism/primitives/src/tx_type.rs
rename to crates/optimism/primitives/src/transaction/tx_type.rs
index 1b505920120..9976221b424 100644
--- a/crates/optimism/primitives/src/tx_type.rs
+++ b/crates/optimism/primitives/src/transaction/tx_type.rs
@@ -3,7 +3,6 @@
 //! This type is required because a `Compact` impl is needed on the deposit tx type.
 
 use core::fmt::Debug;
-use std::convert::TryFrom;
 
 use alloy_primitives::{U64, U8};
 use alloy_rlp::{Decodable, Encodable, Error};
@@ -13,20 +12,12 @@ use derive_more::{
     Display,
 };
 use op_alloy_consensus::OpTxType as AlloyOpTxType;
-use reth_primitives_traits::TxType;
-
-#[cfg(feature = "reth-codec")]
-use alloy_consensus::constants::EIP7702_TX_TYPE_ID;
-#[cfg(feature = "reth-codec")]
-use op_alloy_consensus::DEPOSIT_TX_TYPE_ID;
-#[cfg(feature = "reth-codec")]
-use reth_primitives::transaction::{
-    COMPACT_EXTENDED_IDENTIFIER_FLAG, COMPACT_IDENTIFIER_EIP1559, COMPACT_IDENTIFIER_EIP2930,
-    COMPACT_IDENTIFIER_LEGACY,
-};
+use reth_primitives_traits::{InMemorySize, TxType};
 
-/// Wrapper type for `AlloyOpTxType` to implement `TxType` trait.
+/// Wrapper type for [`op_alloy_consensus::OpTxType`] to implement +/// [`TxType`] trait. #[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Display, Ord, Hash, From, Into)] +#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[into(u8)] pub struct OpTxType(AlloyOpTxType); @@ -57,6 +48,14 @@ impl TxType for OpTxType { } } +impl InMemorySize for OpTxType { + /// Calculates a heuristic for the in-memory size of the [`OpTxType`]. + #[inline] + fn size(&self) -> usize { + core::mem::size_of::() + } +} + impl From for U8 { fn from(tx_type: OpTxType) -> Self { Self::from(u8::from(tx_type)) @@ -139,16 +138,17 @@ impl reth_codecs::Compact for OpTxType { where B: bytes::BufMut + AsMut<[u8]>, { + use reth_codecs::txtype::*; match self.0 { AlloyOpTxType::Legacy => COMPACT_IDENTIFIER_LEGACY, AlloyOpTxType::Eip2930 => COMPACT_IDENTIFIER_EIP2930, AlloyOpTxType::Eip1559 => COMPACT_IDENTIFIER_EIP1559, AlloyOpTxType::Eip7702 => { - buf.put_u8(EIP7702_TX_TYPE_ID); + buf.put_u8(alloy_consensus::constants::EIP7702_TX_TYPE_ID); COMPACT_EXTENDED_IDENTIFIER_FLAG } AlloyOpTxType::Deposit => { - buf.put_u8(DEPOSIT_TX_TYPE_ID); + buf.put_u8(op_alloy_consensus::DEPOSIT_TX_TYPE_ID); COMPACT_EXTENDED_IDENTIFIER_FLAG } } @@ -158,14 +158,16 @@ impl reth_codecs::Compact for OpTxType { use bytes::Buf; ( match identifier { - COMPACT_IDENTIFIER_LEGACY => Self(AlloyOpTxType::Legacy), - COMPACT_IDENTIFIER_EIP2930 => Self(AlloyOpTxType::Eip2930), - COMPACT_IDENTIFIER_EIP1559 => Self(AlloyOpTxType::Eip1559), - COMPACT_EXTENDED_IDENTIFIER_FLAG => { + reth_codecs::txtype::COMPACT_IDENTIFIER_LEGACY => Self(AlloyOpTxType::Legacy), + reth_codecs::txtype::COMPACT_IDENTIFIER_EIP2930 => Self(AlloyOpTxType::Eip2930), + reth_codecs::txtype::COMPACT_IDENTIFIER_EIP1559 => Self(AlloyOpTxType::Eip1559), + reth_codecs::txtype::COMPACT_EXTENDED_IDENTIFIER_FLAG => { let extended_identifier = buf.get_u8(); match extended_identifier { - EIP7702_TX_TYPE_ID => Self(AlloyOpTxType::Eip7702), - DEPOSIT_TX_TYPE_ID => Self(AlloyOpTxType::Deposit), + alloy_consensus::constants::EIP7702_TX_TYPE_ID => { + Self(AlloyOpTxType::Eip7702) + } + op_alloy_consensus::DEPOSIT_TX_TYPE_ID => Self(AlloyOpTxType::Deposit), _ => panic!("Unsupported OpTxType identifier: {extended_identifier}"), } } @@ -179,8 +181,10 @@ impl reth_codecs::Compact for OpTxType { #[cfg(test)] mod tests { use super::*; + use alloy_consensus::constants::EIP7702_TX_TYPE_ID; use bytes::BytesMut; - use reth_codecs::Compact; + use op_alloy_consensus::DEPOSIT_TX_TYPE_ID; + use reth_codecs::{txtype::*, Compact}; use rstest::rstest; #[test] diff --git a/crates/optimism/rpc/src/eth/block.rs b/crates/optimism/rpc/src/eth/block.rs index 6678fbe5df4..22d26e824b3 100644 --- a/crates/optimism/rpc/src/eth/block.rs +++ b/crates/optimism/rpc/src/eth/block.rs @@ -48,7 +48,7 @@ where .enumerate() .map(|(idx, (ref tx, receipt))| -> Result<_, _> { let meta = TransactionMeta { - tx_hash: tx.hash, + tx_hash: tx.hash(), index: idx as u64, block_hash, block_number, diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index f3d16b4adb5..5064c9ed5cf 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -11,7 +11,7 @@ use reth_optimism_chainspec::OpChainSpec; use reth_optimism_evm::RethL1BlockInfo; use reth_optimism_forks::OpHardforks; use reth_primitives::{Receipt, TransactionMeta, TransactionSigned, TxType}; -use reth_provider::ChainSpecProvider; +use reth_provider::{ChainSpecProvider, 
TransactionsProvider}; use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcReceipt}; use reth_rpc_eth_types::{receipt::build_receipt, EthApiError}; @@ -21,6 +21,7 @@ impl LoadReceipt for OpEthApi where Self: Send + Sync, N: FullNodeComponents>, + Self::Provider: TransactionsProvider, { async fn build_transaction_receipt( &self, @@ -54,10 +55,10 @@ where /// L1 fee and data gas for a non-deposit transaction, or deposit nonce and receipt version for a /// deposit transaction. -#[derive(Debug, Default, Clone)] +#[derive(Debug, Clone)] pub struct OpReceiptFieldsBuilder { /// Block timestamp. - pub l1_block_timestamp: u64, + pub block_timestamp: u64, /// The L1 fee for transaction. pub l1_fee: Option, /// L1 gas used by transaction. @@ -84,8 +85,19 @@ pub struct OpReceiptFieldsBuilder { impl OpReceiptFieldsBuilder { /// Returns a new builder. - pub fn new(block_timestamp: u64) -> Self { - Self { l1_block_timestamp: block_timestamp, ..Default::default() } + pub const fn new(block_timestamp: u64) -> Self { + Self { + block_timestamp, + l1_fee: None, + l1_data_gas: None, + l1_fee_scalar: None, + l1_base_fee: None, + deposit_nonce: None, + deposit_receipt_version: None, + l1_base_fee_scalar: None, + l1_blob_base_fee: None, + l1_blob_base_fee_scalar: None, + } } /// Applies [`L1BlockInfo`](revm::L1BlockInfo). @@ -96,7 +108,7 @@ impl OpReceiptFieldsBuilder { l1_block_info: revm::L1BlockInfo, ) -> Result { let raw_tx = tx.encoded_2718(); - let timestamp = self.l1_block_timestamp; + let timestamp = self.block_timestamp; self.l1_fee = Some( l1_block_info @@ -140,7 +152,7 @@ impl OpReceiptFieldsBuilder { /// Builds the [`OpTransactionReceiptFields`] object. pub const fn build(self) -> OpTransactionReceiptFields { let Self { - l1_block_timestamp: _, // used to compute other fields + block_timestamp: _, // used to compute other fields l1_fee, l1_data_gas: l1_gas_used, l1_fee_scalar, @@ -187,6 +199,7 @@ impl OpReceiptBuilder { all_receipts: &[Receipt], l1_block_info: revm::L1BlockInfo, ) -> Result { + let timestamp = meta.timestamp; let core_receipt = build_receipt(transaction, meta, receipt, all_receipts, |receipt_with_bloom| { match receipt.tx_type { @@ -211,7 +224,7 @@ impl OpReceiptBuilder { } })?; - let op_receipt_fields = OpReceiptFieldsBuilder::default() + let op_receipt_fields = OpReceiptFieldsBuilder::new(timestamp) .l1_block_info(chain_spec, transaction, l1_block_info)? .deposit_nonce(receipt.deposit_nonce) .deposit_version(receipt.deposit_receipt_version) @@ -233,13 +246,12 @@ impl OpReceiptBuilder { #[cfg(test)] mod test { + use super::*; use alloy_primitives::hex; use op_alloy_network::eip2718::Decodable2718; - use reth_optimism_chainspec::OP_MAINNET; + use reth_optimism_chainspec::{BASE_MAINNET, OP_MAINNET}; use reth_primitives::{Block, BlockBody}; - use super::*; - /// OP Mainnet transaction at index 0 in block 124665056. 
/// /// @@ -342,4 +354,46 @@ mod test { "incorrect l1 blob base fee scalar" ); } + + // + #[test] + fn base_receipt_gas_fields() { + // https://basescan.org/tx/0x510fd4c47d78ba9f97c91b0f2ace954d5384c169c9545a77a373cf3ef8254e6e + let system = hex!("7ef8f8a0389e292420bcbf9330741f72074e39562a09ff5a00fd22e4e9eee7e34b81bca494deaddeaddeaddeaddeaddeaddeaddeaddead00019442000000000000000000000000000000000000158080830f424080b8a4440a5e20000008dd00101c120000000000000004000000006721035b00000000014189960000000000000000000000000000000000000000000000000000000349b4dcdc000000000000000000000000000000000000000000000000000000004ef9325cc5991ce750960f636ca2ffbb6e209bb3ba91412f21dd78c14ff154d1930f1f9a0000000000000000000000005050f69a9786f081509234f1a7f4684b5e5b76c9"); + let tx_0 = TransactionSigned::decode_2718(&mut &system[..]).unwrap(); + + let block = Block { + body: BlockBody { transactions: vec![tx_0], ..Default::default() }, + ..Default::default() + }; + let l1_block_info = + reth_optimism_evm::extract_l1_info(&block.body).expect("should extract l1 info"); + + // https://basescan.org/tx/0xf9420cbaf66a2dda75a015488d37262cbfd4abd0aad7bb2be8a63e14b1fa7a94 + let tx = hex!("02f86c8221058034839a4ae283021528942f16386bb37709016023232523ff6d9daf444be380841249c58bc080a001b927eda2af9b00b52a57be0885e0303c39dd2831732e14051c2336470fd468a0681bf120baf562915841a48601c2b54a6742511e535cf8f71c95115af7ff63bd"); + let tx_1 = TransactionSigned::decode_2718(&mut &tx[..]).unwrap(); + + let receipt_meta = OpReceiptFieldsBuilder::new(1730216981) + .l1_block_info(&BASE_MAINNET, &tx_1, l1_block_info) + .expect("should parse revm l1 info") + .build(); + + let L1BlockInfo { + l1_gas_price, + l1_gas_used, + l1_fee, + l1_fee_scalar, + l1_base_fee_scalar, + l1_blob_base_fee, + l1_blob_base_fee_scalar, + } = receipt_meta.l1_block_info; + + assert_eq!(l1_gas_price, Some(14121491676), "incorrect l1 base fee (former gas price)"); + assert_eq!(l1_gas_used, Some(1600), "incorrect l1 gas used"); + assert_eq!(l1_fee, Some(191150293412), "incorrect l1 fee"); + assert!(l1_fee_scalar.is_none(), "incorrect l1 fee scalar"); + assert_eq!(l1_base_fee_scalar, Some(2269), "incorrect l1 base fee scalar"); + assert_eq!(l1_blob_base_fee, Some(1324954204), "incorrect l1 blob base fee"); + assert_eq!(l1_blob_base_fee_scalar, Some(1055762), "incorrect l1 blob base fee scalar"); + } } diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 11e33817229..19bcd31dacc 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -58,6 +58,7 @@ impl LoadTransaction for OpEthApi where Self: SpawnBlocking + FullEthApiTypes, N: RpcNodeCore, + Self::Pool: TransactionPool, { } @@ -84,7 +85,8 @@ where tx_info: TransactionInfo, ) -> Result { let from = tx.signer(); - let TransactionSigned { transaction, signature, hash } = tx.into_signed(); + let hash = tx.hash(); + let TransactionSigned { transaction, signature, .. 
} = tx.into_signed(); let mut deposit_receipt_version = None; let mut deposit_nonce = None; diff --git a/crates/optimism/rpc/src/witness.rs b/crates/optimism/rpc/src/witness.rs index 0521fa9025d..ed9d77e73e8 100644 --- a/crates/optimism/rpc/src/witness.rs +++ b/crates/optimism/rpc/src/witness.rs @@ -11,7 +11,7 @@ use reth_optimism_chainspec::OpChainSpec; use reth_optimism_payload_builder::OpPayloadBuilder; use reth_primitives::SealedHeader; use reth_provider::{BlockReaderIdExt, ProviderError, ProviderResult, StateProviderFactory}; -use reth_rpc_api::DebugExecutionWitnessApiServer; +pub use reth_rpc_api::DebugExecutionWitnessApiServer; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use std::{fmt::Debug, sync::Arc}; diff --git a/crates/optimism/storage/src/lib.rs b/crates/optimism/storage/src/lib.rs index c3b8a71feea..391f26093ba 100644 --- a/crates/optimism/storage/src/lib.rs +++ b/crates/optimism/storage/src/lib.rs @@ -16,7 +16,7 @@ mod tests { CompactClientVersion, CompactU256, CompactU64, StoredBlockBodyIndices, StoredBlockOmmers, StoredBlockWithdrawals, }; - use reth_primitives::{Account, Receipt, ReceiptWithBloom}; + use reth_primitives::{Account, Receipt}; use reth_prune_types::{PruneCheckpoint, PruneMode, PruneSegment}; use reth_stages_types::{ AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, ExecutionCheckpoint, @@ -40,7 +40,6 @@ mod tests { assert_eq!(PruneMode::bitflag_encoded_bytes(), 1); assert_eq!(PruneSegment::bitflag_encoded_bytes(), 1); assert_eq!(Receipt::bitflag_encoded_bytes(), 2); - assert_eq!(ReceiptWithBloom::bitflag_encoded_bytes(), 0); assert_eq!(StageCheckpoint::bitflag_encoded_bytes(), 1); assert_eq!(StageUnitCheckpoint::bitflag_encoded_bytes(), 1); assert_eq!(StoredBlockBodyIndices::bitflag_encoded_bytes(), 1); @@ -65,7 +64,6 @@ mod tests { validate_bitflag_backwards_compat!(PruneMode, UnusedBits::Zero); validate_bitflag_backwards_compat!(PruneSegment, UnusedBits::Zero); validate_bitflag_backwards_compat!(Receipt, UnusedBits::NotZero); - validate_bitflag_backwards_compat!(ReceiptWithBloom, UnusedBits::Zero); validate_bitflag_backwards_compat!(StageCheckpoint, UnusedBits::NotZero); validate_bitflag_backwards_compat!(StageUnitCheckpoint, UnusedBits::Zero); validate_bitflag_backwards_compat!(StoredBlockBodyIndices, UnusedBits::Zero); diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index e3193ec6deb..0ab411d3e60 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -979,31 +979,6 @@ impl Default for MissingPayloadBehaviour { } } -/// Represents the outcome of committing withdrawals to the runtime database and post state. -/// Pre-shanghai these are `None` values. -#[derive(Default, Debug)] -pub struct WithdrawalsOutcome { - /// committed withdrawals, if any. - pub withdrawals: Option, - /// withdrawals root if any. - pub withdrawals_root: Option, -} - -impl WithdrawalsOutcome { - /// No withdrawals pre shanghai - pub const fn pre_shanghai() -> Self { - Self { withdrawals: None, withdrawals_root: None } - } - - /// No withdrawals - pub fn empty() -> Self { - Self { - withdrawals: Some(Withdrawals::default()), - withdrawals_root: Some(EMPTY_WITHDRAWALS), - } - } -} - /// Executes the withdrawals and commits them to the _runtime_ Database and `BundleState`. /// /// Returns the withdrawals root. 
@@ -1013,32 +988,26 @@ pub fn commit_withdrawals<DB, ChainSpec>(
     db: &mut State<DB>,
     chain_spec: &ChainSpec,
     timestamp: u64,
-    withdrawals: Withdrawals,
-) -> Result<WithdrawalsOutcome, DB::Error>
+    withdrawals: &Withdrawals,
+) -> Result<Option<B256>, DB::Error>
 where
     DB: Database,
     ChainSpec: EthereumHardforks,
 {
     if !chain_spec.is_shanghai_active_at_timestamp(timestamp) {
-        return Ok(WithdrawalsOutcome::pre_shanghai())
+        return Ok(None)
     }
 
     if withdrawals.is_empty() {
-        return Ok(WithdrawalsOutcome::empty())
+        return Ok(Some(EMPTY_WITHDRAWALS))
     }
 
     let balance_increments =
-        post_block_withdrawals_balance_increments(chain_spec, timestamp, &withdrawals);
+        post_block_withdrawals_balance_increments(chain_spec, timestamp, withdrawals);
 
     db.increment_balances(balance_increments)?;
 
-    let withdrawals_root = proofs::calculate_withdrawals_root(&withdrawals);
-
-    // calculate withdrawals root
-    Ok(WithdrawalsOutcome {
-        withdrawals: Some(withdrawals),
-        withdrawals_root: Some(withdrawals_root),
-    })
+    Ok(Some(proofs::calculate_withdrawals_root(withdrawals)))
 }
 
 /// Checks if the new payload is better than the current best.
diff --git a/crates/primitives-traits/Cargo.toml b/crates/primitives-traits/Cargo.toml
index 6ad16c4090c..27aa92f4edb 100644
--- a/crates/primitives-traits/Cargo.toml
+++ b/crates/primitives-traits/Cargo.toml
@@ -12,9 +12,11 @@ description = "Common types in reth."
 workspace = true
 
 [dependencies]
-reth-codecs.workspace = true
+# reth
+reth-codecs = { workspace = true, optional = true }
 
-alloy-consensus = { workspace = true, features = ["serde"] }
+# ethereum
+alloy-consensus.workspace = true
 alloy-eips.workspace = true
 alloy-genesis.workspace = true
 alloy-primitives.workspace = true
@@ -25,16 +27,16 @@ revm-primitives = { package = "reth-scroll-revm", path = "../scroll/revm", featu
 reth-scroll-primitives = { workspace = true, optional = true }
 
 # misc
-byteorder = "1"
+byteorder = { workspace = true, optional = true }
+bytes.workspace = true
 derive_more.workspace = true
 roaring = "0.10.2"
 serde_with = { workspace = true, optional = true }
 auto_impl.workspace = true
 
 # required by reth-codecs
-bytes.workspace = true
-modular-bitfield.workspace = true
-serde.workspace = true
+modular-bitfield = { workspace = true, optional = true }
+serde = { workspace = true, optional = true }
 
 # arbitrary utils
 arbitrary = { workspace = true, features = ["derive"], optional = true }
@@ -51,6 +53,8 @@
 proptest.workspace = true
 rand.workspace = true
 serde_json.workspace = true
 test-fuzz.workspace = true
+modular-bitfield.workspace = true
+serde.workspace = true
 
 [features]
 default = ["std"]
@@ -60,11 +64,11 @@ std = [
     "alloy-genesis/std",
     "alloy-primitives/std",
     "revm-primitives/std",
-    "serde/std"
+    "serde?/std"
 ]
 test-utils = [
     "arbitrary",
-    "reth-codecs/test-utils",
+    "reth-codecs?/test-utils",
    "revm-primitives/test-utils"
 ]
 arbitrary = [
@@ -76,14 +80,33 @@ arbitrary = [
     "dep:proptest-arbitrary-interop",
     "alloy-eips/arbitrary",
     "revm-primitives/arbitrary",
-    "reth-codecs/arbitrary",
+    "reth-codecs?/arbitrary",
     "reth-scroll-primitives?/arbitrary"
 ]
 serde-bincode-compat = [
+    "serde",
     "serde_with",
     "alloy-consensus/serde-bincode-compat",
     "alloy-eips/serde-bincode-compat"
 ]
+serde = [
+    "dep:serde",
+    "alloy-consensus/serde",
+    "alloy-eips/serde",
+    "alloy-primitives/serde",
+    "bytes/serde",
+    "rand/serde",
+    "reth-codecs?/serde",
+    "revm-primitives/serde",
+    "roaring/serde",
+]
+reth-codec = [
+    "dep:reth-codecs",
+    "dep:modular-bitfield",
+    "dep:byteorder",
+]
+
 scroll = [
     "reth-scroll-primitives",
     "revm-primitives/scroll"
 ]
diff --git
a/crates/primitives-traits/src/account.rs b/crates/primitives-traits/src/account.rs index ffdd4d29887..900d77fb976 100644 --- a/crates/primitives-traits/src/account.rs +++ b/crates/primitives-traits/src/account.rs @@ -1,32 +1,34 @@ use alloy_consensus::constants::KECCAK_EMPTY; use alloy_genesis::GenesisAccount; use alloy_primitives::{keccak256, Bytes, B256, U256}; -use byteorder::{BigEndian, ReadBytesExt}; -use bytes::Buf; use derive_more::Deref; -use reth_codecs::{add_arbitrary_tests, Compact}; -use revm_primitives::{AccountInfo, Bytecode as RevmBytecode, BytecodeDecodeError, JumpTable}; -use serde::{Deserialize, Serialize}; +use revm_primitives::{AccountInfo, Bytecode as RevmBytecode, BytecodeDecodeError}; -/// Identifier for [`LegacyRaw`](RevmBytecode::LegacyRaw). -const LEGACY_RAW_BYTECODE_ID: u8 = 0; +#[cfg(any(test, feature = "reth-codec"))] +/// Identifiers used in [`Compact`](reth_codecs::Compact) encoding of [`Bytecode`]. +pub mod compact_ids { + /// Identifier for [`LegacyRaw`](revm_primitives::Bytecode::LegacyRaw). + pub const LEGACY_RAW_BYTECODE_ID: u8 = 0; -/// Identifier for removed bytecode variant. -const REMOVED_BYTECODE_ID: u8 = 1; + /// Identifier for removed bytecode variant. + pub const REMOVED_BYTECODE_ID: u8 = 1; -/// Identifier for [`LegacyAnalyzed`](RevmBytecode::LegacyAnalyzed). -const LEGACY_ANALYZED_BYTECODE_ID: u8 = 2; + /// Identifier for [`LegacyAnalyzed`](revm_primitives::Bytecode::LegacyAnalyzed). + pub const LEGACY_ANALYZED_BYTECODE_ID: u8 = 2; -/// Identifier for [`Eof`](RevmBytecode::Eof). -const EOF_BYTECODE_ID: u8 = 3; + /// Identifier for [`Eof`](revm_primitives::Bytecode::Eof). + pub const EOF_BYTECODE_ID: u8 = 3; -/// Identifier for [`Eip7702`](RevmBytecode::Eip7702). -const EIP7702_BYTECODE_ID: u8 = 4; + /// Identifier for [`Eip7702`](revm_primitives::Bytecode::Eip7702). + pub const EIP7702_BYTECODE_ID: u8 = 4; +} /// An Ethereum account. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Default, Serialize, Deserialize, Compact)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[derive(Clone, Copy, Debug, PartialEq, Eq, Default)] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] +#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] pub struct Account { /// Account nonce. pub nonce: u64, @@ -69,7 +71,8 @@ impl Account { /// Bytecode for an account. /// /// A wrapper around [`revm::primitives::Bytecode`][RevmBytecode] with encoding/decoding support. -#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize, Deref)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[derive(Debug, Clone, Default, PartialEq, Eq, Deref)] pub struct Bytecode(pub RevmBytecode); impl Bytecode { @@ -93,11 +96,17 @@ impl Bytecode { } } -impl Compact for Bytecode { +#[cfg(any(test, feature = "reth-codec"))] +impl reth_codecs::Compact for Bytecode { fn to_compact(&self, buf: &mut B) -> usize where B: bytes::BufMut + AsMut<[u8]>, { + use compact_ids::{ + EIP7702_BYTECODE_ID, EOF_BYTECODE_ID, LEGACY_ANALYZED_BYTECODE_ID, + LEGACY_RAW_BYTECODE_ID, + }; + let bytecode = match &self.0 { RevmBytecode::LegacyRaw(bytes) => bytes, RevmBytecode::LegacyAnalyzed(analyzed) => analyzed.bytecode(), @@ -136,7 +145,12 @@ impl Compact for Bytecode { // A panic will be triggered if a bytecode variant of 1 or greater than 2 is passed from the // database. 
fn from_compact(mut buf: &[u8], _: usize) -> (Self, &[u8]) { - let len = buf.read_u32::().expect("could not read bytecode length"); + use byteorder::ReadBytesExt; + use bytes::Buf; + + use compact_ids::*; + + let len = buf.read_u32::().expect("could not read bytecode length"); let bytes = Bytes::from(buf.copy_to_bytes(len as usize)); let variant = buf.read_u8().expect("could not read bytecode variant"); let decoded = match variant { @@ -147,8 +161,8 @@ impl Compact for Bytecode { LEGACY_ANALYZED_BYTECODE_ID => Self(unsafe { RevmBytecode::new_analyzed( bytes, - buf.read_u64::().unwrap() as usize, - JumpTable::from_slice(buf), + buf.read_u64::().unwrap() as usize, + revm_primitives::JumpTable::from_slice(buf), ) }), EOF_BYTECODE_ID | EIP7702_BYTECODE_ID => { @@ -234,9 +248,11 @@ impl From for Account { #[cfg(test)] mod tests { - use super::*; use alloy_primitives::{hex_literal::hex, B256, U256}; - use revm_primitives::LegacyAnalyzedBytecode; + use reth_codecs::Compact; + use revm_primitives::{JumpTable, LegacyAnalyzedBytecode}; + + use super::*; #[test] fn test_account() { diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index e9aadf40957..fd7f7f1c631 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -1,9 +1,16 @@ //! Block body abstraction. -use crate::InMemorySize; use alloc::fmt; + use alloy_consensus::Transaction; +use crate::{FullSignedTx, InMemorySize, MaybeArbitrary, MaybeSerde}; + +/// Helper trait that unifies all behaviour required by transaction to support full node operations. +pub trait FullBlockBody: BlockBody {} + +impl FullBlockBody for T where T: BlockBody {} + /// Abstraction for block's body. #[auto_impl::auto_impl(&, Arc)] pub trait BlockBody: @@ -15,14 +22,13 @@ pub trait BlockBody: + fmt::Debug + PartialEq + Eq - + serde::Serialize - + for<'de> serde::Deserialize<'de> + alloy_rlp::Encodable + alloy_rlp::Decodable + InMemorySize + + MaybeSerde + + MaybeArbitrary { /// Ordered list of signed transactions as committed in block. - // todo: requires trait for signed transaction type Transaction: Transaction; /// Returns reference to transactions in block. diff --git a/crates/primitives-traits/src/block/header.rs b/crates/primitives-traits/src/block/header.rs index 779df442538..26806808532 100644 --- a/crates/primitives-traits/src/block/header.rs +++ b/crates/primitives-traits/src/block/header.rs @@ -1,15 +1,16 @@ //! Block header data primitive. -use crate::InMemorySize; -use alloy_primitives::Sealable; use core::fmt; -use reth_codecs::Compact; + +use alloy_primitives::Sealable; + +use crate::{InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde}; /// Helper trait that unifies all behaviour required by block header to support full node /// operations. -pub trait FullBlockHeader: BlockHeader + Compact {} +pub trait FullBlockHeader: BlockHeader + MaybeCompact {} -impl FullBlockHeader for T where T: BlockHeader + Compact {} +impl FullBlockHeader for T where T: BlockHeader + MaybeCompact {} /// Abstraction of a block header. 
pub trait BlockHeader: @@ -26,6 +27,8 @@ pub trait BlockHeader: + alloy_consensus::BlockHeader + Sealable + InMemorySize + + MaybeSerde + + MaybeArbitrary { } @@ -38,12 +41,12 @@ impl BlockHeader for T where + fmt::Debug + PartialEq + Eq - + serde::Serialize - + for<'de> serde::Deserialize<'de> + alloy_rlp::Encodable + alloy_rlp::Decodable + alloy_consensus::BlockHeader + Sealable + InMemorySize + + MaybeSerde + + MaybeArbitrary { } diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index 6bef9ea167f..c0f5a1ffc63 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -5,14 +5,22 @@ pub mod header; use alloc::fmt; -use reth_codecs::Compact; - -use crate::{BlockHeader, FullBlockHeader, InMemorySize}; +use crate::{ + BlockHeader, FullBlockBody, FullBlockHeader, InMemorySize, MaybeArbitrary, MaybeSerde, +}; /// Helper trait that unifies all behaviour required by block to support full node operations. -pub trait FullBlock: Block + Compact {} +pub trait FullBlock: + Block + alloy_rlp::Encodable + alloy_rlp::Decodable +{ +} -impl FullBlock for T where T: Block + Compact {} +impl FullBlock for T where + T: Block + + alloy_rlp::Encodable + + alloy_rlp::Decodable +{ +} /// Abstraction of block data type. // todo: make sealable super-trait, depends on @@ -28,9 +36,9 @@ pub trait Block: + fmt::Debug + PartialEq + Eq - + serde::Serialize - + for<'a> serde::Deserialize<'a> + InMemorySize + + MaybeSerde + + MaybeArbitrary { /// Header part of the block. type Header: BlockHeader + 'static; diff --git a/crates/primitives-traits/src/encoded.rs b/crates/primitives-traits/src/encoded.rs new file mode 100644 index 00000000000..b162fc93343 --- /dev/null +++ b/crates/primitives-traits/src/encoded.rs @@ -0,0 +1,55 @@ +use alloy_primitives::Bytes; + +/// Generic wrapper with encoded Bytes, such as transaction data. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct WithEncoded(Bytes, pub T); + +impl From<(Bytes, T)> for WithEncoded { + fn from(value: (Bytes, T)) -> Self { + Self(value.0, value.1) + } +} + +impl WithEncoded { + /// Wraps the value with the bytes. + pub const fn new(bytes: Bytes, value: T) -> Self { + Self(bytes, value) + } + + /// Get the encoded bytes + pub fn encoded_bytes(&self) -> Bytes { + self.0.clone() + } + + /// Get the underlying value + pub const fn value(&self) -> &T { + &self.1 + } + + /// Returns ownership of the underlying value. + pub fn into_value(self) -> T { + self.1 + } + + /// Transform the value + pub fn transform>(self) -> WithEncoded { + WithEncoded(self.0, self.1.into()) + } + + /// Split the wrapper into [`Bytes`] and value tuple + pub fn split(self) -> (Bytes, T) { + (self.0, self.1) + } + + /// Maps the inner value to a new value using the given function. + pub fn map U>(self, op: F) -> WithEncoded { + WithEncoded(self.0, op(self.1)) + } +} + +impl WithEncoded> { + /// returns `None` if the inner value is `None`, otherwise returns `Some(WithEncoded)`. 
+ pub fn transpose(self) -> Option> { + self.1.map(|v| WithEncoded(self.0, v)) + } +} diff --git a/crates/primitives-traits/src/header/mod.rs b/crates/primitives-traits/src/header/mod.rs index b36a74471ff..ea5f7eafb51 100644 --- a/crates/primitives-traits/src/header/mod.rs +++ b/crates/primitives-traits/src/header/mod.rs @@ -1,5 +1,5 @@ mod sealed; -pub use sealed::{BlockWithParent, SealedHeader}; +pub use sealed::{BlockWithParent, Header, SealedHeader}; mod error; pub use error::HeaderError; diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index f4a365e1512..08add0ac3c1 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -1,18 +1,19 @@ +pub use alloy_consensus::Header; + use core::mem; -use alloy_consensus::{Header, Sealed}; +use alloy_consensus::Sealed; use alloy_eips::BlockNumHash; use alloy_primitives::{keccak256, BlockHash, Sealable, B256}; use alloy_rlp::{Decodable, Encodable}; use bytes::BufMut; use derive_more::{AsRef, Deref}; -use reth_codecs::add_arbitrary_tests; -use serde::{Deserialize, Serialize}; use crate::InMemorySize; /// A helper struct to store the block number/hash and its parent hash. -#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct BlockWithParent { /// Parent hash. pub parent: B256, @@ -22,8 +23,9 @@ pub struct BlockWithParent { /// A [`Header`] that is sealed at a precalculated hash, use [`SealedHeader::unseal()`] if you want /// to modify header. -#[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref, Serialize, Deserialize)] -#[add_arbitrary_tests(rlp)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(rlp))] pub struct SealedHeader { /// Locked Header hash. hash: BlockHash, @@ -157,9 +159,12 @@ impl From> for Sealed { } #[cfg(any(test, feature = "arbitrary"))] -impl<'a> arbitrary::Arbitrary<'a> for SealedHeader { +impl<'a, H> arbitrary::Arbitrary<'a> for SealedHeader +where + H: for<'b> arbitrary::Arbitrary<'b> + Sealable, +{ fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let header = Header::arbitrary(u)?; + let header = H::arbitrary(u)?; Ok(Self::seal(header)) } diff --git a/crates/primitives-traits/src/integer_list.rs b/crates/primitives-traits/src/integer_list.rs index 682fa0cf822..6fc6d75899c 100644 --- a/crates/primitives-traits/src/integer_list.rs +++ b/crates/primitives-traits/src/integer_list.rs @@ -1,13 +1,9 @@ use alloc::vec::Vec; -use bytes::BufMut; use core::fmt; + +use bytes::BufMut; use derive_more::Deref; use roaring::RoaringTreemap; -use serde::{ - de::{SeqAccess, Visitor}, - ser::SerializeSeq, - Deserialize, Deserializer, Serialize, Serializer, -}; /// A data structure that uses Roaring Bitmaps to efficiently store a list of integers. 
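// ---- [Editor's sketch; not part of the diff] ----------------------------
// Usage of the `WithEncoded<T>` wrapper introduced in the new
// `crates/primitives-traits/src/encoded.rs` above; the real struct is
// `WithEncoded<T>(Bytes, pub T)`. Self-contained analogue with `Vec<u8>`
// standing in for alloy's `Bytes`:
struct WithEncoded<T>(Vec<u8>, T);

impl<T> WithEncoded<T> {
    fn new(bytes: Vec<u8>, value: T) -> Self { Self(bytes, value) }
    fn split(self) -> (Vec<u8>, T) { (self.0, self.1) }
    fn map<U, F: FnOnce(T) -> U>(self, op: F) -> WithEncoded<U> {
        WithEncoded(self.0, op(self.1))
    }
}

impl<T> WithEncoded<Option<T>> {
    // `transpose` pushes the `Option` outward while keeping the raw bytes.
    fn transpose(self) -> Option<WithEncoded<T>> {
        let bytes = self.0;
        self.1.map(|v| WithEncoded(bytes, v))
    }
}

fn main() {
    // e.g. the raw bytes a transaction was decoded from, plus the decoded value
    let tx = WithEncoded::new(vec![0x02, 0xf8], 42u64);
    let doubled = tx.map(|v| v * 2);
    assert_eq!(doubled.split(), (vec![0x02, 0xf8], 84));
    assert!(WithEncoded(vec![], None::<u8>).transpose().is_none());
}
// --------------------------------------------------------------------------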
/// @@ -90,11 +86,14 @@ impl IntegerList { } } -impl Serialize for IntegerList { +#[cfg(feature = "serde")] +impl serde::Serialize for IntegerList { fn serialize(&self, serializer: S) -> Result where - S: Serializer, + S: serde::Serializer, { + use serde::ser::SerializeSeq; + let mut seq = serializer.serialize_seq(Some(self.len() as usize))?; for e in &self.0 { seq.serialize_element(&e)?; @@ -103,8 +102,11 @@ impl Serialize for IntegerList { } } +#[cfg(feature = "serde")] struct IntegerListVisitor; -impl<'de> Visitor<'de> for IntegerListVisitor { + +#[cfg(feature = "serde")] +impl<'de> serde::de::Visitor<'de> for IntegerListVisitor { type Value = IntegerList; fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -113,7 +115,7 @@ impl<'de> Visitor<'de> for IntegerListVisitor { fn visit_seq(self, mut seq: E) -> Result where - E: SeqAccess<'de>, + E: serde::de::SeqAccess<'de>, { let mut list = IntegerList::empty(); while let Some(item) = seq.next_element()? { @@ -123,10 +125,11 @@ impl<'de> Visitor<'de> for IntegerListVisitor { } } -impl<'de> Deserialize<'de> for IntegerList { +#[cfg(feature = "serde")] +impl<'de> serde::Deserialize<'de> for IntegerList { fn deserialize(deserializer: D) -> Result where - D: Deserializer<'de>, + D: serde::Deserializer<'de>, { deserializer.deserialize_byte_buf(IntegerListVisitor) } diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 584181f2c95..4d068b2ff4d 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -14,7 +14,6 @@ extern crate alloc; /// Common constants. pub mod constants; - pub use constants::gas_units::{format_gas, format_gas_throughput}; /// Minimal account @@ -26,8 +25,10 @@ pub use receipt::{FullReceipt, Receipt}; pub mod transaction; pub use transaction::{ + execute::FillTxEnv, signed::{FullSignedTx, SignedTransaction}, - FullTransaction, Transaction, TransactionExt, + tx_type::{FullTxType, TxType}, + FullTransaction, Transaction, }; mod integer_list; @@ -35,12 +36,14 @@ pub use integer_list::{IntegerList, IntegerListError}; pub mod block; pub use block::{ - body::BlockBody, + body::{BlockBody, FullBlockBody}, header::{BlockHeader, FullBlockHeader}, Block, FullBlock, }; +mod encoded; mod withdrawal; +pub use encoded::WithEncoded; mod error; pub use error::{GotExpected, GotExpectedBoxed}; @@ -51,15 +54,11 @@ pub use alloy_primitives::{logs_bloom, Log, LogData}; mod storage; pub use storage::StorageEntry; -/// Transaction types -pub mod tx_type; -pub use tx_type::{FullTxType, TxType}; - /// Common header types pub mod header; #[cfg(any(test, feature = "arbitrary", feature = "test-utils"))] pub use header::test_utils; -pub use header::{BlockWithParent, HeaderError, SealedHeader}; +pub use header::{BlockWithParent, Header, HeaderError, SealedHeader}; /// Bincode-compatible serde implementations for common abstracted types in Reth. /// @@ -79,7 +78,7 @@ pub use size::InMemorySize; /// Node traits pub mod node; -pub use node::{FullNodePrimitives, NodePrimitives}; +pub use node::{FullNodePrimitives, NodePrimitives, ReceiptTy}; /// Helper trait that requires arbitrary implementation if the feature is enabled. 
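// ---- [Editor's sketch; not part of the diff] ----------------------------
// `MaybeSerde` and `MaybeCompact`, added just below, use the same
// feature-gated marker-trait pattern as `MaybeArbitrary` here: with the
// feature enabled the marker requires the real bound, without it the marker
// is a no-op implemented for every type, so downstream trait bounds read the
// same either way. Minimal runnable version (a hypothetical `demo` cargo
// feature stands in for `serde`):
#[cfg(feature = "demo")]
trait MaybeDemo: core::fmt::Display {}
#[cfg(feature = "demo")]
impl<T: core::fmt::Display> MaybeDemo for T {}

#[cfg(not(feature = "demo"))]
trait MaybeDemo {}
#[cfg(not(feature = "demo"))]
impl<T> MaybeDemo for T {}

// One bound that silently tightens when the feature is switched on:
fn takes_maybe<T: MaybeDemo>(_value: T) {}

fn main() {
    takes_maybe(123u8); // u8: Display, so this compiles with or without `demo`
}
// --------------------------------------------------------------------------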
#[cfg(any(feature = "test-utils", feature = "arbitrary"))] @@ -92,3 +91,30 @@ pub trait MaybeArbitrary {} impl MaybeArbitrary for T where T: for<'a> arbitrary::Arbitrary<'a> {} #[cfg(not(any(feature = "test-utils", feature = "arbitrary")))] impl MaybeArbitrary for T {} + +/// Helper trait that requires de-/serialize implementation since `serde` feature is enabled. +#[cfg(feature = "serde")] +pub trait MaybeSerde: serde::Serialize + for<'de> serde::Deserialize<'de> {} +/// Noop. Helper trait that would require de-/serialize implementation if `serde` feature were +/// enabled. +#[cfg(not(feature = "serde"))] +pub trait MaybeSerde {} + +#[cfg(feature = "serde")] +impl MaybeSerde for T where T: serde::Serialize + for<'de> serde::Deserialize<'de> {} +#[cfg(not(feature = "serde"))] +impl MaybeSerde for T {} + +/// Helper trait that requires database encoding implementation since `reth-codec` feature is +/// enabled. +#[cfg(feature = "reth-codec")] +pub trait MaybeCompact: reth_codecs::Compact {} +/// Noop. Helper trait that would require database encoding implementation if `reth-codec` feature +/// were enabled. +#[cfg(not(feature = "reth-codec"))] +pub trait MaybeCompact {} + +#[cfg(feature = "reth-codec")] +impl MaybeCompact for T where T: reth_codecs::Compact {} +#[cfg(not(feature = "reth-codec"))] +impl MaybeCompact for T {} diff --git a/crates/primitives-traits/src/node.rs b/crates/primitives-traits/src/node.rs index ca490ac15aa..904ed7d12f1 100644 --- a/crates/primitives-traits/src/node.rs +++ b/crates/primitives-traits/src/node.rs @@ -1,44 +1,113 @@ use core::fmt; -use crate::{BlockBody, FullBlock, FullReceipt, FullSignedTx, FullTxType}; +use crate::{ + FullBlock, FullBlockBody, FullBlockHeader, FullReceipt, FullSignedTx, FullTxType, MaybeSerde, +}; /// Configures all the primitive types of the node. -pub trait NodePrimitives: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static { +pub trait NodePrimitives: + Send + Sync + Unpin + Clone + Default + fmt::Debug + PartialEq + Eq + 'static +{ /// Block primitive. - type Block: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static; + type Block: Send + + Sync + + Unpin + + Clone + + Default + + fmt::Debug + + PartialEq + + Eq + + MaybeSerde + + 'static; + /// Block header primitive. + type BlockHeader: Send + + Sync + + Unpin + + Clone + + Default + + fmt::Debug + + PartialEq + + Eq + + MaybeSerde + + 'static; + /// Block body primitive. + type BlockBody: Send + + Sync + + Unpin + + Clone + + Default + + fmt::Debug + + PartialEq + + Eq + + MaybeSerde + + 'static; /// Signed version of the transaction type. - type SignedTx: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static; + type SignedTx: Send + Sync + Unpin + Clone + fmt::Debug + PartialEq + Eq + MaybeSerde + 'static; /// Transaction envelope type ID. - type TxType: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static; + type TxType: Send + Sync + Unpin + Clone + Default + fmt::Debug + PartialEq + Eq + 'static; /// A receipt. - type Receipt: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static; + type Receipt: Send + + Sync + + Unpin + + Clone + + Default + + fmt::Debug + + PartialEq + + Eq + + MaybeSerde + + 'static; } impl NodePrimitives for () { type Block = (); + type BlockHeader = (); + type BlockBody = (); type SignedTx = (); type TxType = (); type Receipt = (); } /// Helper trait that sets trait bounds on [`NodePrimitives`]. -pub trait FullNodePrimitives: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static { - /// Block primitive. 
- type Block: FullBlock>; - /// Signed version of the transaction type. - type SignedTx: FullSignedTx; - /// Transaction envelope type ID. - type TxType: FullTxType; - /// A receipt. - type Receipt: FullReceipt; +pub trait FullNodePrimitives +where + Self: NodePrimitives< + Block: FullBlock

<Header = Self::BlockHeader, Body = Self::BlockBody>, + BlockHeader: FullBlockHeader, + BlockBody: FullBlockBody, + SignedTx: FullSignedTx, + TxType: FullTxType, + Receipt: FullReceipt, + > + Send + + Sync + + Unpin + + Clone + + Default + + fmt::Debug + + PartialEq + + Eq + + 'static, +{ +} -impl<T> NodePrimitives for T -where - T: FullNodePrimitives, +impl<T> FullNodePrimitives for T where + T: NodePrimitives< + Block: FullBlock<Header = T::BlockHeader, Body = T::BlockBody>
, + BlockHeader: FullBlockHeader, + BlockBody: FullBlockBody, + SignedTx: FullSignedTx, + TxType: FullTxType, + Receipt: FullReceipt, + > + Send + + Sync + + Unpin + + Clone + + Default + + fmt::Debug + + PartialEq + + Eq + + 'static { - type Block = T::Block; - type SignedTx = T::SignedTx; - type TxType = T::TxType; - type Receipt = T::Receipt; } + +/// Helper adapter type for accessing [`NodePrimitives`] receipt type. +pub type ReceiptTy = ::Receipt; diff --git a/crates/primitives-traits/src/receipt.rs b/crates/primitives-traits/src/receipt.rs index 31bded015d4..e2af40c447e 100644 --- a/crates/primitives-traits/src/receipt.rs +++ b/crates/primitives-traits/src/receipt.rs @@ -1,16 +1,17 @@ //! Receipt abstraction use alloc::vec::Vec; +use core::fmt; + use alloy_consensus::TxReceipt; use alloy_primitives::B256; -use core::fmt; -use reth_codecs::Compact; -use serde::{Deserialize, Serialize}; + +use crate::{InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde}; /// Helper trait that unifies all behaviour required by receipt to support full node operations. -pub trait FullReceipt: Receipt + Compact {} +pub trait FullReceipt: Receipt + MaybeCompact {} -impl FullReceipt for T where T: ReceiptExt + Compact {} +impl FullReceipt for T where T: ReceiptExt + MaybeCompact {} /// Abstraction of a receipt. #[auto_impl::auto_impl(&, Arc)] @@ -24,8 +25,9 @@ pub trait Receipt: + TxReceipt + alloy_rlp::Encodable + alloy_rlp::Decodable - + Serialize - + for<'de> Deserialize<'de> + + MaybeSerde + + InMemorySize + + MaybeArbitrary { /// Returns transaction type. fn tx_type(&self) -> u8; diff --git a/crates/primitives-traits/src/size.rs b/crates/primitives-traits/src/size.rs index 7d83a8af8c4..4d721dd00b3 100644 --- a/crates/primitives-traits/src/size.rs +++ b/crates/primitives-traits/src/size.rs @@ -1,3 +1,6 @@ +use alloy_consensus::{Header, TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy}; +use alloy_primitives::{PrimitiveSignature as Signature, TxHash}; + /// Trait for calculating a heuristic for the in-memory size of a struct. #[auto_impl::auto_impl(&, Arc, Box)] pub trait InMemorySize { @@ -5,8 +8,59 @@ pub trait InMemorySize { fn size(&self) -> usize; } -impl InMemorySize for alloy_consensus::Header { +impl InMemorySize for alloy_consensus::Signed { fn size(&self) -> usize { - self.size() + T::size(self.tx()) + self.signature().size() + self.hash().size() + } +} + +/// Implement `InMemorySize` for a type with `size_of` +macro_rules! impl_in_mem_size_size_of { + ($($ty:ty),*) => { + $( + impl InMemorySize for $ty { + #[inline] + fn size(&self) -> usize { + core::mem::size_of::() + } + } + )* + }; +} + +impl_in_mem_size_size_of!(Signature, TxHash); + +/// Implement `InMemorySize` for a type with a native `size` method. +macro_rules! impl_in_mem_size { + ($($ty:ty),*) => { + $( + impl InMemorySize for $ty { + #[inline] + fn size(&self) -> usize { + Self::size(self) + } + } + )* + }; +} + +impl_in_mem_size!(Header, TxLegacy, TxEip2930, TxEip1559, TxEip7702, TxEip4844); + +#[cfg(test)] +mod tests { + use super::*; + + // ensures we don't have any recursion in the `InMemorySize` impls + #[test] + fn no_in_memory_no_recursion() { + fn assert_no_recursion() { + let _ = T::default().size(); + } + assert_no_recursion::
(); + assert_no_recursion::(); + assert_no_recursion::(); + assert_no_recursion::(); + assert_no_recursion::(); + assert_no_recursion::(); } } diff --git a/crates/primitives-traits/src/storage.rs b/crates/primitives-traits/src/storage.rs index 39b6155ee28..c6b9b1e11c7 100644 --- a/crates/primitives-traits/src/storage.rs +++ b/crates/primitives-traits/src/storage.rs @@ -1,13 +1,12 @@ use alloy_primitives::{B256, U256}; -use reth_codecs::{add_arbitrary_tests, Compact}; -use serde::{Deserialize, Serialize}; /// Account storage entry. /// /// `key` is the subkey when used as a value in the `StorageChangeSets` table. -#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, PartialOrd, Ord)] +#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] pub struct StorageEntry { /// Storage key. pub key: B256, @@ -31,7 +30,8 @@ impl From<(B256, U256)> for StorageEntry { // NOTE: Removing reth_codec and manually encode subkey // and compress second part of the value. If we have compression // over whole value (Even SubKey) that would mess up fetching of values with seek_by_key_subkey -impl Compact for StorageEntry { +#[cfg(any(test, feature = "reth-codec"))] +impl reth_codecs::Compact for StorageEntry { fn to_compact(&self, buf: &mut B) -> usize where B: bytes::BufMut + AsMut<[u8]>, diff --git a/crates/primitives-traits/src/transaction/execute.rs b/crates/primitives-traits/src/transaction/execute.rs new file mode 100644 index 00000000000..c7350f1941b --- /dev/null +++ b/crates/primitives-traits/src/transaction/execute.rs @@ -0,0 +1,10 @@ +//! Abstraction of an executable transaction. + +use alloy_primitives::Address; +use revm_primitives::TxEnv; + +/// Loads transaction into execution environment. +pub trait FillTxEnv { + /// Fills [`TxEnv`] with an [`Address`] and transaction. + fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address); +} diff --git a/crates/primitives-traits/src/transaction/mod.rs b/crates/primitives-traits/src/transaction/mod.rs index 33ee36090ac..b67e51024bf 100644 --- a/crates/primitives-traits/src/transaction/mod.rs +++ b/crates/primitives-traits/src/transaction/mod.rs @@ -1,19 +1,16 @@ //! Transaction abstraction +pub mod execute; pub mod signed; +pub mod tx_type; +use crate::{InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde}; use core::{fmt, hash::Hash}; -use alloy_primitives::B256; -use reth_codecs::Compact; -use serde::{Deserialize, Serialize}; - -use crate::{FullTxType, InMemorySize, MaybeArbitrary, TxType}; - /// Helper trait that unifies all behaviour required by transaction to support full node operations. -pub trait FullTransaction: Transaction + Compact {} +pub trait FullTransaction: Transaction + MaybeCompact {} -impl FullTransaction for T where T: Transaction + Compact {} +impl FullTransaction for T where T: Transaction + MaybeCompact {} /// Abstraction of a transaction. 
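// ---- [Editor's sketch; not part of the diff] ----------------------------
// The `size.rs` hunk above swaps hand-written impls for two macros: one maps
// `InMemorySize::size` to `core::mem::size_of::<Self>()` (for `Signature`,
// `TxHash`), the other forwards to a type's inherent `size` method (for
// `Header` and the tx types); hence the `no_in_memory_no_recursion` test,
// which guards against the forwarding impl accidentally calling itself.
// Runnable version of the first macro:
trait InMemorySize {
    fn size(&self) -> usize;
}

macro_rules! impl_in_mem_size_size_of {
    ($($ty:ty),*) => {
        $(
            impl InMemorySize for $ty {
                #[inline]
                fn size(&self) -> usize {
                    core::mem::size_of::<Self>()
                }
            }
        )*
    };
}

impl_in_mem_size_size_of!(u64, [u8; 32]); // stand-ins for `Signature`, `TxHash`

fn main() {
    assert_eq!(0u64.size(), 8);
    assert_eq!([0u8; 32].size(), 32);
}
// --------------------------------------------------------------------------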
pub trait Transaction: @@ -21,15 +18,13 @@ pub trait Transaction: + Sync + Unpin + Clone - + Default + fmt::Debug + Eq + PartialEq + Hash - + Serialize - + for<'de> Deserialize<'de> - + TransactionExt + + alloy_consensus::Transaction + InMemorySize + + MaybeSerde + MaybeArbitrary { } @@ -39,31 +34,13 @@ impl Transaction for T where + Sync + Unpin + Clone - + Default + fmt::Debug + Eq + PartialEq + Hash - + Serialize - + for<'de> Deserialize<'de> - + TransactionExt + + alloy_consensus::Transaction + InMemorySize + + MaybeSerde + MaybeArbitrary { } - -/// Extension trait of [`alloy_consensus::Transaction`]. -#[auto_impl::auto_impl(&, Arc)] -pub trait TransactionExt: alloy_consensus::Transaction { - /// Transaction envelope type ID. - type Type: TxType; - - /// Heavy operation that return signature hash over rlp encoded transaction. - /// It is only for signature signing or signer recovery. - fn signature_hash(&self) -> B256; - - /// Returns the transaction type. - fn tx_type(&self) -> Self::Type { - Self::Type::try_from(self.ty()).expect("should decode tx type id") - } -} diff --git a/crates/primitives-traits/src/transaction/signed.rs b/crates/primitives-traits/src/transaction/signed.rs index 958d5cd6c77..ae9a8f0d2ac 100644 --- a/crates/primitives-traits/src/transaction/signed.rs +++ b/crates/primitives-traits/src/transaction/signed.rs @@ -1,19 +1,15 @@ //! API of a signed transaction. +use crate::{FillTxEnv, InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde, TxType}; use alloc::fmt; -use core::hash::Hash; - use alloy_eips::eip2718::{Decodable2718, Encodable2718}; use alloy_primitives::{keccak256, Address, PrimitiveSignature, TxHash, B256}; -use reth_codecs::Compact; -use revm_primitives::TxEnv; - -use crate::{FullTransaction, MaybeArbitrary, Transaction}; +use core::hash::Hash; /// Helper trait that unifies all behaviour required by block to support full node operations. -pub trait FullSignedTx: SignedTransaction + Compact {} +pub trait FullSignedTx: SignedTransaction + FillTxEnv + MaybeCompact {} -impl FullSignedTx for T where T: SignedTransaction + Compact {} +impl FullSignedTx for T where T: SignedTransaction + FillTxEnv + MaybeCompact {} /// A signed transaction. #[auto_impl::auto_impl(&, Arc)] @@ -22,29 +18,30 @@ pub trait SignedTransaction: + Sync + Unpin + Clone - + Default + fmt::Debug + PartialEq + Eq + Hash - + serde::Serialize - + for<'a> serde::Deserialize<'a> + alloy_rlp::Encodable + alloy_rlp::Decodable + Encodable2718 + Decodable2718 + alloy_consensus::Transaction + + MaybeSerde + MaybeArbitrary + + InMemorySize { - /// Transaction type that is signed. - type Transaction: Transaction; + /// Transaction envelope type ID. + type Type: TxType; + + /// Returns the transaction type. + fn tx_type(&self) -> Self::Type { + Self::Type::try_from(self.ty()).expect("should decode tx type id") + } /// Returns reference to transaction hash. fn tx_hash(&self) -> &TxHash; - /// Returns reference to transaction. - fn transaction(&self) -> &Self::Transaction; - /// Returns reference to signature. fn signature(&self) -> &PrimitiveSignature; @@ -71,19 +68,4 @@ pub trait SignedTransaction: fn recalculate_hash(&self) -> B256 { keccak256(self.encoded_2718()) } - - /// Fills [`TxEnv`] with an [`Address`] and transaction. - fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address); -} - -/// Helper trait used in testing. -#[cfg(feature = "test-utils")] -pub trait SignedTransactionTesting: SignedTransaction { - /// Create a new signed transaction from a transaction and its signature. 
- /// - /// This will also calculate the transaction hash using its encoding. - fn from_transaction_and_signature( - transaction: Self::Transaction, - signature: PrimitiveSignature, - ) -> Self; } diff --git a/crates/primitives-traits/src/tx_type.rs b/crates/primitives-traits/src/transaction/tx_type.rs similarity index 59% rename from crates/primitives-traits/src/tx_type.rs rename to crates/primitives-traits/src/transaction/tx_type.rs index b1828ad57d9..d2caebe4c9f 100644 --- a/crates/primitives-traits/src/tx_type.rs +++ b/crates/primitives-traits/src/transaction/tx_type.rs @@ -1,13 +1,16 @@ +//! Abstraction of transaction envelope type ID. + use core::fmt; use alloy_primitives::{U64, U8}; -use reth_codecs::Compact; + +use crate::{InMemorySize, MaybeArbitrary, MaybeCompact}; /// Helper trait that unifies all behaviour required by transaction type ID to support full node /// operations. -pub trait FullTxType: TxType + Compact {} +pub trait FullTxType: TxType + MaybeCompact {} -impl FullTxType for T where T: TxType + Compact {} +impl FullTxType for T where T: TxType + MaybeCompact {} /// Trait representing the behavior of a transaction type. pub trait TxType: @@ -29,6 +32,8 @@ pub trait TxType: + TryFrom + alloy_rlp::Encodable + alloy_rlp::Decodable + + InMemorySize + + MaybeArbitrary { /// Returns `true` if this is a legacy transaction. fn is_legacy(&self) -> bool; @@ -44,4 +49,14 @@ pub trait TxType: /// Returns `true` if this is an eip-7702 transaction. fn is_eip7702(&self) -> bool; + + /// Returns whether this transaction type can be __broadcasted__ as full transaction over the + /// network. + /// + /// Some transactions are not broadcastable as objects and only allowed to be broadcasted as + /// hashes, e.g. because they missing context (e.g. blob sidecar). + fn is_broadcastable_in_full(&self) -> bool { + // EIP-4844 transactions are not broadcastable in full, only hashes are allowed. 
+ !self.is_eip4844() + } } diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 3507952699f..7e187c6fb00 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] # reth -reth-primitives-traits.workspace = true +reth-primitives-traits = { workspace = true, features = ["serde"] } reth-ethereum-forks.workspace = true reth-static-file-types.workspace = true reth-codecs = { workspace = true, optional = true } @@ -64,11 +64,11 @@ arbitrary = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] # eth -reth-chainspec.workspace = true +reth-chainspec = { workspace = true, features = ["arbitrary"] } reth-codecs = { workspace = true, features = ["test-utils"] } reth-primitives-traits = { workspace = true, features = ["arbitrary"] } reth-testing-utils.workspace = true -reth-trie-common.workspace = true +reth-trie-common = { workspace = true, features = ["arbitrary"] } revm-primitives = { package = "reth-scroll-revm", path = "../scroll/revm", features = ["arbitrary"] } alloy-eips = { workspace = true, features = ["arbitrary"] } @@ -107,7 +107,12 @@ std = [ "serde/std", "alloy-trie/std" ] -reth-codec = ["dep:reth-codecs", "dep:zstd", "dep:modular-bitfield", "std"] +reth-codec = [ + "dep:reth-codecs", + "dep:zstd", + "dep:modular-bitfield", "std", + "reth-primitives-traits/reth-codec", +] asm-keccak = ["alloy-primitives/asm-keccak", "revm-primitives/asm-keccak"] arbitrary = [ "dep:arbitrary", diff --git a/crates/primitives/src/alloy_compat.rs b/crates/primitives/src/alloy_compat.rs index 462b27f9c73..a72c83996c0 100644 --- a/crates/primitives/src/alloy_compat.rs +++ b/crates/primitives/src/alloy_compat.rs @@ -156,7 +156,7 @@ impl TryFrom for TransactionSigned { _ => return Err(ConversionError::Custom("unknown transaction type".to_string())), }; - Ok(Self { transaction, signature, hash }) + Ok(Self { transaction, signature, hash: hash.into() }) } } diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 94dd578493c..a93b1cf538a 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -2,7 +2,7 @@ use crate::{GotExpected, SealedHeader, TransactionSigned, TransactionSignedEcRec use alloc::vec::Vec; use alloy_consensus::Header; use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawals}; -use alloy_primitives::{Address, Bytes, B256}; +use alloy_primitives::{Address, Bytes, Sealable, B256}; use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable}; use derive_more::{Deref, DerefMut}; #[cfg(any(test, feature = "arbitrary"))] @@ -69,11 +69,11 @@ impl Block { let senders = if self.body.transactions.len() == senders.len() { senders } else { - let Some(senders) = self.body.recover_signers() else { return Err(self) }; + let Some(senders) = self.body.recover_signers_unchecked() else { return Err(self) }; senders }; - Ok(BlockWithSenders { block: self, senders }) + Ok(BlockWithSenders::new_unchecked(self, senders)) } /// **Expensive**. Transform into a [`BlockWithSenders`] by recovering senders in the contained @@ -82,7 +82,7 @@ impl Block { /// Returns `None` if a transaction is invalid. pub fn with_recovered_senders(self) -> Option { let senders = self.senders()?; - Some(BlockWithSenders { block: self, senders }) + Some(BlockWithSenders::new_unchecked(self, senders)) } } @@ -214,6 +214,11 @@ pub struct BlockWithSenders { } impl BlockWithSenders { + /// New block with senders + pub const fn new_unchecked(block: Block, senders: Vec
<Address>) -> Self { + Self { block, senders } + } + /// New block with senders. Returns `None` if the number of transactions and senders does not match. pub fn new(block: Block, senders: Vec<Address>
) -> Option<Self> { (block.body.transactions.len() == senders.len()).then_some(Self { block, senders }) @@ -379,7 +384,7 @@ impl SealedBlock { let senders = if self.body.transactions.len() == senders.len() { senders } else { - let Some(senders) = self.body.recover_signers() else { return Err(self) }; + let Some(senders) = self.body.recover_signers_unchecked() else { return Err(self) }; senders }; @@ -493,22 +498,30 @@ where } /// Sealed block with senders recovered from transactions. -#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize, Deref, DerefMut)] -pub struct SealedBlockWithSenders { +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Deref, DerefMut)] +pub struct SealedBlockWithSenders { /// Sealed block #[deref] #[deref_mut] - pub block: SealedBlock, + pub block: SealedBlock, /// List of senders that match transactions from block. pub senders: Vec<Address>
, } -impl SealedBlockWithSenders { +impl Default for SealedBlockWithSenders { + fn default() -> Self { + Self { block: SealedBlock::default(), senders: Default::default() } + } +} + +impl SealedBlockWithSenders { /// New sealed block with senders. Returns `None` if the number of transactions and senders does not match. - pub fn new(block: SealedBlock, senders: Vec<Address>
) -> Option<Self> { - (block.body.transactions.len() == senders.len()).then_some(Self { block, senders }) + pub fn new(block: SealedBlock, senders: Vec<Address>
) -> Option<Self> { + (block.body.transactions().len() == senders.len()).then_some(Self { block, senders }) + } +} +impl SealedBlockWithSenders { /// Splits the structure into its components. #[inline] pub fn into_components(self) -> (SealedBlock, Vec<Address>
) { @@ -519,7 +532,7 @@ impl SealedBlockWithSenders { #[inline] pub fn unseal(self) -> BlockWithSenders { let Self { block, senders } = self; - BlockWithSenders { block: block.unseal(), senders } + BlockWithSenders::new_unchecked(block.unseal(), senders) } /// Returns an iterator over all transactions in the block. @@ -616,6 +629,15 @@ impl BlockBody { TransactionSigned::recover_signers(&self.transactions, self.transactions.len()) } + /// Recover signer addresses for all transactions in the block body _without ensuring that the + /// signature has a low `s` value_. + /// + /// Returns `None`, if some transaction's signature is invalid, see also + /// [`TransactionSigned::recover_signer_unchecked`]. + pub fn recover_signers_unchecked(&self) -> Option> { + TransactionSigned::recover_signers_unchecked(&self.transactions, self.transactions.len()) + } + /// Returns whether or not the block body contains any blob transactions. #[inline] pub fn has_blob_transactions(&self) -> bool { diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 45067d60079..203880209a2 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -39,8 +39,8 @@ pub use receipt::{ gas_spent_by_transactions, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Receipts, }; pub use reth_primitives_traits::{ - logs_bloom, Account, Bytecode, GotExpected, GotExpectedBoxed, HeaderError, Log, LogData, - SealedHeader, StorageEntry, + logs_bloom, Account, Bytecode, GotExpected, GotExpectedBoxed, Header, HeaderError, Log, + LogData, NodePrimitives, SealedHeader, StorageEntry, }; pub use static_file::StaticFileSegment; @@ -48,7 +48,7 @@ pub use transaction::{ util::secp256k1::{public_key_to_address, recover_signer_unchecked, sign_message}, BlobTransaction, InvalidTransactionError, PooledTransactionsElement, PooledTransactionsElementEcRecovered, Transaction, TransactionMeta, TransactionSigned, - TransactionSignedEcRecovered, TransactionSignedNoHash, TxHashOrNumber, TxType, + TransactionSignedEcRecovered, TransactionSignedNoHash, TxType, }; // Re-exports @@ -74,3 +74,16 @@ pub mod serde_bincode_compat { transaction::{serde_bincode_compat as transaction, serde_bincode_compat::*}, }; } + +/// Temp helper struct for integrating [`NodePrimitives`]. +#[derive(Debug, Clone, Default, PartialEq, Eq, serde::Serialize, serde::Deserialize)] +pub struct EthPrimitives; + +impl reth_primitives_traits::NodePrimitives for EthPrimitives { + type Block = crate::Block; + type BlockHeader = alloy_consensus::Header; + type BlockBody = crate::BlockBody; + type SignedTx = crate::TransactionSigned; + type TxType = crate::TxType; + type Receipt = crate::Receipt; +} diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index b61ee7c14d2..77a44dc39e5 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -1,5 +1,6 @@ use alloc::{vec, vec::Vec}; use core::cmp::Ordering; +use reth_primitives_traits::InMemorySize; use alloy_consensus::{ constants::{EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID}, @@ -71,7 +72,6 @@ impl Receipt { } } -// todo: replace with alloy receipt impl TxReceipt for Receipt { fn status_or_post_state(&self) -> Eip658Value { self.success.into() @@ -109,6 +109,22 @@ impl ReceiptExt for Receipt { } } +impl InMemorySize for Receipt { + /// Calculates a heuristic for the in-memory size of the [Receipt]. 
+ #[inline] + fn size(&self) -> usize { + let total_size = self.tx_type.size() + + core::mem::size_of::() + + core::mem::size_of::() + + self.logs.capacity() * core::mem::size_of::(); + + #[cfg(feature = "optimism")] + return total_size + 2 * core::mem::size_of::>(); + #[cfg(not(feature = "optimism"))] + total_size + } +} + /// A collection of receipts organized as a two-dimensional vector. #[derive( Clone, @@ -174,8 +190,6 @@ impl From for ReceiptWithBloom { /// [`Receipt`] with calculated bloom filter. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] -#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] pub struct ReceiptWithBloom { /// Bloom filter build from logs. pub bloom: Bloom, @@ -379,7 +393,7 @@ impl Decodable for ReceiptWithBloom { Self::decode_receipt(buf, TxType::Eip7702) } #[cfg(feature = "optimism")] - crate::transaction::DEPOSIT_TX_TYPE_ID => { + op_alloy_consensus::DEPOSIT_TX_TYPE_ID => { buf.advance(1); Self::decode_receipt(buf, TxType::Deposit) } @@ -515,7 +529,7 @@ impl ReceiptWithBloomEncoder<'_> { } #[cfg(feature = "optimism")] TxType::Deposit => { - out.put_u8(crate::transaction::DEPOSIT_TX_TYPE_ID); + out.put_u8(op_alloy_consensus::DEPOSIT_TX_TYPE_ID); } } out.put_slice(payload.as_ref()); diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 015621cdcce..b8a3f4a719b 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1,13 +1,11 @@ //! Transaction types. -#[cfg(any(test, feature = "reth-codec"))] -use alloy_consensus::constants::{EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID}; +use alloc::vec::Vec; use alloy_consensus::{ - transaction::RlpEcdsaTx, SignableTransaction, Transaction as _, TxEip1559, TxEip2930, - TxEip4844, TxEip7702, TxLegacy, + transaction::RlpEcdsaTx, SignableTransaction, Signed, Transaction as _, TxEip1559, TxEip2930, + TxEip4844, TxEip4844Variant, TxEip7702, TxLegacy, TypedTransaction, }; use alloy_eips::{ - eip1898::BlockHashOrNumber, eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718}, eip2930::AccessList, eip7702::SignedAuthorization, @@ -16,31 +14,33 @@ use alloy_primitives::{ keccak256, Address, Bytes, ChainId, PrimitiveSignature as Signature, TxHash, TxKind, B256, U256, }; use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header}; -use core::mem; +use core::hash::{Hash, Hasher}; use derive_more::{AsRef, Deref}; use once_cell as _; #[cfg(not(feature = "std"))] -use once_cell::sync::Lazy as LazyLock; +use once_cell::sync::{Lazy as LazyLock, OnceCell as OnceLock}; #[cfg(feature = "optimism")] use op_alloy_consensus::DepositTransaction; +#[cfg(feature = "optimism")] +use op_alloy_consensus::TxDeposit; use rayon::prelude::{IntoParallelIterator, ParallelIterator}; -use reth_primitives_traits::InMemorySize; +use reth_primitives_traits::{InMemorySize, SignedTransaction}; +use revm_primitives::{AuthorizationList, TxEnv}; use serde::{Deserialize, Serialize}; use signature::decode_with_eip155_chain_id; #[cfg(feature = "std")] -use std::sync::LazyLock; +use std::sync::{LazyLock, OnceLock}; +pub use compat::FillTxEnv; pub use error::{ InvalidTransactionError, TransactionConversionError, TryFromRecoveredTransactionError, }; pub use meta::TransactionMeta; pub use pooled::{PooledTransactionsElement, 
PooledTransactionsElementEcRecovered}; +pub use reth_primitives_traits::WithEncoded; pub use sidecar::BlobTransaction; - -pub use compat::FillTxEnv; pub use signature::{recover_signer, recover_signer_unchecked}; pub use tx_type::TxType; -pub use variant::TransactionSignedVariant; pub(crate) mod access_list; mod compat; @@ -55,25 +55,13 @@ mod tx_type; pub mod signature; pub(crate) mod util; -mod variant; -#[cfg(feature = "optimism")] -use op_alloy_consensus::TxDeposit; -#[cfg(feature = "optimism")] -pub use tx_type::DEPOSIT_TX_TYPE_ID; #[cfg(any(test, feature = "reth-codec"))] pub use tx_type::{ COMPACT_EXTENDED_IDENTIFIER_FLAG, COMPACT_IDENTIFIER_EIP1559, COMPACT_IDENTIFIER_EIP2930, COMPACT_IDENTIFIER_LEGACY, }; -use alloc::vec::Vec; -use reth_primitives_traits::{transaction::TransactionExt, SignedTransaction}; -use revm_primitives::{AuthorizationList, TxEnv}; - -/// Either a transaction hash or number. -pub type TxHashOrNumber = BlockHashOrNumber; - /// Expected number of transactions where we can expect a speed-up by recovering the senders in /// parallel. pub static PARALLEL_SENDER_RECOVERY_THRESHOLD: LazyLock = @@ -235,29 +223,6 @@ impl Transaction { } } - /// Gets the transaction's [`TxKind`], which is the address of the recipient or - /// [`TxKind::Create`] if the transaction is a contract creation. - pub const fn kind(&self) -> TxKind { - match self { - Self::Legacy(TxLegacy { to, .. }) | - Self::Eip2930(TxEip2930 { to, .. }) | - Self::Eip1559(TxEip1559 { to, .. }) => *to, - Self::Eip4844(TxEip4844 { to, .. }) | Self::Eip7702(TxEip7702 { to, .. }) => { - TxKind::Call(*to) - } - #[cfg(feature = "optimism")] - Self::Deposit(TxDeposit { to, .. }) => *to, - } - } - - /// Get the transaction's address of the contract that will be called, or the address that will - /// receive the transfer. - /// - /// Returns `None` if this is a `CREATE` transaction. - pub fn to(&self) -> Option
{ - self.kind().to().copied() - } - /// Get the transaction's type pub const fn tx_type(&self) -> TxType { match self { @@ -271,56 +236,6 @@ impl Transaction { } } - /// Returns the [`AccessList`] of the transaction. - /// - /// Returns `None` for legacy transactions. - pub const fn access_list(&self) -> Option<&AccessList> { - match self { - Self::Legacy(_) => None, - Self::Eip2930(tx) => Some(&tx.access_list), - Self::Eip1559(tx) => Some(&tx.access_list), - Self::Eip4844(tx) => Some(&tx.access_list), - Self::Eip7702(tx) => Some(&tx.access_list), - #[cfg(feature = "optimism")] - Self::Deposit(_) => None, - } - } - - /// Returns the [`SignedAuthorization`] list of the transaction. - /// - /// Returns `None` if this transaction is not EIP-7702. - pub fn authorization_list(&self) -> Option<&[SignedAuthorization]> { - match self { - Self::Eip7702(tx) => Some(&tx.authorization_list), - _ => None, - } - } - - /// Returns true if the tx supports dynamic fees - pub const fn is_dynamic_fee(&self) -> bool { - match self { - Self::Legacy(_) | Self::Eip2930(_) => false, - Self::Eip1559(_) | Self::Eip4844(_) | Self::Eip7702(_) => true, - #[cfg(feature = "optimism")] - Self::Deposit(_) => false, - } - } - - /// Blob versioned hashes for eip4844 transaction, for legacy, eip1559, eip2930 and eip7702 - /// transactions this is `None` - /// - /// This is also commonly referred to as the "blob versioned hashes" (`BlobVersionedHashes`). - pub fn blob_versioned_hashes(&self) -> Option> { - match self { - Self::Legacy(_) | Self::Eip2930(_) | Self::Eip1559(_) | Self::Eip7702(_) => None, - Self::Eip4844(TxEip4844 { blob_versioned_hashes, .. }) => { - Some(blob_versioned_hashes.clone()) - } - #[cfg(feature = "optimism")] - Self::Deposit(_) => None, - } - } - /// Returns the blob gas used for all blobs of the EIP-4844 transaction if it is an EIP-4844 /// transaction. /// @@ -361,19 +276,6 @@ impl Transaction { } } - /// Get the transaction's input field. - pub const fn input(&self) -> &Bytes { - match self { - Self::Legacy(TxLegacy { input, .. }) | - Self::Eip2930(TxEip2930 { input, .. }) | - Self::Eip1559(TxEip1559 { input, .. }) | - Self::Eip4844(TxEip4844 { input, .. }) | - Self::Eip7702(TxEip7702 { input, .. }) => input, - #[cfg(feature = "optimism")] - Self::Deposit(TxDeposit { input, .. }) => input, - } - } - /// This encodes the transaction _without_ the signature, and is only suitable for creating a /// hash intended for signing. pub fn encode_for_signing(&self, out: &mut dyn bytes::BufMut) { @@ -594,19 +496,19 @@ impl reth_codecs::Compact for Transaction { use bytes::Buf; match identifier { - COMPACT_IDENTIFIER_LEGACY => { + reth_codecs::txtype::COMPACT_IDENTIFIER_LEGACY => { let (tx, buf) = TxLegacy::from_compact(buf, buf.len()); (Self::Legacy(tx), buf) } - COMPACT_IDENTIFIER_EIP2930 => { + reth_codecs::txtype::COMPACT_IDENTIFIER_EIP2930 => { let (tx, buf) = TxEip2930::from_compact(buf, buf.len()); (Self::Eip2930(tx), buf) } - COMPACT_IDENTIFIER_EIP1559 => { + reth_codecs::txtype::COMPACT_IDENTIFIER_EIP1559 => { let (tx, buf) = TxEip1559::from_compact(buf, buf.len()); (Self::Eip1559(tx), buf) } - COMPACT_EXTENDED_IDENTIFIER_FLAG => { + reth_codecs::txtype::COMPACT_EXTENDED_IDENTIFIER_FLAG => { // An identifier of 3 indicates that the transaction type did not fit into // the backwards compatible 2 bit identifier, their transaction types are // larger than 2 bits (eg. 4844 and Deposit Transactions). 
In this case, @@ -614,16 +516,16 @@ impl reth_codecs::Compact for Transaction { // reading the full 8 bits (single byte) and match on this transaction type. let identifier = buf.get_u8(); match identifier { - EIP4844_TX_TYPE_ID => { + alloy_consensus::constants::EIP4844_TX_TYPE_ID => { let (tx, buf) = TxEip4844::from_compact(buf, buf.len()); (Self::Eip4844(tx), buf) } - EIP7702_TX_TYPE_ID => { + alloy_consensus::constants::EIP7702_TX_TYPE_ID => { let (tx, buf) = TxEip7702::from_compact(buf, buf.len()); (Self::Eip7702(tx), buf) } #[cfg(feature = "optimism")] - DEPOSIT_TX_TYPE_ID => { + op_alloy_consensus::DEPOSIT_TX_TYPE_ID => { let (tx, buf) = TxDeposit::from_compact(buf, buf.len()); (Self::Deposit(tx), buf) } @@ -846,18 +748,23 @@ impl alloy_consensus::Transaction for Transaction { } } -impl TransactionExt for Transaction { - type Type = TxType; +impl From for Transaction { + fn from(value: TxEip4844Variant) -> Self { + match value { + TxEip4844Variant::TxEip4844(tx) => tx.into(), + TxEip4844Variant::TxEip4844WithSidecar(tx) => tx.tx.into(), + } + } +} - fn signature_hash(&self) -> B256 { - match self { - Self::Legacy(tx) => tx.signature_hash(), - Self::Eip2930(tx) => tx.signature_hash(), - Self::Eip1559(tx) => tx.signature_hash(), - Self::Eip4844(tx) => tx.signature_hash(), - Self::Eip7702(tx) => tx.signature_hash(), - #[cfg(feature = "optimism")] - _ => todo!("use op type for op"), +impl From for Transaction { + fn from(value: TypedTransaction) -> Self { + match value { + TypedTransaction::Legacy(tx) => tx.into(), + TypedTransaction::Eip2930(tx) => tx.into(), + TypedTransaction::Eip1559(tx) => tx.into(), + TypedTransaction::Eip4844(tx) => tx.into(), + TypedTransaction::Eip7702(tx) => tx.into(), } } } @@ -944,7 +851,7 @@ impl TransactionSignedNoHash { #[inline] pub fn with_hash(self) -> TransactionSigned { let Self { signature, transaction } = self; - TransactionSigned::from_transaction_and_signature(transaction, signature) + TransactionSigned::new_unhashed(transaction, signature) } /// Recovers a list of signers from a transaction list iterator @@ -1088,10 +995,11 @@ impl From for TransactionSignedNoHash { /// Signed transaction. #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(rlp))] -#[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref, Serialize, Deserialize)] +#[derive(Debug, Clone, Eq, AsRef, Deref, Serialize, Deserialize)] pub struct TransactionSigned { /// Transaction hash - pub hash: TxHash, + #[serde(skip)] + pub hash: OnceLock, /// The transaction signature values pub signature: Signature, /// Raw transaction info @@ -1116,9 +1024,36 @@ impl AsRef for TransactionSigned { } } +impl Hash for TransactionSigned { + fn hash(&self, state: &mut H) { + self.signature.hash(state); + self.transaction.hash(state); + } +} + +impl PartialEq for TransactionSigned { + fn eq(&self, other: &Self) -> bool { + self.signature == other.signature && + self.transaction == other.transaction && + self.hash_ref() == other.hash_ref() + } +} + // === impl TransactionSigned === impl TransactionSigned { + /// Creates a new signed transaction from the given parts. + pub fn new(transaction: Transaction, signature: Signature, hash: B256) -> Self { + Self { hash: hash.into(), signature, transaction } + } + + /// Creates a new signed transaction from the given transaction and signature without the hash. + /// + /// Note: this only calculates the hash on the first [`TransactionSigned::hash`] call. 
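// ---- [Editor's sketch; not part of the diff] ----------------------------
// `TransactionSigned.hash` is now a serde-skipped `OnceLock<TxHash>`:
// `new_unhashed` stores an empty cell and the keccak hash is computed at most
// once, on first access via `hash_ref`/`get_or_init`, instead of eagerly in
// the constructor (which is also why `PartialEq` and `Hash` are implemented
// manually above). Self-contained model of the lazy-init pattern:
use std::sync::OnceLock;

struct SignedMock {
    payload: Vec<u8>,
    hash: OnceLock<u64>, // stand-in for OnceLock<TxHash>
}

impl SignedMock {
    fn new_unhashed(payload: Vec<u8>) -> Self {
        Self { payload, hash: OnceLock::new() }
    }

    // Mirrors `hash_ref`: compute on first call, serve the cache afterwards.
    fn hash(&self) -> u64 {
        *self.hash.get_or_init(|| {
            // stand-in for `recalculate_hash` (keccak256 of the 2718 encoding)
            self.payload.iter().map(|&b| u64::from(b)).sum()
        })
    }
}

fn main() {
    let tx = SignedMock::new_unhashed(vec![1, 2, 3]);
    assert_eq!(tx.hash(), 6); // computed here...
    assert_eq!(tx.hash(), 6); // ...read from the cache here
}
// --------------------------------------------------------------------------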
+ pub fn new_unhashed(transaction: Transaction, signature: Signature) -> Self { + Self { hash: Default::default(), signature, transaction } + } + /// Transaction signature. pub const fn signature(&self) -> &Signature { &self.signature @@ -1130,13 +1065,13 @@ impl TransactionSigned { } /// Transaction hash. Used to identify transaction. - pub const fn hash(&self) -> TxHash { - self.hash + pub fn hash(&self) -> TxHash { + *self.hash_ref() } /// Reference to transaction hash. Used to identify transaction. - pub const fn hash_ref(&self) -> &TxHash { - &self.hash + pub fn hash_ref(&self) -> &TxHash { + self.hash.get_or_init(|| self.recalculate_hash()) } /// Recover signer from signature and hash. @@ -1265,19 +1200,10 @@ impl TransactionSigned { keccak256(self.encoded_2718()) } - /// Create a new signed transaction from a transaction and its signature. - /// - /// This will also calculate the transaction hash using its encoding. - pub fn from_transaction_and_signature(transaction: Transaction, signature: Signature) -> Self { - let mut initial_tx = Self { transaction, hash: Default::default(), signature }; - initial_tx.hash = initial_tx.recalculate_hash(); - initial_tx - } - - /// Calculate a heuristic for the in-memory size of the [`TransactionSigned`]. - #[inline] - pub fn size(&self) -> usize { - mem::size_of::() + self.transaction.size() + mem::size_of::() + /// Splits the transaction into parts. + pub fn into_parts(self) -> (Transaction, Signature, B256) { + let hash = self.hash(); + (self.transaction, self.signature, hash) } /// Decodes legacy transaction from the data buffer into a tuple. @@ -1337,20 +1263,17 @@ impl TransactionSigned { // so decoding methods do not need to manually advance the buffer pub fn decode_rlp_legacy_transaction(data: &mut &[u8]) -> alloy_rlp::Result { let (transaction, hash, signature) = Self::decode_rlp_legacy_transaction_tuple(data)?; - let signed = Self { transaction: Transaction::Legacy(transaction), hash, signature }; + let signed = + Self { transaction: Transaction::Legacy(transaction), hash: hash.into(), signature }; Ok(signed) } } impl SignedTransaction for TransactionSigned { - type Transaction = Transaction; + type Type = TxType; fn tx_hash(&self) -> &TxHash { - &self.hash - } - - fn transaction(&self) -> &Self::Transaction { - &self.transaction + self.hash_ref() } fn signature(&self) -> &Signature { @@ -1366,7 +1289,9 @@ impl SignedTransaction for TransactionSigned { let signature_hash = self.signature_hash(); recover_signer_unchecked(&self.signature, signature_hash) } +} +impl reth_primitives_traits::FillTxEnv for TransactionSigned { fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address) { tx_env.caller = sender; match self.as_ref() { @@ -1447,6 +1372,14 @@ impl SignedTransaction for TransactionSigned { } } +impl InMemorySize for TransactionSigned { + /// Calculate a heuristic for the in-memory size of the [`TransactionSigned`]. 
+ #[inline] + fn size(&self) -> usize { + self.hash().size() + self.transaction.size() + self.signature().size() + } +} + impl alloy_consensus::Transaction for TransactionSigned { fn chain_id(&self) -> Option { self.deref().chain_id() @@ -1606,6 +1539,10 @@ impl Encodable2718 for TransactionSigned { fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { self.transaction.eip2718_encode(&self.signature, out) } + + fn trie_hash(&self) -> B256 { + self.hash() + } } impl Decodable2718 for TransactionSigned { @@ -1614,22 +1551,22 @@ impl Decodable2718 for TransactionSigned { TxType::Legacy => Err(Eip2718Error::UnexpectedType(0)), TxType::Eip2930 => { let (tx, signature, hash) = TxEip2930::rlp_decode_signed(buf)?.into_parts(); - Ok(Self { transaction: Transaction::Eip2930(tx), signature, hash }) + Ok(Self { transaction: Transaction::Eip2930(tx), signature, hash: hash.into() }) } TxType::Eip1559 => { let (tx, signature, hash) = TxEip1559::rlp_decode_signed(buf)?.into_parts(); - Ok(Self { transaction: Transaction::Eip1559(tx), signature, hash }) + Ok(Self { transaction: Transaction::Eip1559(tx), signature, hash: hash.into() }) } TxType::Eip7702 => { let (tx, signature, hash) = TxEip7702::rlp_decode_signed(buf)?.into_parts(); - Ok(Self { transaction: Transaction::Eip7702(tx), signature, hash }) + Ok(Self { transaction: Transaction::Eip7702(tx), signature, hash: hash.into() }) } TxType::Eip4844 => { let (tx, signature, hash) = TxEip4844::rlp_decode_signed(buf)?.into_parts(); - Ok(Self { transaction: Transaction::Eip4844(tx), signature, hash }) + Ok(Self { transaction: Transaction::Eip4844(tx), signature, hash: hash.into() }) } #[cfg(feature = "optimism")] - TxType::Deposit => Ok(Self::from_transaction_and_signature( + TxType::Deposit => Ok(Self::new_unhashed( Transaction::Deposit(TxDeposit::rlp_decode(buf)?), TxDeposit::signature(), )), @@ -1641,6 +1578,35 @@ impl Decodable2718 for TransactionSigned { } } +macro_rules! impl_from_signed { + ($($tx:ident),*) => { + $( + impl From> for TransactionSigned { + fn from(value: Signed<$tx>) -> Self { + let(tx,sig,hash) = value.into_parts(); + Self::new(tx.into(), sig, hash) + } + } + )* + }; +} + +impl_from_signed!(TxLegacy, TxEip2930, TxEip1559, TxEip7702, TxEip4844, TypedTransaction); + +impl From> for TransactionSigned { + fn from(value: Signed) -> Self { + let (tx, sig, hash) = value.into_parts(); + Self::new(tx, sig, hash) + } +} + +impl From for Signed { + fn from(value: TransactionSigned) -> Self { + let (tx, sig, hash) = value.into_parts(); + Self::new_unchecked(tx, sig, hash) + } +} + #[cfg(any(test, feature = "arbitrary"))] impl<'a> arbitrary::Arbitrary<'a> for TransactionSigned { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { @@ -1667,57 +1633,53 @@ impl<'a> arbitrary::Arbitrary<'a> for TransactionSigned { #[cfg(feature = "optimism")] let signature = if transaction.is_deposit() { TxDeposit::signature() } else { signature }; - - Ok(Self::from_transaction_and_signature(transaction, signature)) + Ok(Self::new_unhashed(transaction, signature)) } } /// Signed transaction with recovered signer. 
#[derive(Debug, Clone, PartialEq, Hash, Eq, AsRef, Deref)] -pub struct TransactionSignedEcRecovered { +pub struct TransactionSignedEcRecovered { /// Signer of the transaction signer: Address, /// Signed transaction #[deref] #[as_ref] - signed_transaction: TransactionSigned, + signed_transaction: T, } // === impl TransactionSignedEcRecovered === -impl TransactionSignedEcRecovered { +impl TransactionSignedEcRecovered { /// Signer of transaction recovered from signature pub const fn signer(&self) -> Address { self.signer } /// Returns a reference to [`TransactionSigned`] - pub const fn as_signed(&self) -> &TransactionSigned { + pub const fn as_signed(&self) -> &T { &self.signed_transaction } /// Transform back to [`TransactionSigned`] - pub fn into_signed(self) -> TransactionSigned { + pub fn into_signed(self) -> T { self.signed_transaction } /// Dissolve Self to its component - pub fn to_components(self) -> (TransactionSigned, Address) { + pub fn to_components(self) -> (T, Address) { (self.signed_transaction, self.signer) } /// Create [`TransactionSignedEcRecovered`] from [`TransactionSigned`] and [`Address`] of the /// signer. #[inline] - pub const fn from_signed_transaction( - signed_transaction: TransactionSigned, - signer: Address, - ) -> Self { + pub const fn from_signed_transaction(signed_transaction: T, signer: Address) -> Self { Self { signed_transaction, signer } } } -impl Encodable for TransactionSignedEcRecovered { +impl Encodable for TransactionSignedEcRecovered { /// This encodes the transaction _with_ the signature, and an rlp header. /// /// Refer to docs for [`TransactionSigned::encode`] for details on the exact format. @@ -1730,9 +1692,9 @@ impl Encodable for TransactionSignedEcRecovered { } } -impl Decodable for TransactionSignedEcRecovered { +impl Decodable for TransactionSignedEcRecovered { fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - let signed_transaction = TransactionSigned::decode(buf)?; + let signed_transaction = T::decode(buf)?; let signer = signed_transaction .recover_signer() .ok_or(RlpError::Custom("Unable to recover decoded transaction signer."))?; @@ -1740,59 +1702,19 @@ impl Decodable for TransactionSignedEcRecovered { } } -/// Generic wrapper with encoded Bytes, such as transaction data. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct WithEncoded(Bytes, pub T); - -impl From<(Bytes, T)> for WithEncoded { - fn from(value: (Bytes, T)) -> Self { - Self(value.0, value.1) - } -} - -impl WithEncoded { - /// Wraps the value with the bytes. - pub const fn new(bytes: Bytes, value: T) -> Self { - Self(bytes, value) - } - - /// Get the encoded bytes - pub fn encoded_bytes(&self) -> Bytes { - self.0.clone() - } - - /// Get the underlying value - pub const fn value(&self) -> &T { - &self.1 - } - - /// Returns ownership of the underlying value. - pub fn into_value(self) -> T { - self.1 - } - - /// Transform the value - pub fn transform>(self) -> WithEncoded { - WithEncoded(self.0, self.1.into()) - } - - /// Split the wrapper into [`Bytes`] and value tuple - pub fn split(self) -> (Bytes, T) { - (self.0, self.1) - } - - /// Maps the inner value to a new value using the given function. - pub fn map U>(self, op: F) -> WithEncoded { - WithEncoded(self.0, op(self.1)) +/// Extension trait for [`SignedTransaction`] to convert it into [`TransactionSignedEcRecovered`]. 
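// ---- [Editor's sketch; not part of the diff] ----------------------------
// `TransactionSignedEcRecovered` is now generic over `T: SignedTransaction`,
// and the blanket extension trait declared just below adds
// `into_ecrecovered_unchecked` to every such type. Minimal runnable model of
// the wrapper-plus-extension-trait pattern (recovery stubbed out):
type Address = [u8; 20];

trait SignedTx: Sized {
    fn recover_signer_unchecked(&self) -> Option<Address>;
}

struct Recovered<T> {
    signer: Address,
    tx: T,
}

trait IntoRecoveredExt: SignedTx {
    // Blanket-provided: consume the tx and attach the recovered signer.
    fn into_ecrecovered_unchecked(self) -> Option<Recovered<Self>> {
        let signer = self.recover_signer_unchecked()?;
        Some(Recovered { signer, tx: self })
    }
}
impl<T: SignedTx> IntoRecoveredExt for T {}

struct DummyTx;
impl SignedTx for DummyTx {
    fn recover_signer_unchecked(&self) -> Option<Address> { Some([7; 20]) }
}

fn main() {
    let recovered = DummyTx.into_ecrecovered_unchecked().unwrap();
    assert_eq!(recovered.signer, [7; 20]);
    let _original: DummyTx = recovered.tx; // wrapper still owns the tx
}
// --------------------------------------------------------------------------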
+/// Extension trait for [`SignedTransaction`] to convert it into [`TransactionSignedEcRecovered`]. +pub trait SignedTransactionIntoRecoveredExt: SignedTransaction { + /// Consumes the type, recover signer and return [`TransactionSignedEcRecovered`] _without + /// ensuring that the signature has a low `s` value_ (EIP-2). + /// + /// Returns `None` if the transaction's signature is invalid. + fn into_ecrecovered_unchecked(self) -> Option<TransactionSignedEcRecovered<Self>> { + let signer = self.recover_signer_unchecked()?; + Some(TransactionSignedEcRecovered::from_signed_transaction(self, signer)) + } +} -impl<T> WithEncoded<Option<T>> { - /// returns `None` if the inner value is `None`, otherwise returns `Some(WithEncoded<T>)`. - pub fn transpose(self) -> Option<WithEncoded<T>> { - self.1.map(|v| WithEncoded(self.0, v)) - } -} +impl<T> SignedTransactionIntoRecoveredExt for T where T: SignedTransaction {} /// Bincode-compatible transaction type serde implementations. #[cfg(feature = "serde-bincode-compat")] @@ -1906,7 +1828,7 @@ pub mod serde_bincode_compat { impl<'a> From<&'a super::TransactionSigned> for TransactionSigned<'a> { fn from(value: &'a super::TransactionSigned) -> Self { Self { - hash: value.hash, + hash: value.hash(), signature: value.signature, transaction: Transaction::from(&value.transaction), } @@ -1916,7 +1838,7 @@ impl<'a> From<TransactionSigned<'a>> for super::TransactionSigned { fn from(value: TransactionSigned<'a>) -> Self { Self { - hash: value.hash, + hash: value.hash.into(), signature: value.signature, transaction: value.transaction.into(), } @@ -1999,11 +1921,26 @@ } } +/// Recovers a list of signers from a transaction list iterator. +/// +/// Returns `None`, if some transaction's signature is invalid +pub fn recover_signers<'a, I, T>(txes: I, num_txes: usize) -> Option<Vec<Address>> +where + T: SignedTransaction, + I: IntoParallelIterator<Item = &'a T> + IntoIterator<Item = &'a T> + Send, +{ + if num_txes < *PARALLEL_SENDER_RECOVERY_THRESHOLD { + txes.into_iter().map(|tx| tx.recover_signer()).collect() + } else { + txes.into_par_iter().map(|tx| tx.recover_signer()).collect() + } +}
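// NOTE (editor): usage sketch, not part of the diff. `recover_signers` picks
// sequential or rayon-parallel recovery based on the
// `PARALLEL_SENDER_RECOVERY_THRESHOLD` cutoff; `senders` is an illustrative
// wrapper over a slice of signed transactions.
fn senders(txs: &[TransactionSigned]) -> Option<Vec<Address>> {
    // Returns `None` as soon as any signature fails to recover.
    recover_signers(txs, txs.len())
}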
#[cfg(test)] mod tests { use crate::{ transaction::{TxEip1559, TxKind, TxLegacy}, - Transaction, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, + Transaction, TransactionSigned, TransactionSignedEcRecovered, }; use alloy_consensus::Transaction as _; use alloy_eips::eip2718::{Decodable2718, Encodable2718}; @@ -2078,13 +2015,15 @@ mod tests { assert_eq!( tx.blob_versioned_hashes(), - Some(vec![ - b256!("012ec3d6f66766bedb002a190126b3549fce0047de0d4c25cffce0dc1c57921a"), - b256!("0152d8e24762ff22b1cfd9f8c0683786a7ca63ba49973818b3d1e9512cd2cec4"), - b256!("013b98c6c83e066d5b14af2b85199e3d4fc7d1e778dd53130d180f5077e2d1c7"), - b256!("01148b495d6e859114e670ca54fb6e2657f0cbae5b08063605093a4b3dc9f8f1"), - b256!("011ac212f13c5dff2b2c6b600a79635103d6f580a4221079951181b25c7e6549"), - ]) + Some( + &[ + b256!("012ec3d6f66766bedb002a190126b3549fce0047de0d4c25cffce0dc1c57921a"), + b256!("0152d8e24762ff22b1cfd9f8c0683786a7ca63ba49973818b3d1e9512cd2cec4"), + b256!("013b98c6c83e066d5b14af2b85199e3d4fc7d1e778dd53130d180f5077e2d1c7"), + b256!("01148b495d6e859114e670ca54fb6e2657f0cbae5b08063605093a4b3dc9f8f1"), + b256!("011ac212f13c5dff2b2c6b600a79635103d6f580a4221079951181b25c7e6549"), + ][..] + ) ); } @@ -2207,9 +2146,9 @@ mod tests { signature: Signature, hash: Option<B256>, ) { - let expected = TransactionSigned::from_transaction_and_signature(transaction, signature); + let expected = TransactionSigned::new_unhashed(transaction, signature); if let Some(hash) = hash { - assert_eq!(hash, expected.hash); + assert_eq!(hash, expected.hash()); } assert_eq!(bytes.len(), expected.length()); @@ -2304,7 +2243,7 @@ mod tests { let signature = crate::sign_message(B256::from_slice(&key_pair.secret_bytes()[..]), tx.signature_hash()).unwrap(); - TransactionSigned::from_transaction_and_signature(tx, signature) + TransactionSigned::new_unhashed(tx, signature) }).collect(); let parallel_senders = TransactionSigned::recover_signers(&txes, txes.len()).unwrap(); @@ -2386,17 +2325,17 @@ mod tests { input: Bytes::from(input), }); - let tx_signed_no_hash = TransactionSignedNoHash { signature, transaction }; - test_transaction_signed_to_from_compact(tx_signed_no_hash); + let tx = TransactionSigned::new_unhashed(transaction, signature); + test_transaction_signed_to_from_compact(tx); } } - fn test_transaction_signed_to_from_compact(tx_signed_no_hash: TransactionSignedNoHash) { + fn test_transaction_signed_to_from_compact(tx: TransactionSigned) { // zstd aware `to_compact` let mut buff: Vec<u8> = Vec::new(); - let written_bytes = tx_signed_no_hash.to_compact(&mut buff); - let (decoded, _) = TransactionSignedNoHash::from_compact(&buff, written_bytes); - assert_eq!(tx_signed_no_hash, decoded); + let written_bytes = tx.to_compact(&mut buff); + let (decoded, _) = TransactionSigned::from_compact(&buff, written_bytes); + assert_eq!(tx, decoded); } #[test] diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index 86cd40a8fe6..2bd344ea2a1 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -1,20 +1,32 @@ //! Defines the types for blob transactions, legacy, and other EIP-2718 transactions included in a //! response to `GetPooledTransactions`. -use super::{error::TransactionConversionError, signature::recover_signer, TxEip7702}; -use crate::{BlobTransaction, Transaction, TransactionSigned, TransactionSignedEcRecovered}; -use alloy_eips::eip4844::BlobTransactionSidecar; - +use super::{ + error::TransactionConversionError, recover_signer_unchecked, signature::recover_signer, + TxEip7702, +}; +use crate::{ + BlobTransaction, Transaction, TransactionSigned, TransactionSignedEcRecovered, TxType, +}; use alloy_consensus::{ constants::EIP4844_TX_TYPE_ID, - transaction::{RlpEcdsaTx, TxEip1559, TxEip2930, TxEip4844, TxLegacy}, - SignableTransaction, TxEip4844WithSidecar, + transaction::{TxEip1559, TxEip2930, TxEip4844, TxLegacy}, + Signed, TxEip4844WithSidecar, +}; +use alloy_eips::{ + eip2718::{Decodable2718, Eip2718Result, Encodable2718}, + eip2930::AccessList, + eip4844::BlobTransactionSidecar, + eip7702::SignedAuthorization, +}; +use alloy_primitives::{ + Address, Bytes, ChainId, PrimitiveSignature as Signature, TxHash, TxKind, B256, U256, }; -use alloy_eips::eip2718::{Decodable2718, Eip2718Result, Encodable2718}; -use alloy_primitives::{Address, PrimitiveSignature as Signature, TxHash, B256}; use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header}; use bytes::Buf; +use core::hash::{Hash, Hasher}; use derive_more::{AsRef, Deref}; +use reth_primitives_traits::{InMemorySize, SignedTransaction}; use serde::{Deserialize, Serialize};
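// NOTE (editor): illustrative helper mirroring the updated compact test above;
// not part of the diff. Any `TransactionSigned` is expected to survive the
// zstd-aware `Compact` round trip now that `TransactionSignedNoHash` is gone.
fn compact_round_trip(tx: &TransactionSigned) -> bool {
    use reth_codecs::Compact;
    let mut buf: Vec<u8> = Vec::new();
    let written = tx.to_compact(&mut buf);
    let (decoded, _) = TransactionSigned::from_compact(&buf, written);
    decoded == *tx
}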
/// A response to `GetPooledTransactions`. This can include either a blob transaction, or a @@ -22,42 +34,14 @@ use serde::{Deserialize, Serialize}; #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests)] #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub enum PooledTransactionsElement { - /// A legacy transaction - Legacy { - /// The inner transaction - transaction: TxLegacy, - /// The signature - signature: Signature, - /// The hash of the transaction - hash: TxHash, - }, - /// An EIP-2930 typed transaction - Eip2930 { - /// The inner transaction - transaction: TxEip2930, - /// The signature - signature: Signature, - /// The hash of the transaction - hash: TxHash, - }, - /// An EIP-1559 typed transaction - Eip1559 { - /// The inner transaction - transaction: TxEip1559, - /// The signature - signature: Signature, - /// The hash of the transaction - hash: TxHash, - }, - /// An EIP-7702 typed transaction - Eip7702 { - /// The inner transaction - transaction: TxEip7702, - /// The signature - signature: Signature, - /// The hash of the transaction - hash: TxHash, - }, + /// An untagged [`TxLegacy`]. + Legacy(Signed<TxLegacy>), + /// A [`TxEip2930`] tagged with type 1. + Eip2930(Signed<TxEip2930>), + /// A [`TxEip1559`] tagged with type 2. + Eip1559(Signed<TxEip1559>), + /// A [`TxEip7702`] tagged with type 4. + Eip7702(Signed<TxEip7702>), /// A blob transaction, which includes the transaction, blob data, commitments, and proofs. BlobTransaction(BlobTransaction), } @@ -69,18 +53,19 @@ impl PooledTransactionsElement { /// [`PooledTransactionsElement`]. Since [`BlobTransaction`] is disallowed to be broadcasted on /// p2p, return an err if `tx` is [`Transaction::Eip4844`]. pub fn try_from_broadcast(tx: TransactionSigned) -> Result<Self, TransactionSigned> { + let hash = tx.hash(); match tx { - TransactionSigned { transaction: Transaction::Legacy(tx), signature, hash } => { - Ok(Self::Legacy { transaction: tx, signature, hash }) + TransactionSigned { transaction: Transaction::Legacy(tx), signature, .. } => { + Ok(Self::Legacy(Signed::new_unchecked(tx, signature, hash))) } - TransactionSigned { transaction: Transaction::Eip2930(tx), signature, hash } => { - Ok(Self::Eip2930 { transaction: tx, signature, hash }) + TransactionSigned { transaction: Transaction::Eip2930(tx), signature, .. } => { + Ok(Self::Eip2930(Signed::new_unchecked(tx, signature, hash))) } - TransactionSigned { transaction: Transaction::Eip1559(tx), signature, hash } => { - Ok(Self::Eip1559 { transaction: tx, signature, hash }) + TransactionSigned { transaction: Transaction::Eip1559(tx), signature, .. } => { - Ok(Self::Eip1559(Signed::new_unchecked(tx, signature, hash))) + Ok(Self::Eip1559(Signed::new_unchecked(tx, signature, hash))) } - TransactionSigned { transaction: Transaction::Eip7702(tx), signature, hash } => { - Ok(Self::Eip7702 { transaction: tx, signature, hash }) + TransactionSigned { transaction: Transaction::Eip7702(tx), signature, .. } => { + Ok(Self::Eip7702(Signed::new_unchecked(tx, signature, hash))) } // Not supported because missing blob sidecar tx @ TransactionSigned { transaction: Transaction::Eip4844(_), .. } => Err(tx), @@ -99,15 +84,16 @@ impl PooledTransactionsElement { pub fn try_from_blob_transaction( tx: TransactionSigned, sidecar: BlobTransactionSidecar, ) -> Result<Self, (TransactionSigned, BlobTransactionSidecar)> { + let hash = tx.hash(); Ok(match tx { // If the transaction is an EIP-4844 transaction... - TransactionSigned { transaction: Transaction::Eip4844(tx), signature, hash } => { + TransactionSigned { transaction: Transaction::Eip4844(tx), signature, .. } => { // Construct a `PooledTransactionsElement::BlobTransaction` with provided sidecar.
- Self::BlobTransaction(BlobTransaction { + Self::BlobTransaction(BlobTransaction(Signed::new_unchecked( + TxEip4844WithSidecar { tx, sidecar }, signature, hash, - transaction: TxEip4844WithSidecar { tx, sidecar }, - }) + ))) } // If the transaction is not EIP-4844, return an error with the original // transaction. @@ -119,44 +105,33 @@ impl PooledTransactionsElement { /// It is only for signature signing or signer recovery. pub fn signature_hash(&self) -> B256 { match self { - Self::Legacy { transaction, .. } => transaction.signature_hash(), - Self::Eip2930 { transaction, .. } => transaction.signature_hash(), - Self::Eip1559 { transaction, .. } => transaction.signature_hash(), - Self::Eip7702 { transaction, .. } => transaction.signature_hash(), - Self::BlobTransaction(blob_tx) => blob_tx.transaction.signature_hash(), + Self::Legacy(tx) => tx.signature_hash(), + Self::Eip2930(tx) => tx.signature_hash(), + Self::Eip1559(tx) => tx.signature_hash(), + Self::Eip7702(tx) => tx.signature_hash(), + Self::BlobTransaction(tx) => tx.signature_hash(), } } /// Reference to transaction hash. Used to identify transaction. pub const fn hash(&self) -> &TxHash { match self { - Self::Legacy { hash, .. } | - Self::Eip2930 { hash, .. } | - Self::Eip1559 { hash, .. } | - Self::Eip7702 { hash, .. } => hash, - Self::BlobTransaction(tx) => &tx.hash, + Self::Legacy(tx) => tx.hash(), + Self::Eip2930(tx) => tx.hash(), + Self::Eip1559(tx) => tx.hash(), + Self::Eip7702(tx) => tx.hash(), + Self::BlobTransaction(tx) => tx.0.hash(), } } /// Returns the signature of the transaction. pub const fn signature(&self) -> &Signature { match self { - Self::Legacy { signature, .. } | - Self::Eip2930 { signature, .. } | - Self::Eip1559 { signature, .. } | - Self::Eip7702 { signature, .. } => signature, - Self::BlobTransaction(blob_tx) => &blob_tx.signature, - } - } - - /// Returns the transaction nonce. - pub const fn nonce(&self) -> u64 { - match self { - Self::Legacy { transaction, .. } => transaction.nonce, - Self::Eip2930 { transaction, .. } => transaction.nonce, - Self::Eip1559 { transaction, .. } => transaction.nonce, - Self::Eip7702 { transaction, .. } => transaction.nonce, - Self::BlobTransaction(blob_tx) => blob_tx.transaction.tx.nonce, + Self::Legacy(tx) => tx.signature(), + Self::Eip2930(tx) => tx.signature(), + Self::Eip1559(tx) => tx.signature(), + Self::Eip7702(tx) => tx.signature(), + Self::BlobTransaction(tx) => tx.0.signature(), } } @@ -187,24 +162,10 @@ impl PooledTransactionsElement { /// Returns the inner [`TransactionSigned`]. pub fn into_transaction(self) -> TransactionSigned { match self { - Self::Legacy { transaction, signature, hash } => { - TransactionSigned { transaction: Transaction::Legacy(transaction), signature, hash } - } - Self::Eip2930 { transaction, signature, hash } => TransactionSigned { - transaction: Transaction::Eip2930(transaction), - signature, - hash, - }, - Self::Eip1559 { transaction, signature, hash } => TransactionSigned { - transaction: Transaction::Eip1559(transaction), - signature, - hash, - }, - Self::Eip7702 { transaction, signature, hash } => TransactionSigned { - transaction: Transaction::Eip7702(transaction), - signature, - hash, - }, + Self::Legacy(tx) => tx.into(), + Self::Eip2930(tx) => tx.into(), + Self::Eip1559(tx) => tx.into(), + Self::Eip7702(tx) => tx.into(), Self::BlobTransaction(blob_tx) => blob_tx.into_parts().0, } } @@ -218,7 +179,7 @@ impl PooledTransactionsElement { /// Returns the [`TxLegacy`] variant if the transaction is a legacy transaction. 
pub const fn as_legacy(&self) -> Option<&TxLegacy> { match self { - Self::Legacy { transaction, .. } => Some(transaction), + Self::Legacy(tx) => Some(tx.tx()), _ => None, } } @@ -226,7 +187,7 @@ impl PooledTransactionsElement { /// Returns the [`TxEip2930`] variant if the transaction is an EIP-2930 transaction. pub const fn as_eip2930(&self) -> Option<&TxEip2930> { match self { - Self::Eip2930 { transaction, .. } => Some(transaction), + Self::Eip2930(tx) => Some(tx.tx()), _ => None, } } @@ -234,7 +195,7 @@ impl PooledTransactionsElement { /// Returns the [`TxEip1559`] variant if the transaction is an EIP-1559 transaction. pub const fn as_eip1559(&self) -> Option<&TxEip1559> { match self { - Self::Eip1559 { transaction, .. } => Some(transaction), + Self::Eip1559(tx) => Some(tx.tx()), _ => None, } } @@ -242,7 +203,7 @@ impl PooledTransactionsElement { /// Returns the [`TxEip4844`] variant if the transaction is an EIP-4844 transaction. pub const fn as_eip4844(&self) -> Option<&TxEip4844> { match self { - Self::BlobTransaction(tx) => Some(&tx.transaction.tx), + Self::BlobTransaction(tx) => Some(tx.0.tx().tx()), _ => None, } } @@ -250,7 +211,7 @@ impl PooledTransactionsElement { /// Returns the [`TxEip7702`] variant if the transaction is an EIP-7702 transaction. pub const fn as_eip7702(&self) -> Option<&TxEip7702> { match self { - Self::Eip7702 { transaction, .. } => Some(transaction), + Self::Eip7702(tx) => Some(tx.tx()), _ => None, } } @@ -263,43 +224,11 @@ impl PooledTransactionsElement { pub fn blob_gas_used(&self) -> Option<u64> { self.as_eip4844().map(TxEip4844::blob_gas) } +} - /// Max fee per blob gas for eip4844 transaction [`TxEip4844`]. - /// - /// Returns `None` for non-eip4844 transactions. - /// - /// This is also commonly referred to as the "Blob Gas Fee Cap" (`BlobGasFeeCap`). - pub const fn max_fee_per_blob_gas(&self) -> Option<u128> { - match self { - Self::BlobTransaction(tx) => Some(tx.transaction.tx.max_fee_per_blob_gas), - _ => None, - } - } - - /// Max priority fee per gas for eip1559 transaction, for legacy and eip2930 transactions this - /// is `None` - /// - /// This is also commonly referred to as the "Gas Tip Cap" (`GasTipCap`). - pub const fn max_priority_fee_per_gas(&self) -> Option<u128> { - match self { - Self::Legacy { .. } | Self::Eip2930 { .. } => None, - Self::Eip1559 { transaction, .. } => Some(transaction.max_priority_fee_per_gas), - Self::Eip7702 { transaction, .. } => Some(transaction.max_priority_fee_per_gas), - Self::BlobTransaction(tx) => Some(tx.transaction.tx.max_priority_fee_per_gas), - } - } - - /// Max fee per gas for eip1559 transaction, for legacy transactions this is `gas_price`. - /// - /// This is also commonly referred to as the "Gas Fee Cap" (`GasFeeCap`). - pub const fn max_fee_per_gas(&self) -> u128 { - match self { - Self::Legacy { transaction, .. } => transaction.gas_price, - Self::Eip2930 { transaction, .. } => transaction.gas_price, - Self::Eip1559 { transaction, .. } => transaction.max_fee_per_gas, - Self::Eip7702 { transaction, .. } => transaction.max_fee_per_gas, - Self::BlobTransaction(tx) => tx.transaction.tx.max_fee_per_gas, - } +impl Hash for PooledTransactionsElement { + fn hash<H: Hasher>(&self, state: &mut H) { + self.trie_hash().hash(state); } } @@ -387,51 +316,31 @@ impl Decodable for PooledTransactionsElement { impl Encodable2718 for PooledTransactionsElement { fn type_flag(&self) -> Option<u8> { match self { - Self::Legacy { .. } => None, - Self::Eip2930 { .. } => Some(0x01), - Self::Eip1559 { .. } => Some(0x02), - Self::BlobTransaction { ..
} => Some(0x03), - Self::Eip7702 { .. } => Some(0x04), + Self::Legacy(_) => None, + Self::Eip2930(_) => Some(0x01), + Self::Eip1559(_) => Some(0x02), + Self::BlobTransaction(_) => Some(0x03), + Self::Eip7702(_) => Some(0x04), } } fn encode_2718_len(&self) -> usize { match self { - Self::Legacy { transaction, signature, .. } => { - transaction.eip2718_encoded_length(signature) - } - Self::Eip2930 { transaction, signature, .. } => { - transaction.eip2718_encoded_length(signature) - } - Self::Eip1559 { transaction, signature, .. } => { - transaction.eip2718_encoded_length(signature) - } - Self::Eip7702 { transaction, signature, .. } => { - transaction.eip2718_encoded_length(signature) - } - Self::BlobTransaction(BlobTransaction { transaction, signature, .. }) => { - transaction.eip2718_encoded_length(signature) - } + Self::Legacy(tx) => tx.eip2718_encoded_length(), + Self::Eip2930(tx) => tx.eip2718_encoded_length(), + Self::Eip1559(tx) => tx.eip2718_encoded_length(), + Self::Eip7702(tx) => tx.eip2718_encoded_length(), + Self::BlobTransaction(tx) => tx.eip2718_encoded_length(), } } fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { match self { - Self::Legacy { transaction, signature, .. } => { - transaction.eip2718_encode(signature, out) - } - Self::Eip2930 { transaction, signature, .. } => { - transaction.eip2718_encode(signature, out) - } - Self::Eip1559 { transaction, signature, .. } => { - transaction.eip2718_encode(signature, out) - } - Self::Eip7702 { transaction, signature, .. } => { - transaction.eip2718_encode(signature, out) - } - Self::BlobTransaction(BlobTransaction { transaction, signature, .. }) => { - transaction.eip2718_encode(signature, out) - } + Self::Legacy(tx) => tx.eip2718_encode(out), + Self::Eip2930(tx) => tx.eip2718_encode(out), + Self::Eip1559(tx) => tx.eip2718_encode(out), + Self::Eip7702(tx) => tx.eip2718_encode(out), + Self::BlobTransaction(tx) => tx.eip2718_encode(out), } } @@ -460,7 +369,7 @@ impl Decodable2718 for PooledTransactionsElement { } tx_type => { let typed_tx = TransactionSigned::typed_decode(tx_type, buf)?; - + let hash = typed_tx.hash(); match typed_tx.transaction { Transaction::Legacy(_) => Err(RlpError::Custom( "legacy transactions should not be a result of typed decoding", @@ -470,21 +379,11 @@ impl Decodable2718 for PooledTransactionsElement { Transaction::Eip4844(_) => Err(RlpError::Custom( "EIP-4844 transactions can only be decoded with transaction type 0x03", ).into()), - Transaction::Eip2930(tx) => Ok(Self::Eip2930 { - transaction: tx, - signature: typed_tx.signature, - hash: typed_tx.hash, - }), - Transaction::Eip1559(tx) => Ok(Self::Eip1559 { - transaction: tx, - signature: typed_tx.signature, - hash: typed_tx.hash, - }), - Transaction::Eip7702(tx) => Ok(Self::Eip7702 { - transaction: tx, - signature: typed_tx.signature, - hash: typed_tx.hash, - }), + Transaction::Eip2930(tx) => Ok(Self::Eip2930 ( + Signed::new_unchecked(tx, typed_tx.signature, hash) + )), + Transaction::Eip1559(tx) => Ok(Self::Eip1559( Signed::new_unchecked(tx, typed_tx.signature, hash))), + Transaction::Eip7702(tx) => Ok(Self::Eip7702( Signed::new_unchecked(tx, typed_tx.signature, hash))), #[cfg(feature = "optimism")] Transaction::Deposit(_) => Err(RlpError::Custom("Optimism deposit transaction cannot be decoded to PooledTransactionsElement").into()) } @@ -497,7 +396,231 @@ impl Decodable2718 for PooledTransactionsElement { let (transaction, hash, signature) = TransactionSigned::decode_rlp_legacy_transaction_tuple(buf)?; - Ok(Self::Legacy { transaction, signature, 
hash }) + Ok(Self::Legacy(Signed::new_unchecked(transaction, signature, hash))) + } +} + +impl alloy_consensus::Transaction for PooledTransactionsElement { + fn chain_id(&self) -> Option<ChainId> { + match self { + Self::Legacy(tx) => tx.tx().chain_id(), + Self::Eip2930(tx) => tx.tx().chain_id(), + Self::Eip1559(tx) => tx.tx().chain_id(), + Self::Eip7702(tx) => tx.tx().chain_id(), + Self::BlobTransaction(tx) => tx.tx().chain_id(), + } + } + + fn nonce(&self) -> u64 { + match self { + Self::Legacy(tx) => tx.tx().nonce(), + Self::Eip2930(tx) => tx.tx().nonce(), + Self::Eip1559(tx) => tx.tx().nonce(), + Self::Eip7702(tx) => tx.tx().nonce(), + Self::BlobTransaction(tx) => tx.tx().nonce(), + } + } + + fn gas_limit(&self) -> u64 { + match self { + Self::Legacy(tx) => tx.tx().gas_limit(), + Self::Eip2930(tx) => tx.tx().gas_limit(), + Self::Eip1559(tx) => tx.tx().gas_limit(), + Self::Eip7702(tx) => tx.tx().gas_limit(), + Self::BlobTransaction(tx) => tx.tx().gas_limit(), + } + } + + fn gas_price(&self) -> Option<u128> { + match self { + Self::Legacy(tx) => tx.tx().gas_price(), + Self::Eip2930(tx) => tx.tx().gas_price(), + Self::Eip1559(tx) => tx.tx().gas_price(), + Self::Eip7702(tx) => tx.tx().gas_price(), + Self::BlobTransaction(tx) => tx.tx().gas_price(), + } + } + + fn max_fee_per_gas(&self) -> u128 { + match self { + Self::Legacy(tx) => tx.tx().max_fee_per_gas(), + Self::Eip2930(tx) => tx.tx().max_fee_per_gas(), + Self::Eip1559(tx) => tx.tx().max_fee_per_gas(), + Self::Eip7702(tx) => tx.tx().max_fee_per_gas(), + Self::BlobTransaction(tx) => tx.tx().max_fee_per_gas(), + } + } + + fn max_priority_fee_per_gas(&self) -> Option<u128> { + match self { + Self::Legacy(tx) => tx.tx().max_priority_fee_per_gas(), + Self::Eip2930(tx) => tx.tx().max_priority_fee_per_gas(), + Self::Eip1559(tx) => tx.tx().max_priority_fee_per_gas(), + Self::Eip7702(tx) => tx.tx().max_priority_fee_per_gas(), + Self::BlobTransaction(tx) => tx.tx().max_priority_fee_per_gas(), + } + } + + fn max_fee_per_blob_gas(&self) -> Option<u128> { + match self { + Self::Legacy(tx) => tx.tx().max_fee_per_blob_gas(), + Self::Eip2930(tx) => tx.tx().max_fee_per_blob_gas(), + Self::Eip1559(tx) => tx.tx().max_fee_per_blob_gas(), + Self::Eip7702(tx) => tx.tx().max_fee_per_blob_gas(), + Self::BlobTransaction(tx) => tx.tx().max_fee_per_blob_gas(), + } + } + + fn priority_fee_or_price(&self) -> u128 { + match self { + Self::Legacy(tx) => tx.tx().priority_fee_or_price(), + Self::Eip2930(tx) => tx.tx().priority_fee_or_price(), + Self::Eip1559(tx) => tx.tx().priority_fee_or_price(), + Self::Eip7702(tx) => tx.tx().priority_fee_or_price(), + Self::BlobTransaction(tx) => tx.tx().priority_fee_or_price(), + } + } + + fn effective_gas_price(&self, base_fee: Option<u64>) -> u128 { + match self { + Self::Legacy(tx) => tx.tx().effective_gas_price(base_fee), + Self::Eip2930(tx) => tx.tx().effective_gas_price(base_fee), + Self::Eip1559(tx) => tx.tx().effective_gas_price(base_fee), + Self::Eip7702(tx) => tx.tx().effective_gas_price(base_fee), + Self::BlobTransaction(tx) => tx.tx().effective_gas_price(base_fee), + } + } + + fn is_dynamic_fee(&self) -> bool { + match self { + Self::Legacy(tx) => tx.tx().is_dynamic_fee(), + Self::Eip2930(tx) => tx.tx().is_dynamic_fee(), + Self::Eip1559(tx) => tx.tx().is_dynamic_fee(), + Self::Eip7702(tx) => tx.tx().is_dynamic_fee(), + Self::BlobTransaction(tx) => tx.tx().is_dynamic_fee(), + } + } + + fn kind(&self) -> TxKind { + match self { + Self::Legacy(tx) => tx.tx().kind(), + Self::Eip2930(tx) => tx.tx().kind(), + Self::Eip1559(tx) => tx.tx().kind(), + Self::Eip7702(tx) => tx.tx().kind(), + Self::BlobTransaction(tx) => tx.tx().kind(), + } + } + + fn value(&self) -> U256 { + match self { + Self::Legacy(tx) => tx.tx().value(), + Self::Eip2930(tx) => tx.tx().value(), + Self::Eip1559(tx) => tx.tx().value(), + Self::Eip7702(tx) => tx.tx().value(), + Self::BlobTransaction(tx) => tx.tx().value(), + } + } + + fn input(&self) -> &Bytes { + match self { + Self::Legacy(tx) => tx.tx().input(), + Self::Eip2930(tx) => tx.tx().input(), + Self::Eip1559(tx) => tx.tx().input(), + Self::Eip7702(tx) => tx.tx().input(), + Self::BlobTransaction(tx) => tx.tx().input(), + } + } + + fn ty(&self) -> u8 { + match self { + Self::Legacy(tx) => tx.tx().ty(), + Self::Eip2930(tx) => tx.tx().ty(), + Self::Eip1559(tx) => tx.tx().ty(), + Self::Eip7702(tx) => tx.tx().ty(), + Self::BlobTransaction(tx) => tx.tx().ty(), + } + } + + fn access_list(&self) -> Option<&AccessList> { + match self { + Self::Legacy(tx) => tx.tx().access_list(), + Self::Eip2930(tx) => tx.tx().access_list(), + Self::Eip1559(tx) => tx.tx().access_list(), + Self::Eip7702(tx) => tx.tx().access_list(), + Self::BlobTransaction(tx) => tx.tx().access_list(), + } + } + + fn blob_versioned_hashes(&self) -> Option<&[B256]> { + match self { + Self::Legacy(tx) => tx.tx().blob_versioned_hashes(), + Self::Eip2930(tx) => tx.tx().blob_versioned_hashes(), + Self::Eip1559(tx) => tx.tx().blob_versioned_hashes(), + Self::Eip7702(tx) => tx.tx().blob_versioned_hashes(), + Self::BlobTransaction(tx) => tx.tx().blob_versioned_hashes(), + } + } + + fn authorization_list(&self) -> Option<&[SignedAuthorization]> { + match self { + Self::Legacy(tx) => tx.tx().authorization_list(), + Self::Eip2930(tx) => tx.tx().authorization_list(), + Self::Eip1559(tx) => tx.tx().authorization_list(), + Self::Eip7702(tx) => tx.tx().authorization_list(), + Self::BlobTransaction(tx) => tx.tx().authorization_list(), + } + } +} + +impl SignedTransaction for PooledTransactionsElement { + type Type = TxType; + + fn tx_hash(&self) -> &TxHash { + match self { + Self::Legacy(tx) => tx.hash(), + Self::Eip2930(tx) => tx.hash(), + Self::Eip1559(tx) => tx.hash(), + Self::Eip7702(tx) => tx.hash(), + Self::BlobTransaction(tx) => tx.hash(), + } + } + + fn signature(&self) -> &Signature { + match self { + Self::Legacy(tx) => tx.signature(), + Self::Eip2930(tx) => tx.signature(), + Self::Eip1559(tx) => tx.signature(), + Self::Eip7702(tx) => tx.signature(), + Self::BlobTransaction(tx) => tx.signature(), + } + } + + fn recover_signer(&self) -> Option<Address> { + let signature_hash = self.signature_hash(); + recover_signer(self.signature(), signature_hash) + } + + fn recover_signer_unchecked(&self) -> Option<Address> { + let signature_hash = self.signature_hash(); + recover_signer_unchecked(self.signature(), signature_hash) + } +} + +impl InMemorySize for PooledTransactionsElement { + fn size(&self) -> usize { + match self { + Self::Legacy(tx) => tx.size(), + Self::Eip2930(tx) => tx.size(), + Self::Eip1559(tx) => tx.size(), + Self::Eip7702(tx) => tx.size(), + Self::BlobTransaction(tx) => tx.size(), + } + } +} + +impl From<PooledTransactionsElementEcRecovered> for PooledTransactionsElement { + fn from(recovered: PooledTransactionsElementEcRecovered) -> Self { + recovered.into_transaction() } } @@ -522,15 +645,22 @@ impl<'a> arbitrary::Arbitrary<'a> for PooledTransactionsElement { // Attempt to create a `TransactionSigned` with arbitrary data. let tx_signed = TransactionSigned::arbitrary(u)?; // Attempt to create a `PooledTransactionsElement` with arbitrary data, handling the Result. - match Self::try_from(tx_signed) { - Ok(Self::BlobTransaction(mut tx)) => { - // Successfully converted to a BlobTransaction, now generate a sidecar. - tx.transaction.sidecar = alloy_eips::eip4844::BlobTransactionSidecar::arbitrary(u)?; - Ok(Self::BlobTransaction(tx)) + match Self::try_from_broadcast(tx_signed) { + Ok(tx) => Ok(tx), + Err(tx) => { + let (tx, sig, hash) = tx.into_parts(); + match tx { + Transaction::Eip4844(tx) => { + let sidecar = BlobTransactionSidecar::arbitrary(u)?; + Ok(Self::BlobTransaction(BlobTransaction(Signed::new_unchecked( + TxEip4844WithSidecar { tx, sidecar }, + sig, + hash, + )))) + } + _ => Err(arbitrary::Error::IncorrectFormat), + } } - Ok(tx) => Ok(tx), // Successfully converted, but not a BlobTransaction. - Err(_) => Err(arbitrary::Error::IncorrectFormat), /* Conversion failed, return an - * arbitrary error. */ } } } @@ -612,6 +742,7 @@ impl TryFrom<TransactionSignedEcRecovered> for PooledTransactionsElementEcRecovered #[cfg(test)] mod tests { use super::*; + use alloy_consensus::Transaction as _; use alloy_primitives::{address, hex}; use assert_matches::assert_matches; use bytes::Bytes;
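// NOTE (editor): sketch of the broadcast rule encoded above, not part of the
// diff. `try_from_broadcast` hands EIP-4844 (and deposit) transactions back as
// `Err` because they must not be gossiped without a sidecar; `into_pooled` is
// an illustrative name.
fn into_pooled(tx: TransactionSigned) -> Option<PooledTransactionsElement> {
    match PooledTransactionsElement::try_from_broadcast(tx) {
        Ok(pooled) => Some(pooled),
        // The original transaction is returned unchanged on rejection.
        Err(_rejected) => None,
    }
}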
diff --git a/crates/primitives/src/transaction/sidecar.rs b/crates/primitives/src/transaction/sidecar.rs index 48a02f4e740..2cf04bc8e74 100644 --- a/crates/primitives/src/transaction/sidecar.rs +++ b/crates/primitives/src/transaction/sidecar.rs @@ -1,9 +1,10 @@ #![cfg_attr(docsrs, doc(cfg(feature = "c-kzg")))] use crate::{Transaction, TransactionSigned}; -use alloy_consensus::{transaction::RlpEcdsaTx, TxEip4844WithSidecar}; +use alloy_consensus::{transaction::RlpEcdsaTx, Signed, TxEip4844WithSidecar}; use alloy_eips::eip4844::BlobTransactionSidecar; -use alloy_primitives::{PrimitiveSignature as Signature, TxHash}; +use derive_more::Deref; +use reth_primitives_traits::InMemorySize; use serde::{Deserialize, Serialize}; /// A response to `GetPooledTransactions` that includes blob data, their commitments, and their @@ -11,16 +12,8 @@ use serde::{Deserialize, Serialize}; /// /// This is defined in [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#networking) as an element /// of a `PooledTransactions` response. -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct BlobTransaction { - /// The transaction hash. - pub hash: TxHash, - /// The transaction signature. - pub signature: Signature, - /// The transaction payload with the sidecar. - #[serde(flatten)] - pub transaction: TxEip4844WithSidecar, -} +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Deref)] +pub struct BlobTransaction(pub Signed<TxEip4844WithSidecar>); impl BlobTransaction { /// Constructs a new [`BlobTransaction`] from a [`TransactionSigned`] and a @@ -31,15 +24,16 @@ impl BlobTransaction { tx: TransactionSigned, sidecar: BlobTransactionSidecar, ) -> Result<Self, (TransactionSigned, BlobTransactionSidecar)> { - let TransactionSigned { transaction, signature, hash } = tx; + let hash = tx.hash(); + let TransactionSigned { transaction, signature, .. } = tx; match transaction { - Transaction::Eip4844(transaction) => Ok(Self { - hash, - transaction: TxEip4844WithSidecar { tx: transaction, sidecar }, + Transaction::Eip4844(transaction) => Ok(Self(Signed::new_unchecked( + TxEip4844WithSidecar { tx: transaction, sidecar }, signature, - }), + hash, + ))), transaction => { - let tx = TransactionSigned { transaction, signature, hash }; + let tx = TransactionSigned::new(transaction, signature, hash); Err((tx, sidecar)) } } @@ -53,19 +47,16 @@ impl BlobTransaction { &self, proof_settings: &c_kzg::KzgSettings, ) -> Result<(), alloy_eips::eip4844::BlobTransactionValidationError> { - self.transaction.validate_blob(proof_settings) + self.tx().validate_blob(proof_settings) } /// Splits the [`BlobTransaction`] into its [`TransactionSigned`] and [`BlobTransactionSidecar`] /// components. pub fn into_parts(self) -> (TransactionSigned, BlobTransactionSidecar) { - let transaction = TransactionSigned { - transaction: Transaction::Eip4844(self.transaction.tx), - hash: self.hash, - signature: self.signature, - }; - - (transaction, self.transaction.sidecar) + let (transaction, signature, hash) = self.0.into_parts(); + let (transaction, sidecar) = transaction.into_parts(); + let transaction = TransactionSigned::new(transaction.into(), signature, hash); + (transaction, sidecar) } /// Decodes a [`BlobTransaction`] from RLP. This expects the encoding to be: @@ -79,8 +70,17 @@ impl BlobTransaction { pub(crate) fn decode_inner(data: &mut &[u8]) -> alloy_rlp::Result<Self> { let (transaction, signature, hash) = TxEip4844WithSidecar::rlp_decode_signed(data)?.into_parts(); + Ok(Self(Signed::new_unchecked(transaction, signature, hash))) + } +} - Ok(Self { transaction, hash, signature }) +impl InMemorySize for BlobTransaction { + fn size(&self) -> usize { + // TODO(mattsse): replace with next alloy bump + self.0.hash().size() + + self.0.signature().size() + + self.0.tx().tx().size() + + self.0.tx().sidecar.size() + } }
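// NOTE (editor): illustrative sketch, not part of the diff. The newtype still
// dissolves into the canonical signed transaction plus its sidecar, and the
// `InMemorySize` impl above sums hash, signature, inner tx and sidecar.
fn split_blob(blob_tx: BlobTransaction) -> (TransactionSigned, BlobTransactionSidecar) {
    // A pool can account for `blob_tx.size()` before splitting it.
    blob_tx.into_parts()
}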
diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index 3445cb184c1..784a976ab79 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -5,6 +5,7 @@ use alloy_consensus::constants::{ use alloy_primitives::{U64, U8}; use alloy_rlp::{Decodable, Encodable}; use derive_more::Display; +use reth_primitives_traits::InMemorySize; use serde::{Deserialize, Serialize}; /// Identifier parameter for legacy transaction @@ -25,10 +26,6 @@ pub const COMPACT_IDENTIFIER_EIP1559: usize = 2; #[cfg(any(test, feature = "reth-codec"))] pub const COMPACT_EXTENDED_IDENTIFIER_FLAG: usize = 3; -/// Identifier for [`TxDeposit`](op_alloy_consensus::TxDeposit) transaction. -#[cfg(feature = "optimism")] -pub const DEPOSIT_TX_TYPE_ID: u8 = 126; - /// Transaction Type /// /// Currently being used as 2-bit type when encoding it to `reth_codecs::Compact` on @@ -118,6 +115,14 @@ impl reth_primitives_traits::TxType for TxType { } } +impl InMemorySize for TxType { + /// Calculates a heuristic for the in-memory size of the [`TxType`]. + #[inline] + fn size(&self) -> usize { + core::mem::size_of::<Self>() + } +} + impl From<TxType> for u8 { fn from(value: TxType) -> Self { match value { TxType::Eip4844 => EIP4844_TX_TYPE_ID, TxType::Eip7702 => EIP7702_TX_TYPE_ID, #[cfg(feature = "optimism")] - TxType::Deposit => DEPOSIT_TX_TYPE_ID, + TxType::Deposit => op_alloy_consensus::DEPOSIT_TX_TYPE_ID, } } } @@ -186,6 +191,8 @@ impl reth_codecs::Compact for TxType { where B: bytes::BufMut + AsMut<[u8]>, { + use reth_codecs::txtype::*; + match self { Self::Legacy => COMPACT_IDENTIFIER_LEGACY, Self::Eip2930 => COMPACT_IDENTIFIER_EIP2930, Self::Eip1559 => COMPACT_IDENTIFIER_EIP1559, @@ -200,7 +207,7 @@ impl reth_codecs::Compact for TxType { } #[cfg(feature = "optimism")] Self::Deposit => { - buf.put_u8(DEPOSIT_TX_TYPE_ID); + buf.put_u8(op_alloy_consensus::DEPOSIT_TX_TYPE_ID); COMPACT_EXTENDED_IDENTIFIER_FLAG } } @@ -213,16 +220,16 @@ impl reth_codecs::Compact for TxType { use bytes::Buf; ( match identifier { - COMPACT_IDENTIFIER_LEGACY => Self::Legacy, - COMPACT_IDENTIFIER_EIP2930 => Self::Eip2930, - COMPACT_IDENTIFIER_EIP1559 => Self::Eip1559, - COMPACT_EXTENDED_IDENTIFIER_FLAG => { + reth_codecs::txtype::COMPACT_IDENTIFIER_LEGACY => Self::Legacy, + reth_codecs::txtype::COMPACT_IDENTIFIER_EIP2930 => Self::Eip2930, + reth_codecs::txtype::COMPACT_IDENTIFIER_EIP1559 => Self::Eip1559, + reth_codecs::txtype::COMPACT_EXTENDED_IDENTIFIER_FLAG => { let extended_identifier = buf.get_u8(); match extended_identifier { EIP4844_TX_TYPE_ID => Self::Eip4844, EIP7702_TX_TYPE_ID => Self::Eip7702, #[cfg(feature = "optimism")] - DEPOSIT_TX_TYPE_ID => Self::Deposit, + op_alloy_consensus::DEPOSIT_TX_TYPE_ID => Self::Deposit, _ => panic!("Unsupported TxType identifier: {extended_identifier}"), } } @@ -265,11 +272,18 @@ impl Decodable for TxType { #[cfg(test)] mod tests { + use super::*; use alloy_primitives::hex; use reth_codecs::Compact; + use reth_primitives_traits::TxType as _; use rstest::rstest; - use super::*; + #[test] + fn is_broadcastable() { + assert!(TxType::Legacy.is_broadcastable_in_full()); + assert!(TxType::Eip1559.is_broadcastable_in_full()); + assert!(!TxType::Eip4844.is_broadcastable_in_full()); + } #[rstest] #[case(U64::from(LEGACY_TX_TYPE_ID), Ok(TxType::Legacy))] #[case(U64::from(EIP2930_TX_TYPE_ID), Ok(TxType::Eip2930))] #[case(U64::from(EIP1559_TX_TYPE_ID), Ok(TxType::Eip1559))] #[case(U64::from(EIP4844_TX_TYPE_ID), Ok(TxType::Eip4844))] #[case(U64::from(EIP7702_TX_TYPE_ID), Ok(TxType::Eip7702))] - #[cfg_attr(feature = "optimism", case(U64::from(DEPOSIT_TX_TYPE_ID), Ok(TxType::Deposit)))] + #[cfg_attr( + feature = "optimism", + case(U64::from(op_alloy_consensus::DEPOSIT_TX_TYPE_ID), Ok(TxType::Deposit)) + )] #[case(U64::MAX, Err("invalid tx type"))] fn test_u64_to_tx_type(#[case] input: U64, #[case] expected: Result<TxType, &'static str>) { let tx_type_result = TxType::try_from(input); @@ -290,7 +307,7 @@ #[case(TxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![])] #[case(TxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![])] #[case(TxType::Eip4844, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP4844_TX_TYPE_ID])] #[case(TxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])] - #[cfg_attr(feature = "optimism", case(TxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG,
vec![DEPOSIT_TX_TYPE_ID]))] + #[cfg_attr(feature = "optimism", case(TxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![op_alloy_consensus::DEPOSIT_TX_TYPE_ID]))] fn test_txtype_to_compact( #[case] tx_type: TxType, #[case] expected_identifier: usize, @@ -309,7 +326,7 @@ #[case(TxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![])] #[case(TxType::Eip4844, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP4844_TX_TYPE_ID])] #[case(TxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])] - #[cfg_attr(feature = "optimism", case(TxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![DEPOSIT_TX_TYPE_ID]))] + #[cfg_attr(feature = "optimism", case(TxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![op_alloy_consensus::DEPOSIT_TX_TYPE_ID]))] fn test_txtype_from_compact( #[case] expected_type: TxType, #[case] identifier: usize, @@ -328,7 +345,7 @@ #[case(&[EIP4844_TX_TYPE_ID], Ok(TxType::Eip4844))] #[case(&[EIP7702_TX_TYPE_ID], Ok(TxType::Eip7702))] #[case(&[u8::MAX], Err(alloy_rlp::Error::InputTooShort))] - #[cfg_attr(feature = "optimism", case(&[DEPOSIT_TX_TYPE_ID], Ok(TxType::Deposit)))] + #[cfg_attr(feature = "optimism", case(&[op_alloy_consensus::DEPOSIT_TX_TYPE_ID], Ok(TxType::Deposit)))] fn decode_tx_type(#[case] input: &[u8], #[case] expected: Result<TxType, alloy_rlp::Error>) { let tx_type_result = TxType::decode(&mut &input[..]); assert_eq!(tx_type_result, expected) diff --git a/crates/primitives/src/transaction/variant.rs b/crates/primitives/src/transaction/variant.rs deleted file mode 100644 index 888c83946ca..00000000000 --- a/crates/primitives/src/transaction/variant.rs +++ /dev/null @@ -1,145 +0,0 @@ -//! Helper enum functions for `Transaction`, `TransactionSigned` and -//! `TransactionSignedEcRecovered` - -use crate::{ - Transaction, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, -}; -use alloy_primitives::{Address, B256}; -use core::ops::Deref; - -/// Represents various different transaction formats used in reth. - /// - /// All variants are based on the raw [Transaction] data and can contain additional information - /// extracted (expensive) from that transaction, like the hash and the signer. -#[derive(Debug, Clone, PartialEq, Eq, Hash, derive_more::From)] -pub enum TransactionSignedVariant { - /// A signed transaction without a hash. - SignedNoHash(TransactionSignedNoHash), - /// Contains the plain transaction data its signature and hash. - Signed(TransactionSigned), - /// Contains the plain transaction data its signature and hash and the successfully recovered - /// signer. - SignedEcRecovered(TransactionSignedEcRecovered), -} - -impl TransactionSignedVariant { - /// Returns the raw transaction object - pub const fn as_raw(&self) -> &Transaction { - match self { - Self::SignedNoHash(tx) => &tx.transaction, - Self::Signed(tx) => &tx.transaction, - Self::SignedEcRecovered(tx) => &tx.signed_transaction.transaction, - } - } - - /// Returns the hash of the transaction - pub fn hash(&self) -> B256 { - match self { - Self::SignedNoHash(tx) => tx.hash(), - Self::Signed(tx) => tx.hash, - Self::SignedEcRecovered(tx) => tx.hash, - } - } - - /// Returns the signer of the transaction. - /// - /// If the transaction is not of [`TransactionSignedEcRecovered`] it will be recovered. - pub fn signer(&self) -> Option<Address> { - match self { - Self::SignedNoHash(tx) => tx.recover_signer(), - Self::Signed(tx) => tx.recover_signer(), - Self::SignedEcRecovered(tx) => Some(tx.signer), - } - } - - /// Returns [`TransactionSigned`] type - /// else None - pub const fn as_signed(&self) -> Option<&TransactionSigned> { - match self { - Self::Signed(tx) => Some(tx), - _ => None, - } - } - - /// Returns `TransactionSignedEcRecovered` type - /// else None - pub const fn as_signed_ec_recovered(&self) -> Option<&TransactionSignedEcRecovered> { - match self { - Self::SignedEcRecovered(tx) => Some(tx), - _ => None, - } - } - - /// Returns true if the transaction is of [`TransactionSigned`] variant - pub const fn is_signed(&self) -> bool { - matches!(self, Self::Signed(_)) - } - - /// Returns true if the transaction is of [`TransactionSignedNoHash`] variant - pub const fn is_signed_no_hash(&self) -> bool { - matches!(self, Self::SignedNoHash(_)) - } - - /// Returns true if the transaction is of [`TransactionSignedEcRecovered`] variant - pub const fn is_signed_ec_recovered(&self) -> bool { - matches!(self, Self::SignedEcRecovered(_)) - } - - /// Consumes the [`TransactionSignedVariant`] and returns the consumed [Transaction] - pub fn into_raw(self) -> Transaction { - match self { - Self::SignedNoHash(tx) => tx.transaction, - Self::Signed(tx) => tx.transaction, - Self::SignedEcRecovered(tx) => tx.signed_transaction.transaction, - } - } - - /// Consumes the [`TransactionSignedVariant`] and returns the consumed [`TransactionSigned`] - pub fn into_signed(self) -> TransactionSigned { - match self { - Self::SignedNoHash(tx) => tx.with_hash(), - Self::Signed(tx) => tx, - Self::SignedEcRecovered(tx) => tx.signed_transaction, - } - } - - /// Consumes the [`TransactionSignedVariant`] and converts it into a - /// [`TransactionSignedEcRecovered`] - /// - /// If the variant is not a [`TransactionSignedEcRecovered`] it will recover the sender. - /// - /// Returns `None` if the transaction's signature is invalid - pub fn into_signed_ec_recovered(self) -> Option<TransactionSignedEcRecovered> { - self.try_into_signed_ec_recovered().ok() - }
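// NOTE (editor): migration sketch, not part of the diff. Code that used the
// removed `TransactionSignedVariant::try_into_signed_ec_recovered` can call
// the equivalent method on `TransactionSigned` directly; `recover` is an
// illustrative name.
fn recover(tx: TransactionSigned) -> Result<TransactionSignedEcRecovered, TransactionSigned> {
    tx.try_into_ecrecovered()
}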
- - /// Consumes the [`TransactionSignedVariant`] and converts it into a - /// [`TransactionSignedEcRecovered`] - /// - /// If the variant is not a [`TransactionSignedEcRecovered`] it will recover the sender. - /// - /// Returns an error if the transaction's signature is invalid. - pub fn try_into_signed_ec_recovered( - self, - ) -> Result<TransactionSignedEcRecovered, TransactionSigned> { - match self { - Self::SignedEcRecovered(tx) => Ok(tx), - Self::Signed(tx) => tx.try_into_ecrecovered(), - Self::SignedNoHash(tx) => tx.with_hash().try_into_ecrecovered(), - } - } -} - -impl AsRef<Transaction> for TransactionSignedVariant { - fn as_ref(&self) -> &Transaction { - self.as_raw() - } -} - -impl Deref for TransactionSignedVariant { - type Target = Transaction; - - fn deref(&self) -> &Self::Target { - self.as_raw() - } -} diff --git a/crates/prune/prune/Cargo.toml b/crates/prune/prune/Cargo.toml index 4df9ace8133..41156d3e56b 100644 --- a/crates/prune/prune/Cargo.toml +++ b/crates/prune/prune/Cargo.toml @@ -24,6 +24,9 @@ reth-config.workspace = true reth-prune-types.workspace = true reth-static-file-types.workspace = true +# ethereum +alloy-eips.workspace = true + # metrics reth-metrics.workspace = true metrics.workspace = true diff --git a/crates/prune/prune/src/event.rs b/crates/prune/prune/src/event.rs deleted file mode 100644 index 4f5806e592e..00000000000 --- a/crates/prune/prune/src/event.rs +++ /dev/null @@ -1,12 +0,0 @@ -use alloy_primitives::BlockNumber; -use reth_prune_types::PrunedSegmentInfo; -use std::time::Duration; - -/// An event emitted by a [Pruner][crate::Pruner]. -#[derive(Debug, PartialEq, Eq, Clone)] -pub enum PrunerEvent { - /// Emitted when pruner started running. - Started { tip_block_number: BlockNumber }, - /// Emitted when pruner finished running. - Finished { tip_block_number: BlockNumber, elapsed: Duration, stats: Vec<PrunedSegmentInfo> }, -} diff --git a/crates/prune/prune/src/lib.rs b/crates/prune/prune/src/lib.rs index 5a43afeb502..e6bcbe5e812 100644 --- a/crates/prune/prune/src/lib.rs +++ b/crates/prune/prune/src/lib.rs @@ -12,7 +12,6 @@ mod builder; mod db_ext; mod error; -mod event; mod metrics; mod pruner; pub mod segments; @@ -20,7 +19,6 @@ pub mod segments; use crate::metrics::Metrics; pub use builder::PrunerBuilder; pub use error::PrunerError; -pub use event::PrunerEvent; pub use pruner::{Pruner, PrunerResult, PrunerWithFactory, PrunerWithResult}; // Re-export prune types diff --git a/crates/prune/prune/src/segments/user/transaction_lookup.rs b/crates/prune/prune/src/segments/user/transaction_lookup.rs index 2df8cccf305..ce9d90c291b 100644 --- a/crates/prune/prune/src/segments/user/transaction_lookup.rs +++ b/crates/prune/prune/src/segments/user/transaction_lookup.rs @@ -3,6 +3,7 @@ use crate::{ segments::{PruneInput, Segment, SegmentOutput}, PrunerError, }; +use alloy_eips::eip2718::Encodable2718; use rayon::prelude::*; use reth_db::{tables, transaction::DbTxMut}; use reth_provider::{BlockReader, DBProvider, TransactionsProvider}; @@ -58,7 +59,7 @@ where
let hashes = provider .transactions_by_tx_range(tx_range.clone())? .into_par_iter() - .map(|transaction| transaction.hash()) + .map(|transaction| transaction.trie_hash()) .collect::<Vec<_>>(); // Number of transactions retrieved from the database should match the tx range count @@ -142,7 +143,7 @@ mod tests { for block in &blocks { tx_hash_numbers.reserve_exact(block.body.transactions.len()); for transaction in &block.body.transactions { - tx_hash_numbers.push((transaction.hash, tx_hash_numbers.len() as u64)); + tx_hash_numbers.push((transaction.hash(), tx_hash_numbers.len() as u64)); } } let tx_hash_numbers_len = tx_hash_numbers.len(); diff --git a/crates/prune/types/src/event.rs b/crates/prune/types/src/event.rs new file mode 100644 index 00000000000..bac5f0d512c --- /dev/null +++ b/crates/prune/types/src/event.rs @@ -0,0 +1,22 @@ +use crate::PrunedSegmentInfo; +use alloy_primitives::BlockNumber; +use std::time::Duration; + +/// An event emitted by a pruner. +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum PrunerEvent { + /// Emitted when pruner started running. + Started { + /// The tip block number before pruning. + tip_block_number: BlockNumber, + }, + /// Emitted when pruner finished running. + Finished { + /// The tip block number before pruning. + tip_block_number: BlockNumber, + /// The elapsed time for the pruning process. + elapsed: Duration, + /// Collected pruning stats. + stats: Vec<PrunedSegmentInfo>, + }, +} diff --git a/crates/prune/types/src/lib.rs b/crates/prune/types/src/lib.rs index 0722e760faf..82a41f0c2b1 100644 --- a/crates/prune/types/src/lib.rs +++ b/crates/prune/types/src/lib.rs @@ -9,6 +9,7 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod checkpoint; +mod event; mod limiter; mod mode; mod pruner; @@ -16,6 +17,7 @@ mod segment; mod target; pub use checkpoint::PruneCheckpoint; +pub use event::PrunerEvent; pub use limiter::PruneLimiter; pub use mode::PruneMode; pub use pruner::{ diff --git a/crates/prune/types/src/pruner.rs b/crates/prune/types/src/pruner.rs index 3046dda0679..fb907925729 100644 --- a/crates/prune/types/src/pruner.rs +++ b/crates/prune/types/src/pruner.rs @@ -1,5 +1,6 @@ use crate::{PruneCheckpoint, PruneLimiter, PruneMode, PruneSegment}; use alloy_primitives::{BlockNumber, TxNumber}; +use derive_more::Display; /// Pruner run output. #[derive(Debug)] @@ -17,7 +18,8 @@ impl From<PruneProgress> for PrunerOutput { } } /// Represents information of a pruner run for a segment. -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, Display)] +#[display("(table={segment}, pruned={pruned}, status={progress})")] pub struct PrunedSegmentInfo { /// The pruned segment pub segment: PruneSegment, @@ -77,16 +79,18 @@ impl SegmentOutputCheckpoint { } /// Progress of pruning. -#[derive(Debug, PartialEq, Eq, Clone, Copy)] +#[derive(Debug, PartialEq, Eq, Clone, Copy, Display)] pub enum PruneProgress { /// There is more data to prune. + #[display("HasMoreData({_0})")] HasMoreData(PruneInterruptReason), /// Pruning has been finished. + #[display("Finished")] Finished, }
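// NOTE (editor): illustrative, not part of the diff. The new `Display` derives
// let pruner progress be logged directly; for a finished segment this prints
// something like `(table=TransactionLookup, pruned=100, status=Finished)`.
fn log_segment(info: &PrunedSegmentInfo) {
    println!("pruner segment result {info}");
}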
/// Reason for interrupting a prune run. -#[derive(Debug, PartialEq, Eq, Clone, Copy)] +#[derive(Debug, PartialEq, Eq, Clone, Copy, Display)] pub enum PruneInterruptReason { /// Prune run timed out. Timeout, diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index d1202cd8b2c..4bc78b7b056 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -59,4 +59,5 @@ serde = [ "alloy-eips/serde", "alloy-primitives/serde", "alloy-consensus/serde", + "reth-primitives-traits/serde", ] diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index ab68d3c88e4..ccf19ed1a0b 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -19,6 +19,7 @@ //! use alloy_consensus::Header; //! use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; //! use reth_network_api::{NetworkInfo, Peers}; +//! use reth_primitives::TransactionSigned; //! use reth_provider::{AccountReader, CanonStateSubscriptions, ChangeSetReader, FullRpcProvider}; //! use reth_rpc::EthApi; //! use reth_rpc_builder::{ @@ -36,7 +37,8 @@ //! block_executor: BlockExecutor, //! consensus: Consensus, //! ) where -//! Provider: FullRpcProvider + AccountReader + ChangeSetReader, +//! Provider: +//! FullRpcProvider<Transaction = TransactionSigned> + AccountReader + ChangeSetReader, //! Pool: TransactionPool + Unpin + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, //! Events: CanonStateSubscriptions + Clone + 'static, @@ -77,6 +79,7 @@ //! use reth_engine_primitives::EngineTypes; //! use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; //! use reth_network_api::{NetworkInfo, Peers}; +//! use reth_primitives::TransactionSigned; //! use reth_provider::{AccountReader, CanonStateSubscriptions, ChangeSetReader, FullRpcProvider}; //! use reth_rpc::EthApi; //! use reth_rpc_api::EngineApiServer; @@ -109,7 +112,8 @@ //! block_executor: BlockExecutor, //! consensus: Consensus, //! ) where -//! Provider: FullRpcProvider + AccountReader + ChangeSetReader, +//! Provider: +//! FullRpcProvider<Transaction = TransactionSigned> + AccountReader + ChangeSetReader, //! Pool: TransactionPool + Unpin + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, //! Events: CanonStateSubscriptions + Clone + 'static, @@ -1252,6 +1256,7 @@ where Arc::new(self.consensus.clone()), self.block_executor.clone(), self.config.flashbots.clone(), + Box::new(self.executor.clone()), ) } } @@ -1416,6 +1421,7 @@ where Arc::new(self.consensus.clone()), self.block_executor.clone(), self.config.flashbots.clone(), + Box::new(self.executor.clone()), ) .into_rpc() .into(), @@ -1999,6 +2005,29 @@ impl TransportRpcModules { &self.config } + /// Merge the given [`Methods`] in all configured transport modules if the given + /// [`RethRpcModule`] is configured for the transport. + /// + /// Fails if any of the methods in other is present already. + pub fn merge_if_module_configured( + &mut self, + module: RethRpcModule, + other: impl Into<Methods>, + ) -> Result<(), RegisterMethodError> { + let other = other.into(); + if self.module_config().contains_http(&module) { + self.merge_http(other.clone())?; + } + if self.module_config().contains_ws(&module) { + self.merge_ws(other.clone())?; + } + if self.module_config().contains_ipc(&module) { + self.merge_ipc(other)?; + } + + Ok(()) + } + /// Merge the given [Methods] in the configured http methods. /// /// Fails if any of the methods in other is present already.
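// NOTE (editor): usage sketch for the new helper above, not part of the diff.
// `extra` stands for any jsonrpsee `Methods`; they are merged only into the
// transports (http/ws/ipc) that actually have the `eth` module configured.
fn extend_eth_module(
    modules: &mut TransportRpcModules,
    extra: jsonrpsee::Methods,
) -> Result<(), RegisterMethodError> {
    modules.merge_if_module_configured(RethRpcModule::Eth, extra)
}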
diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 1eade554fc1..d7e74c37b56 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -628,7 +628,7 @@ pub trait Call: LoadState<Evm: ConfigureEvm<Header = Header>> + SpawnBlocking { cfg.clone(), block_env.clone(), block_txs, - tx.hash, + tx.hash(), )?; let env = EnvWithHandlerCfg::new_with_cfg_env( diff --git a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs index 48394f1cd6b..7e1992017d8 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs @@ -2,18 +2,21 @@ //! loads receipt data w.r.t. network. use futures::Future; -use reth_primitives::{Receipt, TransactionMeta, TransactionSigned}; +use reth_primitives::{Receipt, TransactionMeta}; +use reth_provider::TransactionsProvider; use crate::{EthApiTypes, RpcNodeCoreExt, RpcReceipt}; /// Assembles transaction receipt data w.r.t to network. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` receipts RPC methods. -pub trait LoadReceipt: EthApiTypes + RpcNodeCoreExt + Send + Sync { +pub trait LoadReceipt: + EthApiTypes + RpcNodeCoreExt<Provider: TransactionsProvider> + Send + Sync +{ /// Helper method for `eth_getBlockReceipts` and `eth_getTransactionReceipt`. fn build_transaction_receipt( &self, - tx: TransactionSigned, + tx: <Self::Provider as TransactionsProvider>::Transaction, meta: TransactionMeta, receipt: Receipt, ) -> impl Future<Output = Result<RpcReceipt<Self::NetworkTypes>, Self::Error>> + Send; diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index 104042d17a2..a1e6084da55 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -204,7 +204,7 @@ pub trait Trace: LoadState<Evm: ConfigureEvm<Header = Header>> { cfg.clone(), block_env.clone(), block_txs, - tx.hash, + tx.hash(), )?; let env = EnvWithHandlerCfg::new_with_cfg_env( diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index afe1c513b69..2223ecdc9f7 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -9,7 +9,9 @@ use alloy_primitives::{Address, Bytes, TxHash, B256}; use alloy_rpc_types_eth::{transaction::TransactionRequest, BlockNumberOrTag, TransactionInfo}; use futures::Future; use reth_primitives::{Receipt, SealedBlockWithSenders, TransactionMeta, TransactionSigned}; -use reth_provider::{BlockNumReader, BlockReaderIdExt, ReceiptProvider, TransactionsProvider}; +use reth_provider::{ + BlockNumReader, BlockReaderIdExt, ProviderTx, ReceiptProvider, TransactionsProvider, +}; use reth_rpc_eth_types::{ utils::{binary_search, recover_raw_transaction}, EthApiError, SignError, TransactionSource, @@ -60,10 +62,13 @@ pub trait EthTransactions: LoadTransaction { /// Checks the pool and state. /// /// Returns `Ok(None)` if no matching transaction was found. + #[expect(clippy::complexity)] fn transaction_by_hash( &self, hash: B256, - ) -> impl Future<Output = Result<Option<TransactionSource>, Self::Error>> + Send { + ) -> impl Future< + Output = Result<Option<TransactionSource<ProviderTx<Self::Provider>>>, Self::Error>, + > + Send { LoadTransaction::transaction_by_hash(self, hash) } @@ -148,19 +153,22 @@ }
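// NOTE (editor): orientation only, not part of the diff. The `ProviderTx<P>`
// alias used in the new signatures above is assumed to expand to the
// provider's associated transaction type, roughly:
type ProviderTxSketch<P> = <P as TransactionsProvider>::Transaction;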
+ #[expect(clippy::complexity)] fn load_transaction_and_receipt( &self, hash: TxHash, ) -> impl Future< - Output = Result, Self::Error>, + Output = Result< + Option<(ProviderTx, TransactionMeta, Receipt)>, + Self::Error, + >, > + Send where Self: 'static, { - let this = self.clone(); + let provider = self.provider().clone(); self.spawn_blocking_io(move |_| { - let (tx, meta) = match this - .provider() + let (tx, meta) = match provider .transaction_by_hash_with_meta(hash) .map_err(Self::Error::from_eth_err)? { @@ -168,11 +176,10 @@ pub trait EthTransactions: LoadTransaction { None => return Ok(None), }; - let receipt = - match this.provider().receipt_by_hash(hash).map_err(Self::Error::from_eth_err)? { - Some(recpt) => recpt, - None => return Ok(None), - }; + let receipt = match provider.receipt_by_hash(hash).map_err(Self::Error::from_eth_err)? { + Some(recpt) => recpt, + None => return Ok(None), + }; Ok(Some((tx, meta, receipt))) }) @@ -325,7 +332,7 @@ pub trait EthTransactions: LoadTransaction { tx: Bytes, ) -> impl Future> + Send { async move { - let recovered = recover_raw_transaction(tx.clone())?; + let recovered = recover_raw_transaction(tx)?; let pool_transaction = ::Transaction::from_pooled(recovered.into()); @@ -477,10 +484,13 @@ pub trait LoadTransaction: /// Checks the pool and state. /// /// Returns `Ok(None)` if no matching transaction was found. + #[expect(clippy::complexity)] fn transaction_by_hash( &self, hash: B256, - ) -> impl Future, Self::Error>> + Send { + ) -> impl Future< + Output = Result>>, Self::Error>, + > + Send { async move { // Try to find the transaction on disk let mut resp = self diff --git a/crates/rpc/rpc-eth-api/src/types.rs b/crates/rpc/rpc-eth-api/src/types.rs index 12ff090d37c..994f9ac884d 100644 --- a/crates/rpc/rpc-eth-api/src/types.rs +++ b/crates/rpc/rpc-eth-api/src/types.rs @@ -7,9 +7,11 @@ use std::{ use alloy_network::Network; use alloy_rpc_types_eth::Block; +use reth_primitives::TransactionSigned; +use reth_provider::TransactionsProvider; use reth_rpc_types_compat::TransactionCompat; -use crate::{AsEthApiError, FromEthApiError, FromEvmError}; +use crate::{AsEthApiError, FromEthApiError, FromEvmError, RpcNodeCore}; /// Network specific `eth` API types. pub trait EthApiTypes: Send + Sync + Clone { @@ -43,22 +45,27 @@ pub type RpcReceipt = ::ReceiptResponse; pub type RpcError = ::Error; /// Helper trait holds necessary trait bounds on [`EthApiTypes`] to implement `eth` API. 
-pub trait FullEthApiTypes:
-    EthApiTypes<
-        TransactionCompat: TransactionCompat<
-            Transaction = RpcTransaction<Self::NetworkTypes>,
-            Error = RpcError<Self>,
-        >,
->
+pub trait FullEthApiTypes
+where
+    Self: RpcNodeCore<Provider: TransactionsProvider<Transaction = TransactionSigned>>
+        + EthApiTypes<
+            TransactionCompat: TransactionCompat<
+                <Self::Provider as TransactionsProvider>::Transaction,
+                Transaction = RpcTransaction<Self::NetworkTypes>,
+                Error = RpcError<Self>,
+            >,
+        >,
 {
 }
 
 impl<T> FullEthApiTypes for T where
-    T: EthApiTypes<
-        TransactionCompat: TransactionCompat<
-            Transaction = RpcTransaction<T::NetworkTypes>,
-            Error = RpcError<T>,
-        >,
-    >
+    T: RpcNodeCore<Provider: TransactionsProvider<Transaction = TransactionSigned>>
+        + EthApiTypes<
+            TransactionCompat: TransactionCompat<
+                <T::Provider as TransactionsProvider>::Transaction,
+                Transaction = RpcTransaction<T::NetworkTypes>,
+                Error = RpcError<T>,
+            >,
+        >
 {
 }
diff --git a/crates/rpc/rpc-eth-types/Cargo.toml b/crates/rpc/rpc-eth-types/Cargo.toml
index 9b38ed89724..98b9530d63c 100644
--- a/crates/rpc/rpc-eth-types/Cargo.toml
+++ b/crates/rpc/rpc-eth-types/Cargo.toml
@@ -19,6 +19,7 @@ reth-evm.workspace = true
 reth-execution-types.workspace = true
 reth-metrics.workspace = true
 reth-primitives = { workspace = true, features = ["secp256k1"] }
+reth-primitives-traits.workspace = true
 reth-storage-api.workspace = true
 reth-revm.workspace = true
 reth-rpc-server-types.workspace = true
diff --git a/crates/rpc/rpc-eth-types/src/error/mod.rs b/crates/rpc/rpc-eth-types/src/error/mod.rs
index 893bbdd6b9c..187e2d943f7 100644
--- a/crates/rpc/rpc-eth-types/src/error/mod.rs
+++ b/crates/rpc/rpc-eth-types/src/error/mod.rs
@@ -362,7 +362,7 @@ pub enum RpcInvalidTransactionError {
     SenderNoEOA,
     /// Gas limit was exceeded during execution.
     /// Contains the gas limit.
-    #[error("out of gas: gas required exceeds allowance: {0}")]
+    #[error("out of gas: gas required exceeds allowance: {0}")]
     BasicOutOfGas(u64),
     /// Gas limit was exceeded during memory expansion.
     /// Contains the gas limit.
diff --git a/crates/rpc/rpc-eth-types/src/logs_utils.rs b/crates/rpc/rpc-eth-types/src/logs_utils.rs
index 3e7c9db6d68..5ead11b7115 100644
--- a/crates/rpc/rpc-eth-types/src/logs_utils.rs
+++ b/crates/rpc/rpc-eth-types/src/logs_utils.rs
@@ -2,7 +2,7 @@
 //!
 //! Log parsing for building filter.
 
-use alloy_eips::BlockNumHash;
+use alloy_eips::{eip2718::Encodable2718, BlockNumHash};
 use alloy_primitives::TxHash;
 use alloy_rpc_types_eth::{FilteredParams, Log};
 use reth_chainspec::ChainInfo;
@@ -110,7 +110,7 @@ pub fn append_matching_block_logs(
                         ProviderError::TransactionNotFound(transaction_id.into())
                     })?;
 
-                    Some(transaction.hash())
+                    Some(transaction.trie_hash())
                 }
             };
 }
diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs
index b24ec9e86bc..5a0daa1b42f 100644
--- a/crates/rpc/rpc-eth-types/src/simulate.rs
+++ b/crates/rpc/rpc-eth-types/src/simulate.rs
@@ -1,6 +1,6 @@
 //! Utilities for serving `eth_simulateV1`
 
-use alloy_consensus::{Transaction as _, TxEip4844Variant, TxType, TypedTransaction};
+use alloy_consensus::{Transaction as _, TxType};
 use alloy_primitives::PrimitiveSignature as Signature;
 use alloy_rpc_types_eth::{
     simulate::{SimCallResult, SimulateError, SimulatedBlock},
@@ -10,7 +10,7 @@ use alloy_rpc_types_eth::{
 use jsonrpsee_types::ErrorObject;
 use reth_primitives::{
     proofs::{calculate_receipt_root, calculate_transaction_root},
-    BlockBody, BlockWithSenders, Receipt, Transaction, TransactionSigned, TransactionSignedNoHash,
+    BlockBody, BlockWithSenders, Receipt, TransactionSigned,
 };
 use reth_revm::database::StateProviderDatabase;
 use reth_rpc_server_types::result::rpc_err;
@@ -135,34 +135,7 @@ where
         // Create an empty signature for the transaction.
let signature = Signature::new(Default::default(), Default::default(), false); - - let tx = match tx { - TypedTransaction::Legacy(tx) => { - TransactionSignedNoHash { transaction: Transaction::Legacy(tx), signature } - .with_hash() - } - TypedTransaction::Eip2930(tx) => { - TransactionSignedNoHash { transaction: Transaction::Eip2930(tx), signature } - .with_hash() - } - TypedTransaction::Eip1559(tx) => { - TransactionSignedNoHash { transaction: Transaction::Eip1559(tx), signature } - .with_hash() - } - TypedTransaction::Eip4844(tx) => { - let tx = match tx { - TxEip4844Variant::TxEip4844(tx) => tx, - TxEip4844Variant::TxEip4844WithSidecar(tx) => tx.tx, - }; - TransactionSignedNoHash { transaction: Transaction::Eip4844(tx), signature } - .with_hash() - } - TypedTransaction::Eip7702(tx) => { - TransactionSignedNoHash { transaction: Transaction::Eip7702(tx), signature } - .with_hash() - } - }; - + let tx = TransactionSigned::new_unhashed(tx.into(), signature); transactions.push(tx); } diff --git a/crates/rpc/rpc-eth-types/src/transaction.rs b/crates/rpc/rpc-eth-types/src/transaction.rs index a4ede0a1a4e..83ef97807de 100644 --- a/crates/rpc/rpc-eth-types/src/transaction.rs +++ b/crates/rpc/rpc-eth-types/src/transaction.rs @@ -4,7 +4,8 @@ use alloy_primitives::B256; use alloy_rpc_types_eth::TransactionInfo; -use reth_primitives::TransactionSignedEcRecovered; +use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered}; +use reth_primitives_traits::SignedTransaction; use reth_rpc_types_compat::{ transaction::{from_recovered, from_recovered_with_block_context}, TransactionCompat, @@ -12,15 +13,15 @@ use reth_rpc_types_compat::{ /// Represents from where a transaction was fetched. #[derive(Debug, Clone, Eq, PartialEq)] -pub enum TransactionSource { +pub enum TransactionSource { /// Transaction exists in the pool (Pending) - Pool(TransactionSignedEcRecovered), + Pool(TransactionSignedEcRecovered), /// Transaction already included in a block /// /// This can be a historical block or a pending block (received from the CL) Block { /// Transaction fetched via provider - transaction: TransactionSignedEcRecovered, + transaction: TransactionSignedEcRecovered, /// Index of the transaction in the block index: u64, /// Hash of the block. @@ -34,22 +35,22 @@ pub enum TransactionSource { // === impl TransactionSource === -impl TransactionSource { +impl TransactionSource { /// Consumes the type and returns the wrapped transaction. - pub fn into_recovered(self) -> TransactionSignedEcRecovered { + pub fn into_recovered(self) -> TransactionSignedEcRecovered { self.into() } /// Conversion into network specific transaction type. 
- pub fn into_transaction( + pub fn into_transaction>( self, - resp_builder: &T, - ) -> Result { + resp_builder: &Builder, + ) -> Result { match self { Self::Pool(tx) => from_recovered(tx, resp_builder), Self::Block { transaction, index, block_hash, block_number, base_fee } => { let tx_info = TransactionInfo { - hash: Some(transaction.hash()), + hash: Some(transaction.trie_hash()), index: Some(index), block_hash: Some(block_hash), block_number: Some(block_number), @@ -62,14 +63,14 @@ impl TransactionSource { } /// Returns the transaction and block related info, if not pending - pub fn split(self) -> (TransactionSignedEcRecovered, TransactionInfo) { + pub fn split(self) -> (TransactionSignedEcRecovered, TransactionInfo) { match self { Self::Pool(tx) => { - let hash = tx.hash(); + let hash = tx.trie_hash(); (tx, TransactionInfo { hash: Some(hash), ..Default::default() }) } Self::Block { transaction, index, block_hash, block_number, base_fee } => { - let hash = transaction.hash(); + let hash = transaction.trie_hash(); ( transaction, TransactionInfo { @@ -85,8 +86,8 @@ impl TransactionSource { } } -impl From for TransactionSignedEcRecovered { - fn from(value: TransactionSource) -> Self { +impl From> for TransactionSignedEcRecovered { + fn from(value: TransactionSource) -> Self { match value { TransactionSource::Pool(tx) => tx, TransactionSource::Block { transaction, .. } => transaction, diff --git a/crates/rpc/rpc-types-compat/src/block.rs b/crates/rpc/rpc-types-compat/src/block.rs index 43086b311bd..f2b1d93be83 100644 --- a/crates/rpc/rpc-types-compat/src/block.rs +++ b/crates/rpc/rpc-types-compat/src/block.rs @@ -7,7 +7,7 @@ use alloy_rlp::Encodable; use alloy_rpc_types_eth::{ Block, BlockTransactions, BlockTransactionsKind, Header, TransactionInfo, }; -use reth_primitives::{Block as PrimitiveBlock, BlockWithSenders}; +use reth_primitives::{Block as PrimitiveBlock, BlockWithSenders, TransactionSigned}; use crate::{transaction::from_recovered_with_block_context, TransactionCompat}; @@ -87,7 +87,11 @@ pub fn from_block_full( index: Some(idx as u64), }; - from_recovered_with_block_context::(signed_tx_ec_recovered, tx_info, tx_resp_builder) + from_recovered_with_block_context::( + signed_tx_ec_recovered, + tx_info, + tx_resp_builder, + ) }) .collect::, T::Error>>()?; diff --git a/crates/rpc/rpc-types-compat/src/transaction.rs b/crates/rpc/rpc-types-compat/src/transaction.rs index 9e8fae67096..b439b61d44e 100644 --- a/crates/rpc/rpc-types-compat/src/transaction.rs +++ b/crates/rpc/rpc-types-compat/src/transaction.rs @@ -8,7 +8,7 @@ use alloy_rpc_types_eth::{ request::{TransactionInput, TransactionRequest}, TransactionInfo, }; -use reth_primitives::TransactionSignedEcRecovered; +use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered}; use serde::{Deserialize, Serialize}; /// Create a new rpc transaction result for a mined transaction, using the given block hash, @@ -16,8 +16,8 @@ use serde::{Deserialize, Serialize}; /// /// The block hash, number, and tx index fields should be from the original block where the /// transaction was mined. -pub fn from_recovered_with_block_context( - tx: TransactionSignedEcRecovered, +pub fn from_recovered_with_block_context>( + tx: TransactionSignedEcRecovered, tx_info: TransactionInfo, resp_builder: &T, ) -> Result { @@ -26,15 +26,17 @@ pub fn from_recovered_with_block_context( /// Create a new rpc transaction result for a _pending_ signed transaction, setting block /// environment related fields to `None`. 
-pub fn from_recovered( - tx: TransactionSignedEcRecovered, +pub fn from_recovered>( + tx: TransactionSignedEcRecovered, resp_builder: &T, ) -> Result { resp_builder.fill(tx, TransactionInfo::default()) } /// Builds RPC transaction w.r.t. network. -pub trait TransactionCompat: Send + Sync + Unpin + Clone + fmt::Debug { +pub trait TransactionCompat: + Send + Sync + Unpin + Clone + fmt::Debug +{ /// RPC transaction response type. type Transaction: Serialize + for<'de> Deserialize<'de> @@ -51,7 +53,7 @@ pub trait TransactionCompat: Send + Sync + Unpin + Clone + fmt::Debug { /// environment related fields to `None`. fn fill( &self, - tx: TransactionSignedEcRecovered, + tx: TransactionSignedEcRecovered, tx_inf: TransactionInfo, ) -> Result; @@ -73,7 +75,7 @@ pub fn transaction_to_call_request(tx: TransactionSignedEcRecovered) -> Transact let access_list = tx.transaction.access_list().cloned(); let max_fee_per_blob_gas = tx.transaction.max_fee_per_blob_gas(); let authorization_list = tx.transaction.authorization_list().map(|l| l.to_vec()); - let blob_versioned_hashes = tx.transaction.blob_versioned_hashes(); + let blob_versioned_hashes = tx.transaction.blob_versioned_hashes().map(Vec::from); let tx_type = tx.transaction.tx_type(); // fees depending on the transaction type diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 5418cd1eb3a..804ecd11120 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -82,7 +82,7 @@ parking_lot.workspace = true # misc tracing.workspace = true -tracing-futures = "0.2" +tracing-futures.workspace = true futures.workspace = true rand.workspace = true serde.workspace = true diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 78040b48c5f..dd6bf9bbc24 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -107,7 +107,7 @@ where let mut transactions = block.transactions_with_sender().enumerate().peekable(); let mut inspector = None; while let Some((index, (signer, tx))) = transactions.next() { - let tx_hash = tx.hash; + let tx_hash = tx.hash(); let env = EnvWithHandlerCfg { env: Env::boxed( @@ -255,7 +255,7 @@ where cfg.clone(), block_env.clone(), block_txs, - tx.hash, + tx.hash(), )?; let env = EnvWithHandlerCfg { @@ -274,7 +274,7 @@ where Some(TransactionContext { block_hash: Some(block_hash), tx_index: Some(index), - tx_hash: Some(tx.hash), + tx_hash: Some(tx.hash()), }), &mut None, ) diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index f92bd075a3b..10eec4dbf97 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -1,11 +1,12 @@ //! `Eth` bundle implementation and helpers. +use alloy_consensus::Transaction as _; use alloy_primitives::{Keccak256, U256}; use alloy_rpc_types_mev::{EthCallBundle, EthCallBundleResponse, EthCallBundleTransactionResult}; use jsonrpsee::core::RpcResult; use reth_chainspec::EthChainSpec; use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; -use reth_primitives::PooledTransactionsElement; +use reth_primitives::{PooledTransactionsElement, Transaction}; use reth_provider::{ChainSpecProvider, HeaderProvider}; use reth_revm::database::StateProviderDatabase; use reth_rpc_eth_api::{ @@ -19,7 +20,7 @@ use revm::{ primitives::{ResultAndState, TxEnv}, }; use revm_primitives::{EnvKzgSettings, EnvWithHandlerCfg, SpecId, MAX_BLOB_GAS_PER_BLOCK}; -use std::sync::Arc; +use std::{ops::Deref, sync::Arc}; /// `Eth` bundle implementation. 
pub struct EthBundle { @@ -87,7 +88,7 @@ where .iter() .filter_map(|(tx, _)| { if let PooledTransactionsElement::BlobTransaction(tx) = tx { - Some(tx.transaction.tx.blob_gas()) + Some(tx.tx().tx().blob_gas()) } else { None } @@ -179,8 +180,7 @@ where let tx = tx.into_transaction(); hasher.update(tx.hash()); - let gas_price = tx - .effective_tip_per_gas(basefee) + let gas_price = Transaction::effective_tip_per_gas(tx.deref(), basefee) .ok_or_else(|| RpcInvalidTransactionError::FeeCapTooLow) .map_err(Eth::Error::from_eth_err)?; eth_api.evm_config().fill_tx_env(evm.tx_mut(), &tx, signer); diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index d6c8f522cda..dac37152942 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -407,7 +407,7 @@ impl EthApiInner LoadReceipt for EthApi where - Self: RpcNodeCoreExt, + Self: RpcNodeCoreExt>, { async fn build_transaction_receipt( &self, diff --git a/crates/rpc/rpc/src/eth/helpers/signer.rs b/crates/rpc/rpc/src/eth/helpers/signer.rs index e7e9c64447b..32645ba08d6 100644 --- a/crates/rpc/rpc/src/eth/helpers/signer.rs +++ b/crates/rpc/rpc/src/eth/helpers/signer.rs @@ -109,6 +109,7 @@ impl EthSigner for DevSigner { #[cfg(test)] mod tests { + use alloy_consensus::Transaction; use alloy_primitives::{Bytes, U256}; use alloy_rpc_types_eth::TransactionInput; use revm_primitives::TxKind; diff --git a/crates/rpc/rpc/src/eth/helpers/types.rs b/crates/rpc/rpc/src/eth/helpers/types.rs index d1ce84bc0b7..157213b54e6 100644 --- a/crates/rpc/rpc/src/eth/helpers/types.rs +++ b/crates/rpc/rpc/src/eth/helpers/types.rs @@ -4,11 +4,27 @@ use alloy_consensus::{Signed, Transaction as _, TxEip4844Variant, TxEnvelope}; use alloy_network::{Ethereum, Network}; use alloy_rpc_types_eth::{Transaction, TransactionInfo}; use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered}; +use reth_rpc_eth_api::EthApiTypes; use reth_rpc_eth_types::EthApiError; use reth_rpc_types_compat::TransactionCompat; +/// A standalone [`EthApiTypes`] implementation for Ethereum. +#[derive(Debug, Clone, Copy, Default)] +pub struct EthereumEthApiTypes(EthTxBuilder); + +impl EthApiTypes for EthereumEthApiTypes { + type Error = EthApiError; + type NetworkTypes = Ethereum; + type TransactionCompat = EthTxBuilder; + + fn tx_resp_builder(&self) -> &Self::TransactionCompat { + &self.0 + } +} + /// Builds RPC transaction response for l1. -#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone, Copy, Default)] +#[non_exhaustive] pub struct EthTxBuilder; impl TransactionCompat for EthTxBuilder @@ -25,7 +41,8 @@ where tx_info: TransactionInfo, ) -> Result { let from = tx.signer(); - let TransactionSigned { transaction, signature, hash } = tx.into_signed(); + let hash = tx.hash(); + let TransactionSigned { transaction, signature, .. 
} = tx.into_signed(); let inner: TxEnvelope = match transaction { reth_primitives::Transaction::Legacy(tx) => { diff --git a/crates/rpc/rpc/src/eth/mod.rs b/crates/rpc/rpc/src/eth/mod.rs index 4d1833add3e..d8a5b95f55e 100644 --- a/crates/rpc/rpc/src/eth/mod.rs +++ b/crates/rpc/rpc/src/eth/mod.rs @@ -13,6 +13,9 @@ pub use core::EthApi; pub use filter::EthFilter; pub use pubsub::EthPubSub; -pub use helpers::{signer::DevSigner, types::EthTxBuilder}; +pub use helpers::{ + signer::DevSigner, + types::{EthTxBuilder, EthereumEthApiTypes}, +}; pub use reth_rpc_eth_api::{EthApiServer, EthApiTypes, FullEthApiServer, RpcNodeCore}; diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index b997dec1e01..a5e29bb739f 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -5,13 +5,13 @@ use alloy_rpc_types_beacon::relay::{ BuilderBlockValidationRequestV3, BuilderBlockValidationRequestV4, }; use alloy_rpc_types_engine::{ - BlobsBundleV1, CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, + BlobsBundleV1, CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, PayloadError, }; use async_trait::async_trait; use jsonrpsee::core::RpcResult; use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; use reth_consensus::{Consensus, PostExecutionInput}; -use reth_errors::{BlockExecutionError, ConsensusError, ProviderError, RethError}; +use reth_errors::{BlockExecutionError, ConsensusError, ProviderError}; use reth_ethereum_consensus::GAS_LIMIT_BOUND_DIVISOR; use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_payload_validator::ExecutionPayloadValidator; @@ -22,16 +22,16 @@ use reth_provider::{ }; use reth_revm::{cached::CachedReads, database::StateProviderDatabase}; use reth_rpc_api::BlockSubmissionValidationApiServer; -use reth_rpc_eth_types::EthApiError; -use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; +use reth_rpc_server_types::result::internal_rpc_err; +use reth_tasks::TaskSpawner; use reth_trie::HashedPostState; use revm_primitives::{Address, B256, U256}; use serde::{Deserialize, Serialize}; use std::{collections::HashSet, sync::Arc}; -use tokio::sync::RwLock; +use tokio::sync::{oneshot, RwLock}; /// The type that implements the `validation` rpc namespace trait -#[derive(Debug, derive_more::Deref)] +#[derive(Clone, Debug, derive_more::Deref)] pub struct ValidationApi { #[deref] inner: Arc>, @@ -47,6 +47,7 @@ where consensus: Arc, executor_provider: E, config: ValidationApiConfig, + task_spawner: Box, ) -> Self { let ValidationApiConfig { disallow } = config; @@ -58,6 +59,7 @@ where executor_provider, disallow, cached_state: Default::default(), + task_spawner, }); Self { inner } @@ -338,55 +340,23 @@ where Ok(versioned_hashes) } -} - -#[async_trait] -impl BlockSubmissionValidationApiServer for ValidationApi -where - Provider: BlockReaderIdExt - + ChainSpecProvider - + StateProviderFactory - + HeaderProvider - + AccountReader - + WithdrawalsProvider - + Clone - + 'static, - E: BlockExecutorProvider, -{ - async fn validate_builder_submission_v1( - &self, - _request: BuilderBlockValidationRequest, - ) -> RpcResult<()> { - Err(internal_rpc_err("unimplemented")) - } - async fn validate_builder_submission_v2( - &self, - _request: BuilderBlockValidationRequestV2, - ) -> RpcResult<()> { - Err(internal_rpc_err("unimplemented")) - } - - /// Validates a block submitted to the relay + /// Core logic for validating the builder submission v3 async fn validate_builder_submission_v3( &self, request: 
BuilderBlockValidationRequestV3, - ) -> RpcResult<()> { + ) -> Result<(), ValidationApiError> { let block = self .payload_validator .ensure_well_formed_payload( ExecutionPayload::V3(request.request.execution_payload), ExecutionPayloadSidecar::v3(CancunPayloadFields { parent_beacon_block_root: request.parent_beacon_block_root, - versioned_hashes: self - .validate_blobs_bundle(request.request.blobs_bundle) - .map_err(|e| RethError::Other(e.into())) - .to_rpc_result()?, + versioned_hashes: self.validate_blobs_bundle(request.request.blobs_bundle)?, }), - ) - .to_rpc_result()? + )? .try_seal_with_senders() - .map_err(|_| EthApiError::InvalidTransactionSignature)?; + .map_err(|_| ValidationApiError::InvalidTransactionSignature)?; self.validate_message_against_block( block, @@ -394,15 +364,13 @@ where request.registered_gas_limit, ) .await - .map_err(|e| RethError::Other(e.into())) - .to_rpc_result() } - /// Validates a block submitted to the relay + /// Core logic for validating the builder submission v4 async fn validate_builder_submission_v4( &self, request: BuilderBlockValidationRequestV4, - ) -> RpcResult<()> { + ) -> Result<(), ValidationApiError> { let block = self .payload_validator .ensure_well_formed_payload( @@ -411,16 +379,13 @@ where CancunPayloadFields { parent_beacon_block_root: request.parent_beacon_block_root, versioned_hashes: self - .validate_blobs_bundle(request.request.blobs_bundle) - .map_err(|e| RethError::Other(e.into())) - .to_rpc_result()?, + .validate_blobs_bundle(request.request.blobs_bundle)?, }, request.request.execution_requests.into(), ), - ) - .to_rpc_result()? + )? .try_seal_with_senders() - .map_err(|_| EthApiError::InvalidTransactionSignature)?; + .map_err(|_| ValidationApiError::InvalidTransactionSignature)?; self.validate_message_against_block( block, @@ -428,8 +393,70 @@ where request.registered_gas_limit, ) .await - .map_err(|e| RethError::Other(e.into())) - .to_rpc_result() + } +} + +#[async_trait] +impl BlockSubmissionValidationApiServer for ValidationApi +where + Provider: BlockReaderIdExt + + ChainSpecProvider + + StateProviderFactory + + HeaderProvider + + AccountReader + + WithdrawalsProvider + + Clone + + 'static, + E: BlockExecutorProvider, +{ + async fn validate_builder_submission_v1( + &self, + _request: BuilderBlockValidationRequest, + ) -> RpcResult<()> { + Err(internal_rpc_err("unimplemented")) + } + + async fn validate_builder_submission_v2( + &self, + _request: BuilderBlockValidationRequestV2, + ) -> RpcResult<()> { + Err(internal_rpc_err("unimplemented")) + } + + /// Validates a block submitted to the relay + async fn validate_builder_submission_v3( + &self, + request: BuilderBlockValidationRequestV3, + ) -> RpcResult<()> { + let this = self.clone(); + let (tx, rx) = oneshot::channel(); + + self.task_spawner.spawn_blocking(Box::pin(async move { + let result = Self::validate_builder_submission_v3(&this, request) + .await + .map_err(|err| internal_rpc_err(err.to_string())); + let _ = tx.send(result); + })); + + rx.await.map_err(|_| internal_rpc_err("Internal blocking task error"))? 
+ } + + /// Validates a block submitted to the relay + async fn validate_builder_submission_v4( + &self, + request: BuilderBlockValidationRequestV4, + ) -> RpcResult<()> { + let this = self.clone(); + let (tx, rx) = oneshot::channel(); + + self.task_spawner.spawn_blocking(Box::pin(async move { + let result = Self::validate_builder_submission_v4(&this, request) + .await + .map_err(|err| internal_rpc_err(err.to_string())); + let _ = tx.send(result); + })); + + rx.await.map_err(|_| internal_rpc_err("Internal blocking task error"))? } } @@ -450,6 +477,8 @@ pub struct ValidationApiInner { /// latest head block state. Uses async `RwLock` to safely handle concurrent validation /// requests. cached_state: RwLock<(B256, CachedReads)>, + /// Task spawner for blocking operations + task_spawner: Box, } /// Configuration for validation API. @@ -476,6 +505,9 @@ pub enum ValidationApiError { ProposerPayment, #[error("invalid blobs bundle")] InvalidBlobsBundle, + /// When the transaction signature is invalid + #[error("invalid transaction signature")] + InvalidTransactionSignature, #[error("block accesses blacklisted address: {_0}")] Blacklist(Address), #[error(transparent)] @@ -486,4 +518,6 @@ pub enum ValidationApiError { Provider(#[from] ProviderError), #[error(transparent)] Execution(#[from] BlockExecutionError), + #[error(transparent)] + Payload(#[from] PayloadError), } diff --git a/crates/stages/stages/Cargo.toml b/crates/stages/stages/Cargo.toml index 74ad23fbc7a..432af6f8a72 100644 --- a/crates/stages/stages/Cargo.toml +++ b/crates/stages/stages/Cargo.toml @@ -42,6 +42,7 @@ reth-testing-utils = { workspace = true, optional = true } # scroll reth-scroll-primitives = { workspace = true, optional = true } +alloy-eips.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true diff --git a/crates/stages/stages/src/lib.rs b/crates/stages/stages/src/lib.rs index 38a0f209dbd..ce6a96cf349 100644 --- a/crates/stages/stages/src/lib.rs +++ b/crates/stages/stages/src/lib.rs @@ -37,7 +37,7 @@ //! # let consensus: Arc = Arc::new(TestConsensus::default()); //! # let headers_downloader = ReverseHeadersDownloaderBuilder::default().build( //! # Arc::new(TestHeadersClient::default()), -//! # consensus.clone() +//! # consensus.clone().as_header_validator() //! # ); //! # let provider_factory = create_test_provider_factory(); //! # let bodies_downloader = BodiesDownloaderBuilder::default().build( diff --git a/crates/stages/stages/src/sets.rs b/crates/stages/stages/src/sets.rs index a25fcd4e1e5..d04a96470a0 100644 --- a/crates/stages/stages/src/sets.rs +++ b/crates/stages/stages/src/sets.rs @@ -76,7 +76,11 @@ use tokio::sync::watch; /// - [`PruneStage`] (execute) /// - [`FinishStage`] #[derive(Debug)] -pub struct DefaultStages { +pub struct DefaultStages +where + H: HeaderDownloader, + B: BodyDownloader, +{ /// Configuration for the online stages online: OnlineStages, /// Executor factory needs for execution stage @@ -87,13 +91,17 @@ pub struct DefaultStages { prune_modes: PruneModes, } -impl DefaultStages { +impl DefaultStages +where + H: HeaderDownloader, + B: BodyDownloader, +{ /// Create a new set of default stages with default values. 
#[allow(clippy::too_many_arguments)] pub fn new( provider: Provider, tip: watch::Receiver, - consensus: Arc, + consensus: Arc>, header_downloader: H, body_downloader: B, executor_factory: E, @@ -122,6 +130,8 @@ impl DefaultStages { impl DefaultStages where E: BlockExecutorProvider, + H: HeaderDownloader, + B: BodyDownloader, { /// Appends the default offline stages and default finish stage to the given builder. pub fn add_offline_stages( @@ -164,13 +174,17 @@ where /// These stages *can* be run without network access if the specified downloaders are /// themselves offline. #[derive(Debug)] -pub struct OnlineStages { +pub struct OnlineStages +where + H: HeaderDownloader, + B: BodyDownloader, +{ /// Sync gap provider for the headers stage. provider: Provider, /// The tip for the headers stage. tip: watch::Receiver, /// The consensus engine used to validate incoming data. - consensus: Arc, + consensus: Arc>, /// The block header downloader header_downloader: H, /// The block body downloader @@ -179,12 +193,16 @@ pub struct OnlineStages { stages_config: StageConfig, } -impl OnlineStages { +impl OnlineStages +where + H: HeaderDownloader, + B: BodyDownloader, +{ /// Create a new set of online stages with default values. pub fn new( provider: Provider, tip: watch::Receiver, - consensus: Arc, + consensus: Arc>, header_downloader: H, body_downloader: B, stages_config: StageConfig, @@ -196,7 +214,7 @@ impl OnlineStages { impl OnlineStages where P: HeaderSyncGapProvider + 'static, - H: HeaderDownloader + 'static, + H: HeaderDownloader
+ 'static, B: BodyDownloader + 'static, { /// Create a new builder using the given headers stage. @@ -229,7 +247,7 @@ where provider, header_downloader, tip, - consensus.clone(), + consensus.clone().as_header_validator(), stages_config.etl, )) .add_stage(bodies) @@ -239,7 +257,7 @@ where impl StageSet for OnlineStages where P: HeaderSyncGapProvider + 'static, - H: HeaderDownloader + 'static, + H: HeaderDownloader
+ 'static, B: BodyDownloader + 'static, HeaderStage: Stage, BodyStage: Stage, @@ -250,7 +268,7 @@ where self.provider, self.header_downloader, self.tip, - self.consensus.clone(), + self.consensus.clone().as_header_validator(), self.stages_config.etl.clone(), )) .add_stage(BodyStage::new(self.body_downloader)) diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index 07b97574972..b90729c7131 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -10,15 +10,13 @@ use tracing::*; use alloy_primitives::TxNumber; use reth_db::{tables, transaction::DbTx}; -use reth_db_api::{ - cursor::{DbCursorRO, DbCursorRW}, - transaction::DbTxMut, -}; +use reth_db_api::{cursor::DbCursorRO, transaction::DbTxMut}; use reth_network_p2p::bodies::{downloader::BodyDownloader, response::BlockResponse}; use reth_primitives::StaticFileSegment; use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, BlockReader, BlockWriter, DBProvider, ProviderError, StaticFileProviderFactory, StatsReader, + StorageLocation, }; use reth_stages_api::{ EntitiesCheckpoint, ExecInput, ExecOutput, Stage, StageCheckpoint, StageError, StageId, @@ -69,6 +67,82 @@ impl BodyStage { pub const fn new(downloader: D) -> Self { Self { downloader, buffer: None } } + + /// Ensures that static files and database are in sync. + fn ensure_consistency( + &self, + provider: &Provider, + unwind_block: Option, + ) -> Result<(), StageError> + where + Provider: DBProvider + BlockReader + StaticFileProviderFactory, + { + // Get id for the next tx_num of zero if there are no transactions. + let next_tx_num = provider + .tx_ref() + .cursor_read::()? + .last()? + .map(|(id, _)| id + 1) + .unwrap_or_default(); + + let static_file_provider = provider.static_file_provider(); + + // Make sure Transactions static file is at the same height. If it's further, this + // input execution was interrupted previously and we need to unwind the static file. + let next_static_file_tx_num = static_file_provider + .get_highest_static_file_tx(StaticFileSegment::Transactions) + .map(|id| id + 1) + .unwrap_or_default(); + + match next_static_file_tx_num.cmp(&next_tx_num) { + // If static files are ahead, we are currently unwinding the stage or we didn't reach + // the database commit in a previous stage run. So, our only solution is to unwind the + // static files and proceed from the database expected height. + Ordering::Greater => { + let highest_db_block = + provider.tx_ref().entries::()? as u64; + let mut static_file_producer = + static_file_provider.latest_writer(StaticFileSegment::Transactions)?; + static_file_producer + .prune_transactions(next_static_file_tx_num - next_tx_num, highest_db_block)?; + // Since this is a database <-> static file inconsistency, we commit the change + // straight away. + static_file_producer.commit()?; + } + // If static files are behind, then there was some corruption or loss of files. This + // error will trigger an unwind, that will bring the database to the same height as the + // static files. + Ordering::Less => { + // If we are already in the process of unwind, this might be fine because we will + // fix the inconsistency right away. + if let Some(unwind_to) = unwind_block { + let next_tx_num_after_unwind = provider + .tx_ref() + .get::(unwind_to)? + .map(|b| b.next_tx_num()) + .ok_or(ProviderError::BlockBodyIndicesNotFound(unwind_to))?; + + // This means we need a deeper unwind. 
+ if next_tx_num_after_unwind > next_static_file_tx_num { + return Err(missing_static_data_error( + next_static_file_tx_num.saturating_sub(1), + &static_file_provider, + provider, + )?) + } + } else { + return Err(missing_static_data_error( + next_static_file_tx_num.saturating_sub(1), + &static_file_provider, + provider, + )?) + } + } + Ordering::Equal => {} + } + + Ok(()) + } } impl Stage for BodyStage @@ -121,84 +195,22 @@ where } let (from_block, to_block) = input.next_block_range().into_inner(); - // Get id for the next tx_num of zero if there are no transactions. - let mut next_tx_num = provider - .tx_ref() - .cursor_read::()? - .last()? - .map(|(id, _)| id + 1) - .unwrap_or_default(); - - let static_file_provider = provider.static_file_provider(); - let mut static_file_producer = - static_file_provider.get_writer(from_block, StaticFileSegment::Transactions)?; - - // Make sure Transactions static file is at the same height. If it's further, this - // input execution was interrupted previously and we need to unwind the static file. - let next_static_file_tx_num = static_file_provider - .get_highest_static_file_tx(StaticFileSegment::Transactions) - .map(|id| id + 1) - .unwrap_or_default(); - - match next_static_file_tx_num.cmp(&next_tx_num) { - // If static files are ahead, then we didn't reach the database commit in a previous - // stage run. So, our only solution is to unwind the static files and proceed from the - // database expected height. - Ordering::Greater => { - static_file_producer - .prune_transactions(next_static_file_tx_num - next_tx_num, from_block - 1)?; - // Since this is a database <-> static file inconsistency, we commit the change - // straight away. - static_file_producer.commit()?; - } - // If static files are behind, then there was some corruption or loss of files. This - // error will trigger an unwind, that will bring the database to the same height as the - // static files. - Ordering::Less => { - return Err(missing_static_data_error( - next_static_file_tx_num.saturating_sub(1), - &static_file_provider, - provider, - )?) - } - Ordering::Equal => {} - } + self.ensure_consistency(provider, None)?; - debug!(target: "sync::stages::bodies", stage_progress = from_block, target = to_block, start_tx_id = next_tx_num, "Commencing sync"); + debug!(target: "sync::stages::bodies", stage_progress = from_block, target = to_block, "Commencing sync"); let buffer = self.buffer.take().ok_or(StageError::MissingDownloadBuffer)?; trace!(target: "sync::stages::bodies", bodies_len = buffer.len(), "Writing blocks"); - let mut highest_block = from_block; - - // Firstly, write transactions to static files - for response in &buffer { - let block_number = response.block_number(); - - // Increment block on static file header. - if block_number > 0 { - static_file_producer.increment_block(block_number)?; - } - - match response { - BlockResponse::Full(block) => { - // Write transactions - for transaction in block.body.transactions() { - static_file_producer.append_transaction(next_tx_num, transaction)?; + let highest_block = buffer.last().map(|r| r.block_number()).unwrap_or(from_block); - // Increment transaction id for each transaction. - next_tx_num += 1; - } - } - BlockResponse::Empty(_) => {} - }; - - highest_block = block_number; - } - - // Write bodies to database. This will NOT write transactions to database as we've already - // written them directly to static files. + // Write bodies to database. 
provider.append_block_bodies( - buffer.into_iter().map(|response| (response.block_number(), response.into_body())), + buffer + .into_iter() + .map(|response| (response.block_number(), response.into_body())) + .collect(), + // We are writing transactions directly to static files. + StorageLocation::StaticFiles, )?; // The stage is "done" if: @@ -220,66 +232,8 @@ where ) -> Result { self.buffer.take(); - let static_file_provider = provider.static_file_provider(); - let tx = provider.tx_ref(); - // Cursors to unwind bodies, ommers - let mut body_cursor = tx.cursor_write::()?; - let mut ommers_cursor = tx.cursor_write::()?; - let mut withdrawals_cursor = tx.cursor_write::()?; - // Cursors to unwind transitions - let mut tx_block_cursor = tx.cursor_write::()?; - - let mut rev_walker = body_cursor.walk_back(None)?; - while let Some((number, block_meta)) = rev_walker.next().transpose()? { - if number <= input.unwind_to { - break - } - - // Delete the ommers entry if any - if ommers_cursor.seek_exact(number)?.is_some() { - ommers_cursor.delete_current()?; - } - - // Delete the withdrawals entry if any - if withdrawals_cursor.seek_exact(number)?.is_some() { - withdrawals_cursor.delete_current()?; - } - - // Delete all transaction to block values. - if !block_meta.is_empty() && - tx_block_cursor.seek_exact(block_meta.last_tx_num())?.is_some() - { - tx_block_cursor.delete_current()?; - } - - // Delete the current body value - rev_walker.delete_current()?; - } - - let mut static_file_producer = - static_file_provider.latest_writer(StaticFileSegment::Transactions)?; - - // Unwind from static files. Get the current last expected transaction from DB, and match it - // on static file - let db_tx_num = - body_cursor.last()?.map(|(_, block_meta)| block_meta.last_tx_num()).unwrap_or_default(); - let static_file_tx_num: u64 = static_file_provider - .get_highest_static_file_tx(StaticFileSegment::Transactions) - .unwrap_or_default(); - - // If there are more transactions on database, then we are missing static file data and we - // need to unwind further. - if db_tx_num > static_file_tx_num { - return Err(missing_static_data_error( - static_file_tx_num, - &static_file_provider, - provider, - )?) - } - - // Unwinds static file - static_file_producer - .prune_transactions(static_file_tx_num.saturating_sub(db_tx_num), input.unwind_to)?; + self.ensure_consistency(provider, Some(input.unwind_to))?; + provider.remove_bodies_above(input.unwind_to, StorageLocation::Both)?; Ok(UnwindOutput { checkpoint: StageCheckpoint::new(input.unwind_to) @@ -288,6 +242,8 @@ where } } +/// Called when database is ahead of static files. Attempts to find the first block we are missing +/// transactions for. fn missing_static_data_error( last_tx_num: TxNumber, static_file_provider: &StaticFileProvider, @@ -567,9 +523,9 @@ mod tests { }, }; use alloy_consensus::Header; - use alloy_primitives::{BlockHash, BlockNumber, TxNumber, B256}; + use alloy_primitives::{BlockNumber, TxNumber, B256}; use futures_util::Stream; - use reth_db::{static_file::HeaderMask, tables}; + use reth_db::{static_file::HeaderWithHashMask, tables}; use reth_db_api::{ cursor::DbCursorRO, models::{StoredBlockBodyIndices, StoredBlockOmmers}, @@ -857,7 +813,7 @@ mod tests { for header in static_file_provider.fetch_range_iter( StaticFileSegment::Headers, *range.start()..*range.end() + 1, - |cursor, number| cursor.get_two::>(number.into()), + |cursor, number| cursor.get_two::>(number.into()), )? 
{ let (header, hash) = header?; self.headers.push_back(SealedHeader::new(header, hash)); diff --git a/crates/stages/stages/src/stages/hashing_account.rs b/crates/stages/stages/src/stages/hashing_account.rs index 8bdaf6ce557..895943b83a9 100644 --- a/crates/stages/stages/src/stages/hashing_account.rs +++ b/crates/stages/stages/src/stages/hashing_account.rs @@ -61,7 +61,11 @@ impl AccountHashingStage { pub fn seed( provider: &reth_provider::DatabaseProvider, opts: SeedOpts, - ) -> Result, StageError> { + ) -> Result, StageError> + where + N::Primitives: + reth_primitives_traits::FullNodePrimitives, + { use alloy_primitives::U256; use reth_db_api::models::AccountBeforeTx; use reth_provider::{StaticFileProviderFactory, StaticFileWriter}; diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index 1ec55f7fd80..100fe4e979a 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -1,7 +1,7 @@ use alloy_primitives::{BlockHash, BlockNumber, Bytes, B256}; use futures_util::StreamExt; use reth_config::config::EtlConfig; -use reth_consensus::Consensus; +use reth_consensus::HeaderValidator; use reth_db::{tables, transaction::DbTx, RawKey, RawTable, RawValue}; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, @@ -48,7 +48,7 @@ pub struct HeaderStage { /// The tip for the stage. tip: watch::Receiver, /// Consensus client implementation - consensus: Arc, + consensus: Arc>, /// Current sync gap. sync_gap: Option, /// ETL collector with `HeaderHash` -> `BlockNumber` @@ -63,14 +63,14 @@ pub struct HeaderStage { impl HeaderStage where - Downloader: HeaderDownloader, + Downloader: HeaderDownloader
, { /// Create a new header stage pub fn new( database: Provider, downloader: Downloader, tip: watch::Receiver, - consensus: Arc, + consensus: Arc>, etl_config: EtlConfig, ) -> Self { Self { diff --git a/crates/stages/stages/src/stages/sender_recovery.rs b/crates/stages/stages/src/stages/sender_recovery.rs index 178fceb262b..d611062b565 100644 --- a/crates/stages/stages/src/stages/sender_recovery.rs +++ b/crates/stages/stages/src/stages/sender_recovery.rs @@ -667,11 +667,9 @@ mod tests { while let Some((_, body)) = body_cursor.next()? { for tx_id in body.tx_num_range() { let transaction: TransactionSigned = provider - .transaction_by_id_no_hash(tx_id)? - .map(|tx| TransactionSigned { - hash: Default::default(), // we don't require the hash - signature: tx.signature, - transaction: tx.transaction, + .transaction_by_id_unhashed(tx_id)? + .map(|tx| { + TransactionSigned::new_unhashed(tx.transaction, tx.signature) }) .expect("no transaction entry"); let signer = diff --git a/crates/stages/stages/src/stages/tx_lookup.rs b/crates/stages/stages/src/stages/tx_lookup.rs index 3fdcbd0da64..fab10b0f953 100644 --- a/crates/stages/stages/src/stages/tx_lookup.rs +++ b/crates/stages/stages/src/stages/tx_lookup.rs @@ -1,12 +1,15 @@ +use alloy_eips::eip2718::Encodable2718; use alloy_primitives::{TxHash, TxNumber}; use num_traits::Zero; use reth_config::config::{EtlConfig, TransactionLookupConfig}; -use reth_db::{tables, RawKey, RawValue}; +use reth_db::{table::Value, tables, RawKey, RawValue}; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, transaction::{DbTx, DbTxMut}, }; use reth_etl::Collector; +use reth_primitives::NodePrimitives; +use reth_primitives_traits::SignedTransaction; use reth_provider::{ BlockReader, DBProvider, PruneCheckpointReader, PruneCheckpointWriter, StaticFileProviderFactory, StatsReader, TransactionsProvider, TransactionsProviderExt, @@ -60,7 +63,7 @@ where + BlockReader + PruneCheckpointReader + StatsReader - + StaticFileProviderFactory + + StaticFileProviderFactory> + TransactionsProviderExt, { /// Return the id of the stage @@ -206,7 +209,7 @@ where for tx_id in body.tx_num_range() { // First delete the transaction and hash to id mapping if let Some(transaction) = static_file_provider.transaction_by_id(tx_id)? { - if tx_hash_number_cursor.seek_exact(transaction.hash())?.is_some() { + if tx_hash_number_cursor.seek_exact(transaction.trie_hash())?.is_some() { tx_hash_number_cursor.delete_current()?; } } @@ -383,7 +386,7 @@ mod tests { for block in &blocks[..=max_processed_block] { for transaction in &block.body.transactions { if block.number > max_pruned_block { - tx_hash_numbers.push((transaction.hash, tx_hash_number)); + tx_hash_numbers.push((transaction.hash(), tx_hash_number)); } tx_hash_number += 1; } diff --git a/crates/stages/stages/src/test_utils/test_db.rs b/crates/stages/stages/src/test_utils/test_db.rs index 772e9cb78d0..2f9712f8436 100644 --- a/crates/stages/stages/src/test_utils/test_db.rs +++ b/crates/stages/stages/src/test_utils/test_db.rs @@ -15,7 +15,7 @@ use reth_db_api::{ DatabaseError as DbError, }; use reth_primitives::{ - Account, Receipt, SealedBlock, SealedHeader, StaticFileSegment, StorageEntry, + Account, EthPrimitives, Receipt, SealedBlock, SealedHeader, StaticFileSegment, StorageEntry, }; use reth_provider::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, @@ -142,7 +142,7 @@ impl TestStageDB { /// Insert header to static file if `writer` exists, otherwise to DB. 
pub fn insert_header( - writer: Option<&mut StaticFileProviderRWRefMut<'_, ()>>, + writer: Option<&mut StaticFileProviderRWRefMut<'_, EthPrimitives>>, tx: &TX, header: &SealedHeader, td: U256, diff --git a/crates/static-file/static-file/Cargo.toml b/crates/static-file/static-file/Cargo.toml index d22b116cdc5..89f60687895 100644 --- a/crates/static-file/static-file/Cargo.toml +++ b/crates/static-file/static-file/Cargo.toml @@ -13,12 +13,14 @@ workspace = true [dependencies] # reth +reth-codecs.workspace = true reth-db.workspace = true reth-db-api.workspace = true reth-provider.workspace = true reth-storage-errors.workspace = true reth-tokio-util.workspace = true reth-prune-types.workspace = true +reth-primitives-traits.workspace = true reth-static-file-types.workspace = true reth-stages-types.workspace = true diff --git a/crates/static-file/static-file/src/segments/transactions.rs b/crates/static-file/static-file/src/segments/transactions.rs index 59ec94be9e4..168ae94817b 100644 --- a/crates/static-file/static-file/src/segments/transactions.rs +++ b/crates/static-file/static-file/src/segments/transactions.rs @@ -1,7 +1,9 @@ use crate::segments::Segment; use alloy_primitives::BlockNumber; -use reth_db::tables; +use reth_codecs::Compact; +use reth_db::{table::Value, tables}; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ providers::StaticFileWriter, BlockReader, DBProvider, StaticFileProviderFactory, }; @@ -13,8 +15,11 @@ use std::ops::RangeInclusive; #[derive(Debug, Default)] pub struct Transactions; -impl Segment - for Transactions +impl Segment for Transactions +where + Provider: StaticFileProviderFactory> + + DBProvider + + BlockReader, { fn segment(&self) -> StaticFileSegment { StaticFileSegment::Transactions @@ -38,8 +43,9 @@ impl Segment()?; + let mut transactions_cursor = provider.tx_ref().cursor_read::::Primitives as NodePrimitives>::SignedTx, + >>()?; let transactions_walker = transactions_cursor.walk_range(block_body_indices.tx_num_range())?; diff --git a/crates/static-file/static-file/src/static_file_producer.rs b/crates/static-file/static-file/src/static_file_producer.rs index 8959819e821..371a344d872 100644 --- a/crates/static-file/static-file/src/static_file_producer.rs +++ b/crates/static-file/static-file/src/static_file_producer.rs @@ -4,6 +4,9 @@ use crate::{segments, segments::Segment, StaticFileProducerEvent}; use alloy_primitives::BlockNumber; use parking_lot::Mutex; use rayon::prelude::*; +use reth_codecs::Compact; +use reth_db::table::Value; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ providers::StaticFileWriter, BlockReader, ChainStateBlockReader, DBProvider, DatabaseProviderFactory, StageCheckpointReader, StaticFileProviderFactory, @@ -86,7 +89,10 @@ impl StaticFileProducerInner where Provider: StaticFileProviderFactory + DatabaseProviderFactory< - Provider: StaticFileProviderFactory + StageCheckpointReader + BlockReader, + Provider: StaticFileProviderFactory< + Primitives: NodePrimitives, + > + StageCheckpointReader + + BlockReader, >, { /// Listen for events on the `static_file_producer`. 
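The static-file and stage changes above all follow one pattern: instead of hardcoding `TransactionSigned`, components name their types through an associated `NodePrimitives` bundle on the provider (`StaticFileProviderFactory<Primitives: NodePrimitives<SignedTx = ...>>`). Below is a minimal, self-contained sketch of that pattern; every trait and type in it (`NodePrimitives`, `StaticFileProviderFactory`, `EthPrimitives`, `EthProvider`, `copy_transactions_to_static_file`) is a simplified stand-in for illustration, not reth's actual definition:

```rust
use std::fmt::Debug;

/// Stand-in for reth's `NodePrimitives`: bundles the network-specific types.
trait NodePrimitives {
    type SignedTx: Debug;
}

/// Stand-in for `StaticFileProviderFactory`: a provider that names its primitives.
trait StaticFileProviderFactory {
    type Primitives: NodePrimitives;
    fn transactions(&self) -> Vec<<Self::Primitives as NodePrimitives>::SignedTx>;
}

/// Ethereum flavor of the primitives, mirroring `EthPrimitives`.
struct EthPrimitives;

#[derive(Debug)]
struct EthSignedTx(u64);

impl NodePrimitives for EthPrimitives {
    type SignedTx = EthSignedTx;
}

struct EthProvider;

impl StaticFileProviderFactory for EthProvider {
    type Primitives = EthPrimitives;
    fn transactions(&self) -> Vec<EthSignedTx> {
        vec![EthSignedTx(0), EthSignedTx(1)]
    }
}

/// Generic "segment" that never names a concrete transaction type, mirroring
/// the spirit of the new `impl<Provider> Segment<Provider> for Transactions` bound.
fn copy_transactions_to_static_file<P: StaticFileProviderFactory>(provider: &P) {
    for tx in provider.transactions() {
        // A real segment would append `tx` to a static-file writer here.
        println!("appending {tx:?}");
    }
}

fn main() {
    copy_transactions_to_static_file(&EthProvider);
}
```

The payoff is the same as in the diff: because the segment only constrains the associated types it actually touches, another network (for example an OP-stack node) can reuse it by supplying its own `Primitives`.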
diff --git a/crates/storage/codecs/Cargo.toml b/crates/storage/codecs/Cargo.toml index 20a0673dff6..57fe9f726c7 100644 --- a/crates/storage/codecs/Cargo.toml +++ b/crates/storage/codecs/Cargo.toml @@ -81,7 +81,7 @@ serde = [ "alloy-primitives/serde", "alloy-trie?/serde", "bytes/serde", - "op-alloy-consensus?/serde" + "op-alloy-consensus?/serde", ] arbitrary = [ "alloy-consensus?/arbitrary", diff --git a/crates/storage/codecs/src/lib.rs b/crates/storage/codecs/src/lib.rs index 284c6454f83..86d397ad24f 100644 --- a/crates/storage/codecs/src/lib.rs +++ b/crates/storage/codecs/src/lib.rs @@ -17,13 +17,14 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + pub use reth_codecs_derive::*; use serde as _; use alloy_primitives::{Address, Bloom, Bytes, FixedBytes, U256}; use bytes::{Buf, BufMut}; -extern crate alloc; use alloc::vec::Vec; #[cfg(feature = "test-utils")] @@ -33,6 +34,8 @@ pub mod alloy; #[cfg(any(test, feature = "alloy"))] mod alloy; +pub mod txtype; + #[cfg(any(test, feature = "test-utils"))] pub mod test_utils; diff --git a/crates/storage/codecs/src/txtype.rs b/crates/storage/codecs/src/txtype.rs new file mode 100644 index 00000000000..ce392b59cd0 --- /dev/null +++ b/crates/storage/codecs/src/txtype.rs @@ -0,0 +1,15 @@ +//! Commonly used constants for transaction types. + +/// Identifier parameter for legacy transaction +pub const COMPACT_IDENTIFIER_LEGACY: usize = 0; + +/// Identifier parameter for EIP-2930 transaction +pub const COMPACT_IDENTIFIER_EIP2930: usize = 1; + +/// Identifier parameter for EIP-1559 transaction +pub const COMPACT_IDENTIFIER_EIP1559: usize = 2; + +/// For backwards compatibility purposes only 2 bits of the type are encoded in the identifier +/// parameter. In the case of a [`COMPACT_EXTENDED_IDENTIFIER_FLAG`], the full transaction type is +/// read from the buffer as a single byte. 
+pub const COMPACT_EXTENDED_IDENTIFIER_FLAG: usize = 3; diff --git a/crates/storage/db-api/Cargo.toml b/crates/storage/db-api/Cargo.toml index 9b8589cb6aa..3aa908a6009 100644 --- a/crates/storage/db-api/Cargo.toml +++ b/crates/storage/db-api/Cargo.toml @@ -16,7 +16,7 @@ workspace = true reth-codecs.workspace = true reth-db-models.workspace = true reth-primitives = { workspace = true, features = ["reth-codec"] } -reth-primitives-traits.workspace = true +reth-primitives-traits = { workspace = true, features = ["serde", "reth-codec"] } reth-prune-types.workspace = true reth-stages-types.workspace = true reth-storage-errors.workspace = true diff --git a/crates/storage/db-api/src/models/mod.rs b/crates/storage/db-api/src/models/mod.rs index 00787194c71..5d18711922e 100644 --- a/crates/storage/db-api/src/models/mod.rs +++ b/crates/storage/db-api/src/models/mod.rs @@ -8,7 +8,9 @@ use alloy_consensus::Header; use alloy_genesis::GenesisAccount; use alloy_primitives::{Address, Bytes, Log, B256, U256}; use reth_codecs::{add_arbitrary_tests, Compact}; -use reth_primitives::{Account, Bytecode, Receipt, StorageEntry, TransactionSignedNoHash, TxType}; +use reth_primitives::{ + Account, Bytecode, Receipt, StorageEntry, TransactionSigned, TransactionSignedNoHash, TxType, +}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::StageCheckpoint; use reth_trie_common::{StoredNibbles, StoredNibblesSubKey, *}; @@ -225,6 +227,7 @@ impl_compression_for_compact!( Bytecode, AccountBeforeTx, TransactionSignedNoHash, + TransactionSigned, CompactU256, StageCheckpoint, PruneCheckpoint, @@ -312,7 +315,7 @@ mod tests { fn test_ensure_backwards_compatibility() { use super::*; use reth_codecs::{test_utils::UnusedBits, validate_bitflag_backwards_compat}; - use reth_primitives::{Account, Receipt, ReceiptWithBloom}; + use reth_primitives::{Account, Receipt}; use reth_prune_types::{PruneCheckpoint, PruneMode, PruneSegment}; use reth_stages_types::{ AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, @@ -333,7 +336,6 @@ mod tests { assert_eq!(PruneMode::bitflag_encoded_bytes(), 1); assert_eq!(PruneSegment::bitflag_encoded_bytes(), 1); assert_eq!(Receipt::bitflag_encoded_bytes(), 1); - assert_eq!(ReceiptWithBloom::bitflag_encoded_bytes(), 0); assert_eq!(StageCheckpoint::bitflag_encoded_bytes(), 1); assert_eq!(StageUnitCheckpoint::bitflag_encoded_bytes(), 1); assert_eq!(StoredBlockBodyIndices::bitflag_encoded_bytes(), 1); @@ -355,7 +357,6 @@ mod tests { validate_bitflag_backwards_compat!(PruneMode, UnusedBits::Zero); validate_bitflag_backwards_compat!(PruneSegment, UnusedBits::Zero); validate_bitflag_backwards_compat!(Receipt, UnusedBits::Zero); - validate_bitflag_backwards_compat!(ReceiptWithBloom, UnusedBits::Zero); validate_bitflag_backwards_compat!(StageCheckpoint, UnusedBits::NotZero); validate_bitflag_backwards_compat!(StageUnitCheckpoint, UnusedBits::Zero); validate_bitflag_backwards_compat!(StoredBlockBodyIndices, UnusedBits::Zero); diff --git a/crates/storage/db-models/Cargo.toml b/crates/storage/db-models/Cargo.toml index 59d95c2263d..0997c08b784 100644 --- a/crates/storage/db-models/Cargo.toml +++ b/crates/storage/db-models/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] # reth reth-codecs.workspace = true -reth-primitives-traits.workspace = true +reth-primitives-traits = { workspace = true, features = ["serde", "reth-codec"] } # ethereum alloy-primitives.workspace = true diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index 
7400d94a89c..21891bce1f6 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -15,7 +15,7 @@ workspace = true # reth reth-db-api.workspace = true reth-primitives = { workspace = true, features = ["reth-codec"] } -reth-primitives-traits.workspace = true +reth-primitives-traits = { workspace = true, features = ["serde", "reth-codec"] } reth-fs-util.workspace = true reth-storage-errors.workspace = true reth-nippy-jar.workspace = true @@ -48,7 +48,6 @@ page_size = { version = "0.6.0", optional = true } thiserror.workspace = true tempfile = { workspace = true, optional = true } derive_more.workspace = true -paste.workspace = true rustc-hash = { workspace = true, optional = true } sysinfo = { version = "0.31", default-features = false, features = ["system"] } parking_lot = { workspace = true, optional = true } diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index 1d0be757466..e564a25e859 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -66,7 +66,7 @@ impl DatabaseEnvKind { } /// Arguments for database initialization. -#[derive(Clone, Debug, Default)] +#[derive(Clone, Debug)] pub struct DatabaseArguments { /// Client version that accesses the database. client_version: ClientVersion, @@ -99,6 +99,12 @@ pub struct DatabaseArguments { exclusive: Option, } +impl Default for DatabaseArguments { + fn default() -> Self { + Self::new(ClientVersion::default()) + } +} + impl DatabaseArguments { /// Create new database arguments with given client version. pub fn new(client_version: ClientVersion) -> Self { diff --git a/crates/storage/db/src/static_file/mask.rs b/crates/storage/db/src/static_file/mask.rs index f5d35a193d7..38831ea34ca 100644 --- a/crates/storage/db/src/static_file/mask.rs +++ b/crates/storage/db/src/static_file/mask.rs @@ -1,38 +1,5 @@ use reth_db_api::table::Decompress; -/// Generic Mask helper struct for selecting specific column values to read and decompress. -/// -/// #### Explanation: -/// -/// A `NippyJar` static file row can contain multiple column values. To specify the column values -/// to be read, a mask is utilized. -/// -/// For example, a static file with three columns, if the first and last columns are queried, the -/// mask `0b101` would be passed. To select only the second column, the mask `0b010` would be used. -/// -/// Since each static file has its own column distribution, different wrapper types are necessary. -/// For instance, `B256` might be the third column in the `Header` segment, while being the second -/// column in another segment. Hence, `Mask` would only be applicable to one of these -/// scenarios. -/// -/// Alongside, the column selector traits (eg. [`ColumnSelectorOne`]) this provides a structured way -/// to tie the types to be decoded to the mask necessary to query them. -#[derive(Debug)] -pub struct Mask(std::marker::PhantomData<(FIRST, SECOND, THIRD)>); - -macro_rules! add_segments { - ($($segment:tt),+) => { - paste::paste! { - $( - #[doc = concat!("Mask for ", stringify!($segment), " static file segment. See [`Mask`] for more.")] - #[derive(Debug)] - pub struct [<$segment Mask>](Mask); - )+ - } - }; -} -add_segments!(Header, Receipt, Transaction); - /// Trait for specifying a mask to select one column value. 
pub trait ColumnSelectorOne { /// First desired column value @@ -66,21 +33,45 @@ pub trait ColumnSelectorThree { #[macro_export] /// Add mask to select `N` column values from a specific static file segment row. macro_rules! add_static_file_mask { - ($mask_struct:tt, $type1:ty, $mask:expr) => { - impl ColumnSelectorOne for $mask_struct<$type1> { + ($(#[$attr:meta])* $mask_struct:ident $(<$generic:ident>)?, $type1:ty, $mask:expr) => { + $(#[$attr])* + #[derive(Debug)] + pub struct $mask_struct$(<$generic>)?$((std::marker::PhantomData<$generic>))?; + + impl$(<$generic>)? ColumnSelectorOne for $mask_struct$(<$generic>)? + where + $type1: Send + Sync + std::fmt::Debug + reth_db_api::table::Decompress, + { type FIRST = $type1; const MASK: usize = $mask; } }; - ($mask_struct:tt, $type1:ty, $type2:ty, $mask:expr) => { - impl ColumnSelectorTwo for $mask_struct<$type1, $type2> { + ($(#[$attr:meta])* $mask_struct:ident $(<$generic:ident>)?, $type1:ty, $type2:ty, $mask:expr) => { + $(#[$attr])* + #[derive(Debug)] + pub struct $mask_struct$(<$generic>)?$((std::marker::PhantomData<$generic>))?; + + impl$(<$generic>)? ColumnSelectorTwo for $mask_struct$(<$generic>)? + where + $type1: Send + Sync + std::fmt::Debug + reth_db_api::table::Decompress, + $type2: Send + Sync + std::fmt::Debug + reth_db_api::table::Decompress, + { type FIRST = $type1; type SECOND = $type2; const MASK: usize = $mask; } }; - ($mask_struct:tt, $type1:ty, $type2:ty, $type3:ty, $mask:expr) => { - impl ColumnSelectorThree for $mask_struct<$type1, $type2, $type3> { + ($(#[$attr:meta])* $mask_struct:ident $(<$generic:ident>)?, $type1:ty, $type2:ty, $type3:ty, $mask:expr) => { + $(#[$attr])* + #[derive(Debug)] + pub struct $mask_struct$(<$generic>)?$((std::marker::PhantomData<$generic>))?; + + impl$(<$generic>)? ColumnSelectorThree for $mask_struct$(<$generic>)? + where + $type1: Send + Sync + std::fmt::Debug + reth_db_api::table::Decompress, + $type2: Send + Sync + std::fmt::Debug + reth_db_api::table::Decompress, + $type3: Send + Sync + std::fmt::Debug + reth_db_api::table::Decompress, + { type FIRST = $type1; type SECOND = $type2; type THIRD = $type3; diff --git a/crates/storage/db/src/static_file/masks.rs b/crates/storage/db/src/static_file/masks.rs index 405606389ba..17833e7ee29 100644 --- a/crates/storage/db/src/static_file/masks.rs +++ b/crates/storage/db/src/static_file/masks.rs @@ -1,23 +1,44 @@ -use super::{ReceiptMask, TransactionMask}; use crate::{ add_static_file_mask, - static_file::mask::{ColumnSelectorOne, ColumnSelectorTwo, HeaderMask}, - HeaderTerminalDifficulties, RawValue, Receipts, Transactions, + static_file::mask::{ColumnSelectorOne, ColumnSelectorTwo}, + HeaderTerminalDifficulties, }; -use alloy_consensus::Header; use alloy_primitives::BlockHash; use reth_db_api::table::Table; // HEADER MASKS -add_static_file_mask!(HeaderMask, Header, 0b001); -add_static_file_mask!(HeaderMask, ::Value, 0b010); -add_static_file_mask!(HeaderMask, BlockHash, 0b100); -add_static_file_mask!(HeaderMask, Header, BlockHash, 0b101); -add_static_file_mask!(HeaderMask, ::Value, BlockHash, 0b110); +add_static_file_mask! { + #[doc = "Mask for selecting a single header from Headers static file segment"] + HeaderMask, H, 0b001 +} +add_static_file_mask! { + #[doc = "Mask for selecting a total difficulty value from Headers static file segment"] + TotalDifficultyMask, ::Value, 0b010 +} +add_static_file_mask! 
{ + #[doc = "Mask for selecting a block hash value from Headers static file segment"] + BlockHashMask, BlockHash, 0b100 +} +add_static_file_mask! { + #[doc = "Mask for selecting a header along with block hash from Headers static file segment"] + HeaderWithHashMask, H, BlockHash, 0b101 +} +add_static_file_mask! { + #[doc = "Mask for selecting a total difficulty along with block hash from Headers static file segment"] + TDWithHashMask, + ::Value, + BlockHash, + 0b110 +} // RECEIPT MASKS -add_static_file_mask!(ReceiptMask, ::Value, 0b1); +add_static_file_mask! { + #[doc = "Mask for selecting a single receipt from Receipts static file segment"] + ReceiptMask, R, 0b1 +} // TRANSACTION MASKS -add_static_file_mask!(TransactionMask, ::Value, 0b1); -add_static_file_mask!(TransactionMask, RawValue<::Value>, 0b1); +add_static_file_mask! { + #[doc = "Mask for selecting a single transaction from Transactions static file segment"] + TransactionMask, T, 0b1 +} diff --git a/crates/storage/db/src/static_file/mod.rs b/crates/storage/db/src/static_file/mod.rs index 071835f566b..8491bd6ed77 100644 --- a/crates/storage/db/src/static_file/mod.rs +++ b/crates/storage/db/src/static_file/mod.rs @@ -17,6 +17,7 @@ use reth_primitives::{ }; mod masks; +pub use masks::*; /// Alias type for a map of [`StaticFileSegment`] and sorted lists of existing static file ranges. type SortedStaticFiles = diff --git a/crates/storage/errors/src/provider.rs b/crates/storage/errors/src/provider.rs index b6fcee545d5..9e6720b8440 100644 --- a/crates/storage/errors/src/provider.rs +++ b/crates/storage/errors/src/provider.rs @@ -1,13 +1,9 @@ use crate::{db::DatabaseError, lockfile::StorageLockError, writer::UnifiedStorageWriterError}; -use alloy_eips::BlockHashOrNumber; -use alloy_primitives::{Address, BlockHash, BlockNumber, TxNumber, B256, U256}; -use derive_more::Display; -use reth_primitives::{GotExpected, StaticFileSegment, TxHashOrNumber}; - -#[cfg(feature = "std")] -use std::path::PathBuf; - use alloc::{boxed::Box, string::String}; +use alloy_eips::{BlockHashOrNumber, HashOrNumber}; +use alloy_primitives::{Address, BlockHash, BlockNumber, TxNumber, B256}; +use derive_more::Display; +use reth_primitives::{GotExpected, StaticFileSegment}; /// Provider result type. pub type ProviderResult = Result; @@ -66,12 +62,12 @@ pub enum ProviderError { /// when required header related data was not found but was required. #[display("no header found for {_0:?}")] HeaderNotFound(BlockHashOrNumber), - /// The specific transaction is missing. + /// The specific transaction identified by hash or id is missing. #[display("no transaction found for {_0:?}")] - TransactionNotFound(TxHashOrNumber), - /// The specific receipt is missing + TransactionNotFound(HashOrNumber), + /// The specific receipt for a transaction identified by hash or id is missing #[display("no receipt found for {_0:?}")] - ReceiptNotFound(TxHashOrNumber), + ReceiptNotFound(HashOrNumber), /// Unable to find the best block. #[display("best block does not exist")] BestBlockNotFound, @@ -81,15 +77,6 @@ pub enum ProviderError { /// Unable to find the safe block. #[display("safe block does not exist")] SafeBlockNotFound, - /// Mismatch of sender and transaction. - #[display("mismatch of sender and transaction id {tx_id}")] - MismatchOfTransactionAndSenderId { - /// The transaction ID. - tx_id: TxNumber, - }, - /// Block body wrong transaction count. 
- #[display("stored block indices does not match transaction count")] - BlockBodyTransactionCount, /// Thrown when the cache service task dropped. #[display("cache service task stopped")] CacheServiceUnavailable, @@ -120,7 +107,7 @@ pub enum ProviderError { /// Static File is not found at specified path. #[cfg(feature = "std")] #[display("not able to find {_0} static file at {_1:?}")] - MissingStaticFilePath(StaticFileSegment, PathBuf), + MissingStaticFilePath(StaticFileSegment, std::path::PathBuf), /// Static File is not found for requested block. #[display("not able to find {_0} static file for block number {_1}")] MissingStaticFileBlock(StaticFileSegment, BlockNumber), @@ -139,9 +126,6 @@ pub enum ProviderError { /// Static File Provider was initialized as read-only. #[display("cannot get a writer on a read-only environment.")] ReadOnlyStaticFileAccess, - /// Error encountered when the block number conversion from U256 to u64 causes an overflow. - #[display("failed to convert block number U256 to u64: {_0}")] - BlockNumberOverflow(U256), /// Consistent view error. #[display("failed to initialize consistent view: {_0}")] ConsistentView(Box), diff --git a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h index 43960abfb4c..dfcba66063a 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h +++ b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h @@ -136,7 +136,7 @@ are only a few cases of changing data. | _DELETING_||| |Key is absent → Error since no such key |\ref mdbx_del() or \ref mdbx_replace()|Error \ref MDBX_NOTFOUND| |Key exist → Delete by key |\ref mdbx_del() with the parameter `data = NULL`|Deletion| -|Key exist → Delete by key with with data matching check|\ref mdbx_del() with the parameter `data` filled with the value which should be match for deletion|Deletion or \ref MDBX_NOTFOUND if the value does not match| +|Key exist → Delete by key with data matching check|\ref mdbx_del() with the parameter `data` filled with the value which should be match for deletion|Deletion or \ref MDBX_NOTFOUND if the value does not match| |Delete at the current cursor position |\ref mdbx_cursor_del() with \ref MDBX_CURRENT flag|Deletion| |Extract (read & delete) value by the key |\ref mdbx_replace() with zero flag and parameter `new_data = NULL`|Returning a deleted value| @@ -5264,7 +5264,7 @@ LIBMDBX_API int mdbx_dbi_sequence(MDBX_txn *txn, MDBX_dbi dbi, uint64_t *result, * This returns a comparison as if the two data items were keys in the * specified database. * - * \warning There ss a Undefined behavior if one of arguments is invalid. + * \warning There is a Undefined behavior if one of arguments is invalid. * * \param [in] txn A transaction handle returned by \ref mdbx_txn_begin(). * \param [in] dbi A database handle returned by \ref mdbx_dbi_open(). diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs index b1d174feb2c..98eddf22ee9 100644 --- a/crates/storage/nippy-jar/src/lib.rs +++ b/crates/storage/nippy-jar/src/lib.rs @@ -20,11 +20,6 @@ use std::{ ops::Range, path::{Path, PathBuf}, }; - -// Windows specific extension for std::fs -#[cfg(windows)] -use std::os::windows::prelude::OpenOptionsExt; - use tracing::*; /// Compression algorithms supported by `NippyJar`. 
diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 499536eb2cb..6f65d990dfd 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -17,6 +17,7 @@ reth-chainspec.workspace = true reth-blockchain-tree-api.workspace = true reth-execution-types.workspace = true reth-primitives = { workspace = true, features = ["reth-codec", "secp256k1"] } +reth-primitives-traits = { workspace = true, features = ["reth-codec"] } reth-fs-util.workspace = true reth-errors.workspace = true reth-storage-errors.workspace = true @@ -115,6 +116,8 @@ serde = [ "rand/serde", "revm/serde", "reth-codecs/serde", + "reth-optimism-primitives?/serde", + "reth-primitives-traits/serde", ] test-utils = [ "reth-db/test-utils", @@ -126,11 +129,13 @@ test-utils = [ "reth-evm/test-utils", "reth-network-p2p/test-utils", "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", "reth-codecs/test-utils", "reth-db-api/test-utils", "reth-trie-db/test-utils", "revm/test-utils", "reth-prune-types/test-utils", "reth-stages-types/test-utils", + "reth-optimism-primitives?/arbitrary", ] scroll = ["reth-scroll-primitives"] diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 083e7fb596b..967ac785b47 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -25,7 +25,7 @@ use reth_db::{models::BlockNumberAddress, transaction::DbTx, Database}; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; use reth_execution_types::ExecutionOutcome; -use reth_node_types::NodeTypesWithDB; +use reth_node_types::{NodeTypesWithDB, TxTy}; use reth_primitives::{ Account, Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedNoHash, @@ -331,29 +331,31 @@ impl BlockReader for BlockchainProvider2 { } impl TransactionsProvider for BlockchainProvider2 { + type Transaction = TxTy; + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { self.consistent_provider()?.transaction_id(tx_hash) } - fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { self.consistent_provider()?.transaction_by_id(id) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, id: TxNumber, - ) -> ProviderResult> { - self.consistent_provider()?.transaction_by_id_no_hash(id) + ) -> ProviderResult> { + self.consistent_provider()?.transaction_by_id_unhashed(id) } - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { self.consistent_provider()?.transaction_by_hash(hash) } fn transaction_by_hash_with_meta( &self, tx_hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { self.consistent_provider()?.transaction_by_hash_with_meta(tx_hash) } @@ -364,21 +366,21 @@ impl TransactionsProvider for BlockchainProvider2 { fn transactions_by_block( &self, id: BlockHashOrNumber, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.consistent_provider()?.transactions_by_block(id) } fn transactions_by_block_range( &self, range: impl RangeBounds, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.consistent_provider()?.transactions_by_block_range(range) } fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> 
ProviderResult> { + ) -> ProviderResult> { self.consistent_provider()?.transactions_by_tx_range(range) } @@ -889,8 +891,7 @@ mod tests { static_file_provider.latest_writer(StaticFileSegment::Transactions)?; transactions_writer.increment_block(block.number)?; for tx in block.body.transactions() { - let tx: TransactionSignedNoHash = tx.clone().into(); - transactions_writer.append_transaction(tx_num, &tx)?; + transactions_writer.append_transaction(tx_num, tx)?; tx_num += 1; } @@ -2245,9 +2246,7 @@ mod tests { (transactions_by_tx_range, |block: &SealedBlock, _: &Vec>| block .body .transactions - .iter() - .map(|tx| Into::::into(tx.clone())) - .collect::>()), + .clone()), (receipts_by_tx_range, |block: &SealedBlock, receipts: &Vec>| receipts [block.number as usize] .clone()) @@ -2589,12 +2588,10 @@ mod tests { ), ( ONE, - transaction_by_id_no_hash, + transaction_by_id_unhashed, |block: &SealedBlock, tx_num: TxNumber, _: B256, _: &Vec>| ( tx_num, - Some(Into::::into( - block.body.transactions[test_tx_index].clone() - )) + Some(block.body.transactions[test_tx_index].clone()) ), u64::MAX ), diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index 3b2599f4999..fc9d739b0fe 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -18,9 +18,10 @@ use reth_db::models::BlockNumberAddress; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; use reth_execution_types::{BundleStateInit, ExecutionOutcome, RevertsInit}; +use reth_node_types::TxTy; use reth_primitives::{ Account, Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, - StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedNoHash, + StorageEntry, TransactionMeta, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; @@ -927,6 +928,8 @@ impl BlockReader for ConsistentProvider { } impl TransactionsProvider for ConsistentProvider { + type Transaction = TxTy; + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { self.get_in_memory_or_storage_by_tx( tx_hash.into(), @@ -935,23 +938,30 @@ impl TransactionsProvider for ConsistentProvider { ) } - fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { self.get_in_memory_or_storage_by_tx( id.into(), |provider| provider.transaction_by_id(id), |tx_index, _, block_state| { - Ok(block_state.block_ref().block().body.transactions.get(tx_index).cloned()) + Ok(block_state + .block_ref() + .block() + .body + .transactions + .get(tx_index) + .cloned() + .map(Into::into)) }, ) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, id: TxNumber, - ) -> ProviderResult> { + ) -> ProviderResult> { self.get_in_memory_or_storage_by_tx( id.into(), - |provider| provider.transaction_by_id_no_hash(id), + |provider| provider.transaction_by_id_unhashed(id), |tx_index, _, block_state| { Ok(block_state .block_ref() @@ -965,9 +975,9 @@ impl TransactionsProvider for ConsistentProvider { ) } - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { if let Some(tx) = self.head_block.as_ref().and_then(|b| b.transaction_on_chain(hash)) { - return Ok(Some(tx)) + return Ok(Some(tx.into())) } self.storage_provider.transaction_by_hash(hash) @@ -976,11 +986,11 @@ impl TransactionsProvider 
for ConsistentProvider { fn transaction_by_hash_with_meta( &self, tx_hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { if let Some((tx, meta)) = self.head_block.as_ref().and_then(|b| b.transaction_meta_on_chain(tx_hash)) { - return Ok(Some((tx, meta))) + return Ok(Some((tx.into(), meta))) } self.storage_provider.transaction_by_hash_with_meta(tx_hash) @@ -997,22 +1007,44 @@ impl TransactionsProvider for ConsistentProvider { fn transactions_by_block( &self, id: BlockHashOrNumber, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.get_in_memory_or_storage_by_block( id, |provider| provider.transactions_by_block(id), - |block_state| Ok(Some(block_state.block_ref().block().body.transactions.clone())), + |block_state| { + Ok(Some( + block_state + .block_ref() + .block() + .body + .transactions + .iter() + .map(|tx| tx.clone().into()) + .collect(), + )) + }, ) } fn transactions_by_block_range( &self, range: impl RangeBounds, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.transactions_by_block_range(range), - |block_state, _| Some(block_state.block_ref().block().body.transactions.clone()), + |block_state, _| { + Some( + block_state + .block_ref() + .block() + .body + .transactions + .iter() + .map(|tx| tx.clone().into()) + .collect(), + ) + }, |_| true, ) } @@ -1020,7 +1052,7 @@ impl TransactionsProvider for ConsistentProvider { fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.get_in_memory_or_storage_by_tx_range( range, |db_provider, db_range| db_provider.transactions_by_tx_range(db_range), diff --git a/crates/storage/provider/src/providers/database/chain.rs b/crates/storage/provider/src/providers/database/chain.rs new file mode 100644 index 00000000000..8f9a6395a9d --- /dev/null +++ b/crates/storage/provider/src/providers/database/chain.rs @@ -0,0 +1,26 @@ +use crate::{providers::NodeTypes, DatabaseProvider}; +use reth_db::transaction::{DbTx, DbTxMut}; +use reth_node_types::FullNodePrimitives; +use reth_primitives::EthPrimitives; +use reth_storage_api::{ChainStorageWriter, EthStorage}; + +/// Trait that provides access to implementations of [`ChainStorage`] +pub trait ChainStorage: Send + Sync { + /// Provides access to the chain writer. + fn writer(&self) -> impl ChainStorageWriter, Primitives> + where + TX: DbTxMut + DbTx + 'static, + Types: NodeTypes; +} + +impl ChainStorage for EthStorage { + fn writer( + &self, + ) -> impl ChainStorageWriter, EthPrimitives> + where + TX: DbTxMut + DbTx + 'static, + Types: NodeTypes, + { + self + } +} diff --git a/crates/storage/provider/src/providers/database/metrics.rs b/crates/storage/provider/src/providers/database/metrics.rs index 7e9ee7202c0..4ee8f1ce5b1 100644 --- a/crates/storage/provider/src/providers/database/metrics.rs +++ b/crates/storage/provider/src/providers/database/metrics.rs @@ -22,14 +22,6 @@ impl Default for DurationsRecorder { } impl DurationsRecorder { - /// Saves the provided duration for future logging and instantly reports as a metric with - /// `action` label. - pub(crate) fn record_duration(&mut self, action: Action, duration: Duration) { - self.actions.push((action, duration)); - self.current_metrics.record_duration(action, duration); - self.latest = Some(self.start.elapsed()); - } - /// Records the duration since last record, saves it for future logging and instantly reports as /// a metric with `action` label. 
pub(crate) fn record_relative(&mut self, action: Action) { @@ -56,11 +48,6 @@ pub(crate) enum Action { InsertHeaders, InsertHeaderNumbers, InsertHeaderTerminalDifficulties, - InsertBlockOmmers, - InsertTransactionSenders, - InsertTransactions, - InsertTransactionHashNumbers, - InsertBlockWithdrawals, InsertBlockBodyIndices, InsertTransactionBlocks, GetNextTxNum, @@ -95,16 +82,6 @@ struct DatabaseProviderMetrics { insert_header_numbers: Histogram, /// Duration of insert header TD insert_header_td: Histogram, - /// Duration of insert block ommers - insert_block_ommers: Histogram, - /// Duration of insert tx senders - insert_tx_senders: Histogram, - /// Duration of insert transactions - insert_transactions: Histogram, - /// Duration of insert transaction hash numbers - insert_tx_hash_numbers: Histogram, - /// Duration of insert block withdrawals - insert_block_withdrawals: Histogram, /// Duration of insert block body indices insert_block_body_indices: Histogram, /// Duration of insert transaction blocks @@ -131,11 +108,6 @@ impl DatabaseProviderMetrics { Action::InsertHeaders => self.insert_headers.record(duration), Action::InsertHeaderNumbers => self.insert_header_numbers.record(duration), Action::InsertHeaderTerminalDifficulties => self.insert_header_td.record(duration), - Action::InsertBlockOmmers => self.insert_block_ommers.record(duration), - Action::InsertTransactionSenders => self.insert_tx_senders.record(duration), - Action::InsertTransactions => self.insert_transactions.record(duration), - Action::InsertTransactionHashNumbers => self.insert_tx_hash_numbers.record(duration), - Action::InsertBlockWithdrawals => self.insert_block_withdrawals.record(duration), Action::InsertBlockBodyIndices => self.insert_block_body_indices.record(duration), Action::InsertTransactionBlocks => self.insert_tx_blocks.record(duration), Action::GetNextTxNum => self.get_next_tx_num.record(duration), diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 94c83bbb442..57f09e72306 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -19,10 +19,10 @@ use reth_db::{init_db, mdbx::DatabaseArguments, DatabaseEnv}; use reth_db_api::{database::Database, models::StoredBlockBodyIndices}; use reth_errors::{RethError, RethResult}; use reth_evm::ConfigureEvmEnv; -use reth_node_types::NodeTypesWithDB; +use reth_node_types::{NodeTypesWithDB, TxTy}; use reth_primitives::{ Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, - StaticFileSegment, TransactionMeta, TransactionSigned, TransactionSignedNoHash, + StaticFileSegment, TransactionMeta, }; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; @@ -44,6 +44,9 @@ use super::ProviderNodeTypes; mod metrics; +mod chain; +pub use chain::*; + /// A common provider that fetches data from a database or static file. /// /// This provider implements most provider or provider factory traits. @@ -56,19 +59,22 @@ pub struct ProviderFactory { static_file_provider: StaticFileProvider, /// Optional pruning configuration prune_modes: PruneModes, + /// The node storage handler. 
+ storage: Arc, } impl fmt::Debug for ProviderFactory where - N: NodeTypesWithDB, + N: NodeTypesWithDB, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let Self { db, chain_spec, static_file_provider, prune_modes } = self; + let Self { db, chain_spec, static_file_provider, prune_modes, storage } = self; f.debug_struct("ProviderFactory") .field("db", &db) .field("chain_spec", &chain_spec) .field("static_file_provider", &static_file_provider) .field("prune_modes", &prune_modes) + .field("storage", &storage) .finish() } } @@ -80,7 +86,13 @@ impl ProviderFactory { chain_spec: Arc, static_file_provider: StaticFileProvider, ) -> Self { - Self { db, chain_spec, static_file_provider, prune_modes: PruneModes::none() } + Self { + db, + chain_spec, + static_file_provider, + prune_modes: PruneModes::none(), + storage: Default::default(), + } } /// Enables metrics on the static file provider. @@ -121,6 +133,7 @@ impl>> ProviderFactory { chain_spec, static_file_provider, prune_modes: PruneModes::none(), + storage: Default::default(), }) } } @@ -139,6 +152,7 @@ impl ProviderFactory { self.chain_spec.clone(), self.static_file_provider.clone(), self.prune_modes.clone(), + self.storage.clone(), )) } @@ -153,6 +167,7 @@ impl ProviderFactory { self.chain_spec.clone(), self.static_file_provider.clone(), self.prune_modes.clone(), + self.storage.clone(), ))) } @@ -405,11 +420,13 @@ impl BlockReader for ProviderFactory { } impl TransactionsProvider for ProviderFactory { + type Transaction = TxTy; + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { self.provider()?.transaction_id(tx_hash) } - fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Transactions, id, @@ -418,26 +435,26 @@ impl TransactionsProvider for ProviderFactory { ) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, id: TxNumber, - ) -> ProviderResult> { + ) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Transactions, id, - |static_file| static_file.transaction_by_id_no_hash(id), - || self.provider()?.transaction_by_id_no_hash(id), + |static_file| static_file.transaction_by_id_unhashed(id), + || self.provider()?.transaction_by_id_unhashed(id), ) } - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { self.provider()?.transaction_by_hash(hash) } fn transaction_by_hash_with_meta( &self, tx_hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { self.provider()?.transaction_by_hash_with_meta(tx_hash) } @@ -448,21 +465,21 @@ impl TransactionsProvider for ProviderFactory { fn transactions_by_block( &self, id: BlockHashOrNumber, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.provider()?.transactions_by_block(id) } fn transactions_by_block_range( &self, range: impl RangeBounds, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.provider()?.transactions_by_block_range(range) } fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.provider()?.transactions_by_tx_range(range) } @@ -617,6 +634,7 @@ impl Clone for ProviderFactory { chain_spec: self.chain_spec.clone(), static_file_provider: self.static_file_provider.clone(), prune_modes: self.prune_modes.clone(), + storage: self.storage.clone(), } } } @@ -628,7 +646,7 @@ mod tests { 
providers::{StaticFileProvider, StaticFileWriter}, test_utils::{blocks::TEST_BLOCK, create_test_provider_factory, MockNodeTypesWithDB}, BlockHashReader, BlockNumReader, BlockWriter, DBProvider, HeaderSyncGapProvider, - TransactionsProvider, + StorageLocation, TransactionsProvider, }; use alloy_primitives::{TxNumber, B256, U256}; use assert_matches::assert_matches; @@ -699,14 +717,20 @@ mod tests { { let provider = factory.provider_rw().unwrap(); assert_matches!( - provider.insert_block(block.clone().try_seal_with_senders().unwrap()), + provider.insert_block( + block.clone().try_seal_with_senders().unwrap(), + StorageLocation::Database + ), Ok(_) ); assert_matches!( provider.transaction_sender(0), Ok(Some(sender)) if sender == block.body.transactions[0].recover_signer().unwrap() ); - assert_matches!(provider.transaction_id(block.body.transactions[0].hash), Ok(Some(0))); + assert_matches!( + provider.transaction_id(block.body.transactions[0].hash()), + Ok(Some(0)) + ); } { @@ -717,11 +741,14 @@ mod tests { }; let provider = factory.with_prune_modes(prune_modes).provider_rw().unwrap(); assert_matches!( - provider.insert_block(block.clone().try_seal_with_senders().unwrap(),), + provider.insert_block( + block.clone().try_seal_with_senders().unwrap(), + StorageLocation::Database + ), Ok(_) ); assert_matches!(provider.transaction_sender(0), Ok(None)); - assert_matches!(provider.transaction_id(block.body.transactions[0].hash), Ok(None)); + assert_matches!(provider.transaction_id(block.body.transactions[0].hash()), Ok(None)); } } @@ -738,7 +765,10 @@ mod tests { let provider = factory.provider_rw().unwrap(); assert_matches!( - provider.insert_block(block.clone().try_seal_with_senders().unwrap()), + provider.insert_block( + block.clone().try_seal_with_senders().unwrap(), + StorageLocation::Database + ), Ok(_) ); @@ -756,21 +786,6 @@ mod tests { let db_senders = provider.senders_by_tx_range(range); assert_eq!(db_senders, Ok(vec![])); - - let result = provider.take_block_transaction_range(0..=0); - assert_eq!( - result, - Ok(vec![( - 0, - block - .body - .transactions - .iter() - .cloned() - .map(|tx| tx.into_ecrecovered().unwrap()) - .collect() - )]) - ) } } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index b93112e7084..bf976203726 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1,18 +1,23 @@ use crate::{ bundle_state::StorageRevertsIter, - providers::{database::metrics, static_file::StaticFileWriter, StaticFileProvider}, + providers::{ + database::{chain::ChainStorage, metrics}, + static_file::StaticFileWriter, + NodeTypesForProvider, StaticFileProvider, + }, to_range, traits::{ AccountExtReader, BlockSource, ChangeSetReader, ReceiptProvider, StageCheckpointWriter, }, writer::UnifiedStorageWriter, - AccountReader, BlockExecutionWriter, BlockHashReader, BlockNumReader, BlockReader, BlockWriter, - BundleStateInit, ChainStateBlockReader, ChainStateBlockWriter, DBProvider, EvmEnvProvider, - HashingWriter, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, HistoricalStateProvider, - HistoricalStateProviderRef, HistoryWriter, LatestStateProvider, LatestStateProviderRef, - OriginalValuesKnown, ProviderError, PruneCheckpointReader, PruneCheckpointWriter, RevertsInit, - StageCheckpointReader, StateChangeWriter, StateProviderBox, StateReader, StateWriter, - StaticFileProviderFactory, StatsReader, StorageReader, 
StorageTrieWriter, TransactionVariant, + AccountReader, BlockBodyWriter, BlockExecutionWriter, BlockHashReader, BlockNumReader, + BlockReader, BlockWriter, BundleStateInit, ChainStateBlockReader, ChainStateBlockWriter, + DBProvider, EvmEnvProvider, HashingWriter, HeaderProvider, HeaderSyncGap, + HeaderSyncGapProvider, HistoricalStateProvider, HistoricalStateProviderRef, HistoryWriter, + LatestStateProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderError, + PruneCheckpointReader, PruneCheckpointWriter, RevertsInit, StageCheckpointReader, + StateChangeWriter, StateProviderBox, StateReader, StateWriter, StaticFileProviderFactory, + StatsReader, StorageLocation, StorageReader, StorageTrieWriter, TransactionVariant, TransactionsProvider, TransactionsProviderExt, TrieWriter, WithdrawalsProvider, }; use alloy_consensus::Header; @@ -21,7 +26,7 @@ use alloy_eips::{ BlockHashOrNumber, }; use alloy_primitives::{keccak256, Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; -use itertools::{izip, Itertools}; +use itertools::Itertools; use rayon::slice::ParallelSliceMut; use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec, EthereumHardforks}; use reth_db::{ @@ -32,21 +37,22 @@ use reth_db_api::{ database::Database, models::{ sharded_key, storage_sharded_key::StorageShardedKey, AccountBeforeTx, BlockNumberAddress, - ShardedKey, StoredBlockBodyIndices, StoredBlockOmmers, StoredBlockWithdrawals, + ShardedKey, StoredBlockBodyIndices, }, table::Table, transaction::{DbTx, DbTxMut}, - DatabaseError, DbTxUnwindExt, + DatabaseError, }; use reth_evm::ConfigureEvmEnv; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_network_p2p::headers::downloader::SyncTarget; -use reth_node_types::NodeTypes; +use reth_node_types::{NodeTypes, TxTy}; use reth_primitives::{ - Account, Block, BlockBody, BlockWithSenders, Bytecode, GotExpected, Receipt, SealedBlock, - SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, TransactionMeta, - TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, + Account, Block, BlockBody, BlockWithSenders, Bytecode, GotExpected, NodePrimitives, Receipt, + SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, + TransactionMeta, TransactionSigned, TransactionSignedNoHash, }; +use reth_primitives_traits::{BlockBody as _, SignedTransaction}; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{StateProvider, StorageChangeSetReader, TryIntoHistoricalStateProvider}; @@ -67,10 +73,9 @@ use std::{ fmt::Debug, ops::{Deref, DerefMut, Range, RangeBounds, RangeInclusive}, sync::{mpsc, Arc}, - time::{Duration, Instant}, }; use tokio::sync::watch; -use tracing::{debug, error, trace, warn}; +use tracing::{debug, trace}; /// A [`DatabaseProvider`] that holds a read-only database transaction. pub type DatabaseProviderRO = DatabaseProvider<::TX, N>; @@ -138,6 +143,8 @@ pub struct DatabaseProvider { static_file_provider: StaticFileProvider, /// Pruning configuration prune_modes: PruneModes, + /// Node storage handler. 
+ storage: Arc, } impl DatabaseProvider { @@ -224,8 +231,9 @@ impl DatabaseProvider { chain_spec: Arc, static_file_provider: StaticFileProvider, prune_modes: PruneModes, + storage: Arc, ) -> Self { - Self { tx, chain_spec, static_file_provider, prune_modes } + Self { tx, chain_spec, static_file_provider, prune_modes, storage } } } @@ -235,6 +243,95 @@ impl AsRef for DatabaseProvider { } } +impl DatabaseProvider { + /// Unwinds trie state for the given range. + /// + /// This includes calculating the resulting state root and comparing it with the parent block + /// state root. + pub fn unwind_trie_state_range( + &self, + range: RangeInclusive<BlockNumber>, + ) -> ProviderResult<()> { + let changed_accounts = self + .tx + .cursor_read::<tables::AccountChangeSets>()? + .walk_range(range.clone())? + .collect::<Result<Vec<_>, _>>()?; + + // Unwind account hashes. Add changed accounts to account prefix set. + let hashed_addresses = self.unwind_account_hashing(changed_accounts.iter())?; + let mut account_prefix_set = PrefixSetMut::with_capacity(hashed_addresses.len()); + let mut destroyed_accounts = HashSet::default(); + for (hashed_address, account) in hashed_addresses { + account_prefix_set.insert(Nibbles::unpack(hashed_address)); + if account.is_none() { + destroyed_accounts.insert(hashed_address); + } + } + + // Unwind account history indices. + self.unwind_account_history_indices(changed_accounts.iter())?; + let storage_range = BlockNumberAddress::range(range.clone()); + + let changed_storages = self + .tx + .cursor_read::<tables::StorageChangeSets>()? + .walk_range(storage_range)? + .collect::<Result<Vec<_>, _>>()?; + + // Unwind storage hashes. Add changed account and storage keys to corresponding prefix + // sets. + let mut storage_prefix_sets = HashMap::<B256, PrefixSet>::default(); + let storage_entries = self.unwind_storage_hashing(changed_storages.iter().copied())?; + for (hashed_address, hashed_slots) in storage_entries { + account_prefix_set.insert(Nibbles::unpack(hashed_address)); + let mut storage_prefix_set = PrefixSetMut::with_capacity(hashed_slots.len()); + for slot in hashed_slots { + storage_prefix_set.insert(Nibbles::unpack(slot)); + } + storage_prefix_sets.insert(hashed_address, storage_prefix_set.freeze()); + } + + // Unwind storage history indices. + self.unwind_storage_history_indices(changed_storages.iter().copied())?; + + // Calculate the reverted merkle root. + // This is the same as `StateRoot::incremental_root_with_updates`, only the prefix sets + // are pre-loaded. + let prefix_sets = TriePrefixSets { + account_prefix_set: account_prefix_set.freeze(), + storage_prefix_sets, + destroyed_accounts, + }; + let (new_state_root, trie_updates) = StateRoot::from_tx(&self.tx) + .with_prefix_sets(prefix_sets) + .root_with_updates() + .map_err(Into::<reth_db::DatabaseError>::into)?; + + let parent_number = range.start().saturating_sub(1); + let parent_state_root = self + .header_by_number(parent_number)? + .ok_or_else(|| ProviderError::HeaderNotFound(parent_number.into()))? + .state_root; + + // The state root should always be correct as we are reverting state, + // but for the sake of double verification we check it again. + if new_state_root != parent_state_root { + let parent_hash = self + .block_hash(parent_number)?
+ .ok_or_else(|| ProviderError::HeaderNotFound(parent_number.into()))?; + return Err(ProviderError::UnwindStateRootMismatch(Box::new(RootMismatch { + root: GotExpected { got: new_state_root, expected: parent_state_root }, + block_number: parent_number, + block_hash: parent_hash, + }))) + } + self.write_trie_updates(&trie_updates)?; + + Ok(()) + } +} + impl TryIntoHistoricalStateProvider for DatabaseProvider { fn try_into_history_at_block( self, @@ -277,15 +374,13 @@ impl TryIntoHistoricalStateProvider for Databa } } -impl + 'static> - DatabaseProvider -{ +impl DatabaseProvider { // TODO: uncomment below, once `reth debug_cmd` has been feature gated with dev. // #[cfg(any(test, feature = "test-utils"))] /// Inserts an historical block. **Used for setting up test environments** pub fn insert_historical_block( &self, - block: SealedBlockWithSenders, + block: SealedBlockWithSenders::Body>, ) -> ProviderResult { let ttd = if block.number == 0 { block.difficulty @@ -309,7 +404,7 @@ impl + writer.append_header(block.header.as_ref(), ttd, &block.hash())?; - self.insert_block(block) + self.insert_block(block, StorageLocation::Database) } } @@ -367,8 +462,9 @@ impl DatabaseProvider { chain_spec: Arc, static_file_provider: StaticFileProvider, prune_modes: PruneModes, + storage: Arc, ) -> Self { - Self { tx, chain_spec, static_file_provider, prune_modes } + Self { tx, chain_spec, static_file_provider, prune_modes, storage } } /// Consume `DbTx` or `DbTxMut`. @@ -390,14 +486,16 @@ impl DatabaseProvider { pub fn chain_spec(&self) -> &N::ChainSpec { &self.chain_spec } +} +impl DatabaseProvider { fn transactions_by_tx_range_with_cursor( &self, range: impl RangeBounds, cursor: &mut C, - ) -> ProviderResult> + ) -> ProviderResult>> where - C: DbCursorRO, + C: DbCursorRO>>, { self.static_file_provider.get_range_with_static_file_or_database( StaticFileSegment::Transactions, @@ -411,7 +509,7 @@ impl DatabaseProvider { fn block_with_senders( &self, id: BlockHashOrNumber, - transaction_kind: TransactionVariant, + _transaction_kind: TransactionVariant, header_by_number: HF, construct_block: BF, ) -> ProviderResult> @@ -450,18 +548,7 @@ impl DatabaseProvider { (self.transactions_by_tx_range(tx_range.clone())?, self.senders_by_tx_range(tx_range)?) }; - let body = transactions - .into_iter() - .map(|tx| match transaction_kind { - TransactionVariant::NoHash => TransactionSigned { - // Caller explicitly asked for no hash, so we don't calculate it - hash: B256::ZERO, - signature: tx.signature, - transaction: tx.transaction, - }, - TransactionVariant::WithHash => tx.with_hash(), - }) - .collect(); + let body = transactions.into_iter().map(Into::into).collect(); construct_block(header, body, senders, ommers, withdrawals) } @@ -570,7 +657,7 @@ impl DatabaseProvider { Vec
, ) -> ProviderResult, { - let mut tx_cursor = self.tx.cursor_read::()?; + let mut tx_cursor = self.tx.cursor_read::>>()?; let mut senders_cursor = self.tx.cursor_read::()?; self.block_range(range, headers_range, |header, tx_range, ommers, withdrawals| { @@ -785,276 +872,6 @@ impl DatabaseProvider { Ok(self.tx.commit()?) } - /// Remove requested block transactions, without returning them. - /// - /// This will remove block data for the given range from the following tables: - /// * [`BlockBodyIndices`](tables::BlockBodyIndices) - /// * [`Transactions`](tables::Transactions) - /// * [`TransactionSenders`](tables::TransactionSenders) - /// * [`TransactionHashNumbers`](tables::TransactionHashNumbers) - /// * [`TransactionBlocks`](tables::TransactionBlocks) - pub fn remove_block_transaction_range( - &self, - range: impl RangeBounds + Clone, - ) -> ProviderResult<()> { - // Raad range of block bodies to get all transactions id's of this range. - let block_bodies = self.take::(range)?; - - if block_bodies.is_empty() { - return Ok(()) - } - - // Compute the first and last tx ID in the range - let first_transaction = block_bodies.first().expect("If we have headers").1.first_tx_num(); - let last_transaction = block_bodies.last().expect("Not empty").1.last_tx_num(); - - // If this is the case then all of the blocks in the range are empty - if last_transaction < first_transaction { - return Ok(()) - } - - // Get transactions so we can then remove - let transactions = self - .take::(first_transaction..=last_transaction)? - .into_iter() - .map(|(id, tx)| (id, tx.into())) - .collect::>(); - - // remove senders - self.remove::(first_transaction..=last_transaction)?; - - // Remove TransactionHashNumbers - let mut tx_hash_cursor = self.tx.cursor_write::()?; - for (_, tx) in &transactions { - if tx_hash_cursor.seek_exact(tx.hash())?.is_some() { - tx_hash_cursor.delete_current()?; - } - } - - // Remove TransactionBlocks index if there are transaction present - if !transactions.is_empty() { - let tx_id_range = transactions.first().unwrap().0..=transactions.last().unwrap().0; - self.remove::(tx_id_range)?; - } - - Ok(()) - } - - /// Get requested blocks transaction with senders, also removing them from the database - /// - /// This will remove block data for the given range from the following tables: - /// * [`BlockBodyIndices`](tables::BlockBodyIndices) - /// * [`Transactions`](tables::Transactions) - /// * [`TransactionSenders`](tables::TransactionSenders) - /// * [`TransactionHashNumbers`](tables::TransactionHashNumbers) - /// * [`TransactionBlocks`](tables::TransactionBlocks) - pub fn take_block_transaction_range( - &self, - range: impl RangeBounds + Clone, - ) -> ProviderResult)>> { - // Raad range of block bodies to get all transactions id's of this range. - let block_bodies = self.get::(range)?; - - if block_bodies.is_empty() { - return Ok(Vec::new()) - } - - // Compute the first and last tx ID in the range - let first_transaction = block_bodies.first().expect("If we have headers").1.first_tx_num(); - let last_transaction = block_bodies.last().expect("Not empty").1.last_tx_num(); - - // If this is the case then all of the blocks in the range are empty - if last_transaction < first_transaction { - return Ok(block_bodies.into_iter().map(|(n, _)| (n, Vec::new())).collect()) - } - - // Get transactions and senders - let transactions = self - .take::(first_transaction..=last_transaction)? 
- .into_iter() - .map(|(id, tx)| (id, tx.into())) - .collect::>(); - - let mut senders = - self.take::(first_transaction..=last_transaction)?; - - recover_block_senders(&mut senders, &transactions, first_transaction, last_transaction)?; - - // Remove TransactionHashNumbers - let mut tx_hash_cursor = self.tx.cursor_write::()?; - for (_, tx) in &transactions { - if tx_hash_cursor.seek_exact(tx.hash())?.is_some() { - tx_hash_cursor.delete_current()?; - } - } - - // Remove TransactionBlocks index if there are transaction present - if !transactions.is_empty() { - let tx_id_range = transactions.first().unwrap().0..=transactions.last().unwrap().0; - self.remove::(tx_id_range)?; - } - - // Merge transaction into blocks - let mut block_tx = Vec::with_capacity(block_bodies.len()); - let mut senders = senders.into_iter(); - let mut transactions = transactions.into_iter(); - for (block_number, block_body) in block_bodies { - let mut one_block_tx = Vec::with_capacity(block_body.tx_count as usize); - for _ in block_body.tx_num_range() { - let tx = transactions.next(); - let sender = senders.next(); - - let recovered = match (tx, sender) { - (Some((tx_id, tx)), Some((sender_tx_id, sender))) => { - if tx_id == sender_tx_id { - Ok(TransactionSignedEcRecovered::from_signed_transaction(tx, sender)) - } else { - Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id }) - } - } - (Some((tx_id, _)), _) | (_, Some((tx_id, _))) => { - Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id }) - } - (None, None) => Err(ProviderError::BlockBodyTransactionCount), - }?; - one_block_tx.push(recovered) - } - block_tx.push((block_number, one_block_tx)); - } - - Ok(block_tx) - } - - /// Remove the given range of blocks, without returning any of the blocks. - /// - /// This will remove block data for the given range from the following tables: - /// * [`HeaderNumbers`](tables::HeaderNumbers) - /// * [`CanonicalHeaders`](tables::CanonicalHeaders) - /// * [`BlockOmmers`](tables::BlockOmmers) - /// * [`BlockWithdrawals`](tables::BlockWithdrawals) - /// * [`HeaderTerminalDifficulties`](tables::HeaderTerminalDifficulties) - /// - /// This will also remove transaction data according to - /// [`remove_block_transaction_range`](Self::remove_block_transaction_range). - pub fn remove_block_range( - &self, - range: impl RangeBounds + Clone, - ) -> ProviderResult<()> { - let block_headers = self.remove::(range.clone())?; - if block_headers == 0 { - return Ok(()) - } - - self.tx.unwind_table_by_walker::( - range.clone(), - )?; - self.remove::(range.clone())?; - self.remove::(range.clone())?; - self.remove::(range.clone())?; - self.remove_block_transaction_range(range.clone())?; - self.remove::(range)?; - - Ok(()) - } - - /// Remove the given range of blocks, and return them. - /// - /// This will remove block data for the given range from the following tables: - /// * [`HeaderNumbers`](tables::HeaderNumbers) - /// * [`CanonicalHeaders`](tables::CanonicalHeaders) - /// * [`BlockOmmers`](tables::BlockOmmers) - /// * [`BlockWithdrawals`](tables::BlockWithdrawals) - /// * [`HeaderTerminalDifficulties`](tables::HeaderTerminalDifficulties) - /// - /// This will also remove transaction data according to - /// [`take_block_transaction_range`](Self::take_block_transaction_range). 
- pub fn take_block_range( - &self, - range: impl RangeBounds + Clone, - ) -> ProviderResult> - where - N::ChainSpec: EthereumHardforks, - { - // For blocks we need: - // - // - Headers - // - Bodies (transactions) - // - Uncles/ommers - // - Withdrawals - // - Signers - - let block_headers = self.take::(range.clone())?; - if block_headers.is_empty() { - return Ok(Vec::new()) - } - - self.tx.unwind_table_by_walker::( - range.clone(), - )?; - let block_header_hashes = self.take::(range.clone())?; - let block_ommers = self.take::(range.clone())?; - let block_withdrawals = self.take::(range.clone())?; - let block_tx = self.take_block_transaction_range(range.clone())?; - - let mut blocks = Vec::with_capacity(block_headers.len()); - - // rm HeaderTerminalDifficulties - self.remove::(range)?; - - // merge all into block - let block_header_iter = block_headers.into_iter(); - let block_header_hashes_iter = block_header_hashes.into_iter(); - let block_tx_iter = block_tx.into_iter(); - - // Ommers can be empty for some blocks - let mut block_ommers_iter = block_ommers.into_iter(); - let mut block_withdrawals_iter = block_withdrawals.into_iter(); - let mut block_ommers = block_ommers_iter.next(); - let mut block_withdrawals = block_withdrawals_iter.next(); - - for ((main_block_number, header), (_, header_hash), (_, tx)) in - izip!(block_header_iter, block_header_hashes_iter, block_tx_iter) - { - let header = SealedHeader::new(header, header_hash); - - let (transactions, senders) = tx.into_iter().map(|tx| tx.to_components()).unzip(); - - // Ommers can be missing - let mut ommers = Vec::new(); - if let Some((block_number, _)) = block_ommers.as_ref() { - if *block_number == main_block_number { - ommers = block_ommers.take().unwrap().1.ommers; - block_ommers = block_ommers_iter.next(); - } - }; - - // withdrawal can be missing - let shanghai_is_active = - self.chain_spec.is_shanghai_active_at_timestamp(header.timestamp); - let mut withdrawals = Some(Withdrawals::default()); - if shanghai_is_active { - if let Some((block_number, _)) = block_withdrawals.as_ref() { - if *block_number == main_block_number { - withdrawals = Some(block_withdrawals.take().unwrap().1.withdrawals); - block_withdrawals = block_withdrawals_iter.next(); - } - } - } else { - withdrawals = None - } - - blocks.push(SealedBlockWithSenders { - block: SealedBlock { - header, - body: BlockBody { transactions, ommers, withdrawals }, - }, - senders, - }) - } - - Ok(blocks) - } - /// Load shard and remove it. If list is empty, last shard was full or /// there are no shards at all. fn take_shard(&self, key: T::Key) -> ProviderResult> @@ -1396,9 +1213,7 @@ impl BlockNumReader for DatabaseProvider> BlockReader - for DatabaseProvider -{ +impl BlockReader for DatabaseProvider { fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { if source.is_canonical() { self.block(hash.into()) @@ -1422,7 +1237,7 @@ impl> BlockReader // If they exist but are not indexed, we don't have enough // information to return the block anyways, so we return `None`. let transactions = match self.transactions_by_block(number.into())? 
{ - Some(transactions) => transactions, + Some(transactions) => transactions.into_iter().map(Into::into).collect(), None => return Ok(None), }; @@ -1522,7 +1337,7 @@ impl> BlockReader } fn block_range(&self, range: RangeInclusive) -> ProviderResult> { - let mut tx_cursor = self.tx.cursor_read::()?; + let mut tx_cursor = self.tx.cursor_read::>>()?; self.block_range( range, |range| self.headers_range(range), @@ -1573,7 +1388,7 @@ impl> BlockReader } } -impl> TransactionsProviderExt +impl TransactionsProviderExt for DatabaseProvider { /// Recovers transaction hashes by walking through `Transactions` table and @@ -1643,59 +1458,49 @@ impl> Transaction } // Calculates the hash of the given transaction -impl> TransactionsProvider - for DatabaseProvider -{ +impl TransactionsProvider for DatabaseProvider { + type Transaction = TxTy; + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { Ok(self.tx.get::(tx_hash)?) } - fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Transactions, id, |static_file| static_file.transaction_by_id(id), - || Ok(self.tx.get::(id)?.map(Into::into)), + || Ok(self.tx.get::>(id)?), ) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, id: TxNumber, - ) -> ProviderResult> { + ) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Transactions, id, - |static_file| static_file.transaction_by_id_no_hash(id), - || Ok(self.tx.get::(id)?), + |static_file| static_file.transaction_by_id_unhashed(id), + || Ok(self.tx.get::>(id)?), ) } - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { if let Some(id) = self.transaction_id(hash)? { - Ok(self.transaction_by_id_no_hash(id)?.map(|tx| TransactionSigned { - hash, - signature: tx.signature, - transaction: tx.transaction, - })) + Ok(self.transaction_by_id_unhashed(id)?) } else { Ok(None) } - .map(|tx| tx.map(Into::into)) } fn transaction_by_hash_with_meta( &self, tx_hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { let mut transaction_cursor = self.tx.cursor_read::()?; if let Some(transaction_id) = self.transaction_id(tx_hash)? { - if let Some(tx) = self.transaction_by_id_no_hash(transaction_id)? { - let transaction = TransactionSigned { - hash: tx_hash, - signature: tx.signature, - transaction: tx.transaction, - }; + if let Some(transaction) = self.transaction_by_id_unhashed(transaction_id)? { if let Some(block_number) = transaction_cursor.seek(transaction_id).map(|b| b.map(|(_, bn)| bn))? { @@ -1736,8 +1541,8 @@ impl> Transaction fn transactions_by_block( &self, id: BlockHashOrNumber, - ) -> ProviderResult>> { - let mut tx_cursor = self.tx.cursor_read::()?; + ) -> ProviderResult>> { + let mut tx_cursor = self.tx.cursor_read::>()?; if let Some(block_number) = self.convert_hash_or_number(id)? { if let Some(body) = self.block_body_indices(block_number)? { @@ -1745,12 +1550,7 @@ impl> Transaction return if tx_range.is_empty() { Ok(Some(Vec::new())) } else { - Ok(Some( - self.transactions_by_tx_range_with_cursor(tx_range, &mut tx_cursor)? 
- .into_iter() - .map(Into::into) - .collect(), - )) + Ok(Some(self.transactions_by_tx_range_with_cursor(tx_range, &mut tx_cursor)?)) } } } @@ -1760,8 +1560,8 @@ impl> Transaction fn transactions_by_block_range( &self, range: impl RangeBounds, - ) -> ProviderResult>> { - let mut tx_cursor = self.tx.cursor_read::()?; + ) -> ProviderResult>> { + let mut tx_cursor = self.tx.cursor_read::>()?; let mut results = Vec::new(); let mut body_cursor = self.tx.cursor_read::()?; for entry in body_cursor.walk_range(range)? { @@ -1773,7 +1573,6 @@ impl> Transaction results.push( self.transactions_by_tx_range_with_cursor(tx_num_range, &mut tx_cursor)? .into_iter() - .map(Into::into) .collect(), ); } @@ -1784,10 +1583,10 @@ impl> Transaction fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.transactions_by_tx_range_with_cursor( range, - &mut self.tx.cursor_read::()?, + &mut self.tx.cursor_read::>()?, ) } @@ -1803,9 +1602,7 @@ impl> Transaction } } -impl> ReceiptProvider - for DatabaseProvider -{ +impl ReceiptProvider for DatabaseProvider { fn receipt(&self, id: TxNumber) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Receipts, @@ -2070,7 +1867,9 @@ impl StorageReader for DatabaseProvider } } -impl StateChangeWriter for DatabaseProvider { +impl StateChangeWriter + for DatabaseProvider +{ fn write_state_reverts( &self, reverts: PlainStateReverts, @@ -2893,218 +2692,66 @@ impl HistoryWriter for DatabaseProvi } } -impl StateReader for DatabaseProvider { +impl StateReader for DatabaseProvider { fn get_state(&self, block: BlockNumber) -> ProviderResult> { self.get_state(block..=block) } } -impl + 'static> - BlockExecutionWriter for DatabaseProvider +impl BlockExecutionWriter + for DatabaseProvider { - fn take_block_and_execution_range( + fn take_block_and_execution_above( &self, - range: RangeInclusive, + block: BlockNumber, + remove_transactions_from: StorageLocation, ) -> ProviderResult { - let changed_accounts = self - .tx - .cursor_read::()? - .walk_range(range.clone())? - .collect::, _>>()?; + let range = block + 1..=self.last_block_number()?; - // Unwind account hashes. Add changed accounts to account prefix set. - let hashed_addresses = self.unwind_account_hashing(changed_accounts.iter())?; - let mut account_prefix_set = PrefixSetMut::with_capacity(hashed_addresses.len()); - let mut destroyed_accounts = HashSet::default(); - for (hashed_address, account) in hashed_addresses { - account_prefix_set.insert(Nibbles::unpack(hashed_address)); - if account.is_none() { - destroyed_accounts.insert(hashed_address); - } - } - - // Unwind account history indices. - self.unwind_account_history_indices(changed_accounts.iter())?; - let storage_range = BlockNumberAddress::range(range.clone()); - - let changed_storages = self - .tx - .cursor_read::()? - .walk_range(storage_range)? - .collect::, _>>()?; - - // Unwind storage hashes. Add changed account and storage keys to corresponding prefix - // sets. 
- let mut storage_prefix_sets = HashMap::::default(); - let storage_entries = self.unwind_storage_hashing(changed_storages.iter().copied())?; - for (hashed_address, hashed_slots) in storage_entries { - account_prefix_set.insert(Nibbles::unpack(hashed_address)); - let mut storage_prefix_set = PrefixSetMut::with_capacity(hashed_slots.len()); - for slot in hashed_slots { - storage_prefix_set.insert(Nibbles::unpack(slot)); - } - storage_prefix_sets.insert(hashed_address, storage_prefix_set.freeze()); - } - - // Unwind storage history indices. - self.unwind_storage_history_indices(changed_storages.iter().copied())?; - - // Calculate the reverted merkle root. - // This is the same as `StateRoot::incremental_root_with_updates`, only the prefix sets - // are pre-loaded. - let prefix_sets = TriePrefixSets { - account_prefix_set: account_prefix_set.freeze(), - storage_prefix_sets, - destroyed_accounts, - }; - let (new_state_root, trie_updates) = StateRoot::from_tx(&self.tx) - .with_prefix_sets(prefix_sets) - .root_with_updates() - .map_err(Into::::into)?; - - let parent_number = range.start().saturating_sub(1); - let parent_state_root = self - .header_by_number(parent_number)? - .ok_or_else(|| ProviderError::HeaderNotFound(parent_number.into()))? - .state_root; - - // state root should be always correct as we are reverting state. - // but for sake of double verification we will check it again. - if new_state_root != parent_state_root { - let parent_hash = self - .block_hash(parent_number)? - .ok_or_else(|| ProviderError::HeaderNotFound(parent_number.into()))?; - return Err(ProviderError::UnwindStateRootMismatch(Box::new(RootMismatch { - root: GotExpected { got: new_state_root, expected: parent_state_root }, - block_number: parent_number, - block_hash: parent_hash, - }))) - } - self.write_trie_updates(&trie_updates)?; - - // get blocks - let blocks = self.take_block_range(range.clone())?; - let unwind_to = blocks.first().map(|b| b.number.saturating_sub(1)); + self.unwind_trie_state_range(range.clone())?; // get execution res let execution_state = self.take_state(range.clone())?; + let blocks = self.sealed_block_with_senders_range(range)?; + // remove block bodies it is needed for both get block range and get block execution results // that is why it is deleted afterwards. - self.remove::(range)?; + self.remove_blocks_above(block, remove_transactions_from)?; // Update pipeline progress - if let Some(fork_number) = unwind_to { - self.update_pipeline_stages(fork_number, true)?; - } + self.update_pipeline_stages(block, true)?; Ok(Chain::new(blocks, execution_state, None)) } - fn remove_block_and_execution_range( + fn remove_block_and_execution_above( &self, - range: RangeInclusive, + block: BlockNumber, + remove_transactions_from: StorageLocation, ) -> ProviderResult<()> { - let changed_accounts = self - .tx - .cursor_read::()? - .walk_range(range.clone())? - .collect::, _>>()?; - - // Unwind account hashes. Add changed accounts to account prefix set. - let hashed_addresses = self.unwind_account_hashing(changed_accounts.iter())?; - let mut account_prefix_set = PrefixSetMut::with_capacity(hashed_addresses.len()); - let mut destroyed_accounts = HashSet::default(); - for (hashed_address, account) in hashed_addresses { - account_prefix_set.insert(Nibbles::unpack(hashed_address)); - if account.is_none() { - destroyed_accounts.insert(hashed_address); - } - } - - // Unwind account history indices. 
- self.unwind_account_history_indices(changed_accounts.iter())?; + let range = block + 1..=self.last_block_number()?; - let storage_range = BlockNumberAddress::range(range.clone()); - let changed_storages = self - .tx - .cursor_read::()? - .walk_range(storage_range)? - .collect::, _>>()?; - - // Unwind storage hashes. Add changed account and storage keys to corresponding prefix - // sets. - let mut storage_prefix_sets = HashMap::::default(); - let storage_entries = self.unwind_storage_hashing(changed_storages.iter().copied())?; - for (hashed_address, hashed_slots) in storage_entries { - account_prefix_set.insert(Nibbles::unpack(hashed_address)); - let mut storage_prefix_set = PrefixSetMut::with_capacity(hashed_slots.len()); - for slot in hashed_slots { - storage_prefix_set.insert(Nibbles::unpack(slot)); - } - storage_prefix_sets.insert(hashed_address, storage_prefix_set.freeze()); - } - - // Unwind storage history indices. - self.unwind_storage_history_indices(changed_storages.iter().copied())?; - - // Calculate the reverted merkle root. - // This is the same as `StateRoot::incremental_root_with_updates`, only the prefix sets - // are pre-loaded. - let prefix_sets = TriePrefixSets { - account_prefix_set: account_prefix_set.freeze(), - storage_prefix_sets, - destroyed_accounts, - }; - let (new_state_root, trie_updates) = StateRoot::from_tx(&self.tx) - .with_prefix_sets(prefix_sets) - .root_with_updates() - .map_err(Into::::into)?; - - let parent_number = range.start().saturating_sub(1); - let parent_state_root = self - .header_by_number(parent_number)? - .ok_or_else(|| ProviderError::HeaderNotFound(parent_number.into()))? - .state_root; - - // state root should be always correct as we are reverting state. - // but for sake of double verification we will check it again. - if new_state_root != parent_state_root { - let parent_hash = self - .block_hash(parent_number)? - .ok_or_else(|| ProviderError::HeaderNotFound(parent_number.into()))?; - return Err(ProviderError::UnwindStateRootMismatch(Box::new(RootMismatch { - root: GotExpected { got: new_state_root, expected: parent_state_root }, - block_number: parent_number, - block_hash: parent_hash, - }))) - } - self.write_trie_updates(&trie_updates)?; - - // get blocks - let blocks = self.take_block_range(range.clone())?; - let unwind_to = blocks.first().map(|b| b.number.saturating_sub(1)); + self.unwind_trie_state_range(range.clone())?; // remove execution res - self.remove_state(range.clone())?; + self.remove_state(range)?; // remove block bodies it is needed for both get block range and get block execution results // that is why it is deleted afterwards. - self.remove::(range)?; + self.remove_blocks_above(block, remove_transactions_from)?; // Update pipeline progress - if let Some(block_number) = unwind_to { - self.update_pipeline_stages(block_number, true)?; - } + self.update_pipeline_stages(block, true)?; Ok(()) } } -impl + 'static> BlockWriter +impl BlockWriter for DatabaseProvider { - type Body = BlockBody; + type Body = <::Block as reth_primitives_traits::Block>::Body; /// Inserts the block into the database, always modifying the following tables: /// * [`CanonicalHeaders`](tables::CanonicalHeaders) @@ -3128,7 +2775,8 @@ impl + /// [`TransactionHashNumbers`](tables::TransactionHashNumbers). 
fn insert_block( &self, - block: SealedBlockWithSenders, + block: SealedBlockWithSenders, + write_transactions_to: StorageLocation, ) -> ProviderResult { let block_number = block.number; @@ -3157,15 +2805,6 @@ impl + self.tx.put::(block_number, ttd.into())?; durations_recorder.record_relative(metrics::Action::InsertHeaderTerminalDifficulties); - // insert body ommers data - if !block.body.ommers.is_empty() { - self.tx.put::( - block_number, - StoredBlockOmmers { ommers: block.block.body.ommers }, - )?; - durations_recorder.record_relative(metrics::Action::InsertBlockOmmers); - } - let mut next_tx_num = self .tx .cursor_read::()? @@ -3175,84 +2814,28 @@ impl + durations_recorder.record_relative(metrics::Action::GetNextTxNum); let first_tx_num = next_tx_num; - let tx_count = block.block.body.transactions.len() as u64; + let tx_count = block.block.body.transactions().len() as u64; // Ensures we have all the senders for the block's transactions. - let mut tx_senders_elapsed = Duration::default(); - let mut transactions_elapsed = Duration::default(); - let mut tx_hash_numbers_elapsed = Duration::default(); - for (transaction, sender) in - block.block.body.transactions.into_iter().zip(block.senders.iter()) + block.block.body.transactions().iter().zip(block.senders.iter()) { - let hash = transaction.hash(); - - if self - .prune_modes - .sender_recovery - .as_ref() - .filter(|prune_mode| prune_mode.is_full()) - .is_none() - { - let start = Instant::now(); - self.tx.put::(next_tx_num, *sender)?; - tx_senders_elapsed += start.elapsed(); - } + let hash = transaction.tx_hash(); - let start = Instant::now(); - self.tx.put::(next_tx_num, transaction.into())?; - let elapsed = start.elapsed(); - if elapsed > Duration::from_secs(1) { - warn!( - target: "providers::db", - ?block_number, - tx_num = %next_tx_num, - hash = %hash, - ?elapsed, - "Transaction insertion took too long" - ); + if self.prune_modes.sender_recovery.as_ref().is_none_or(|m| !m.is_full()) { + self.tx.put::(next_tx_num, *sender)?; } - transactions_elapsed += elapsed; - if self - .prune_modes - .transaction_lookup - .filter(|prune_mode| prune_mode.is_full()) - .is_none() - { - let start = Instant::now(); - self.tx.put::(hash, next_tx_num)?; - tx_hash_numbers_elapsed += start.elapsed(); + if self.prune_modes.transaction_lookup.is_none_or(|m| !m.is_full()) { + self.tx.put::(*hash, next_tx_num)?; } next_tx_num += 1; } - durations_recorder - .record_duration(metrics::Action::InsertTransactionSenders, tx_senders_elapsed); - durations_recorder - .record_duration(metrics::Action::InsertTransactions, transactions_elapsed); - durations_recorder.record_duration( - metrics::Action::InsertTransactionHashNumbers, - tx_hash_numbers_elapsed, - ); - - if let Some(withdrawals) = block.block.body.withdrawals { - if !withdrawals.is_empty() { - self.tx.put::( - block_number, - StoredBlockWithdrawals { withdrawals }, - )?; - durations_recorder.record_relative(metrics::Action::InsertBlockWithdrawals); - } - } - - let block_indices = StoredBlockBodyIndices { first_tx_num, tx_count }; - self.tx.put::(block_number, block_indices)?; - durations_recorder.record_relative(metrics::Action::InsertBlockBodyIndices); - if !block_indices.is_empty() { - self.tx.put::(block_indices.last_tx_num(), block_number)?; - durations_recorder.record_relative(metrics::Action::InsertTransactionBlocks); - } + self.append_block_bodies( + vec![(block_number, Some(block.block.body))], + write_transactions_to, + )?; debug!( target: "providers::db", @@ -3261,57 +2844,180 @@ impl + "Inserted 
block" ); - Ok(block_indices) + Ok(StoredBlockBodyIndices { first_tx_num, tx_count }) } fn append_block_bodies( &self, - bodies: impl Iterator)>, + bodies: Vec<(BlockNumber, Option)>, + write_transactions_to: StorageLocation, ) -> ProviderResult<()> { + let Some(from_block) = bodies.first().map(|(block, _)| *block) else { return Ok(()) }; + + // Initialize writer if we will be writing transactions to staticfiles + let mut tx_static_writer = write_transactions_to + .static_files() + .then(|| { + self.static_file_provider.get_writer(from_block, StaticFileSegment::Transactions) + }) + .transpose()?; + let mut block_indices_cursor = self.tx.cursor_write::()?; let mut tx_block_cursor = self.tx.cursor_write::()?; - let mut ommers_cursor = self.tx.cursor_write::()?; - let mut withdrawals_cursor = self.tx.cursor_write::()?; + + // Initialize cursor if we will be writing transactions to database + let mut tx_cursor = write_transactions_to + .database() + .then(|| { + self.tx.cursor_write::::Transaction, + >>() + }) + .transpose()?; // Get id for the next tx_num of zero if there are no transactions. let mut next_tx_num = tx_block_cursor.last()?.map(|(id, _)| id + 1).unwrap_or_default(); - for (block_number, body) in bodies { - let tx_count = body.as_ref().map(|b| b.transactions.len() as u64).unwrap_or_default(); + for (block_number, body) in &bodies { + // Increment block on static file header. + if let Some(writer) = tx_static_writer.as_mut() { + writer.increment_block(*block_number)?; + } + + let tx_count = body.as_ref().map(|b| b.transactions().len() as u64).unwrap_or_default(); let block_indices = StoredBlockBodyIndices { first_tx_num: next_tx_num, tx_count }; + let mut durations_recorder = metrics::DurationsRecorder::default(); + // insert block meta - block_indices_cursor.append(block_number, block_indices)?; + block_indices_cursor.append(*block_number, block_indices)?; + + durations_recorder.record_relative(metrics::Action::InsertBlockBodyIndices); - next_tx_num += tx_count; let Some(body) = body else { continue }; // write transaction block index - if !body.transactions.is_empty() { - tx_block_cursor.append(block_indices.last_tx_num(), block_number)?; + if !body.transactions().is_empty() { + tx_block_cursor.append(block_indices.last_tx_num(), *block_number)?; + durations_recorder.record_relative(metrics::Action::InsertTransactionBlocks); } - // Write ommers if any - if !body.ommers.is_empty() { - ommers_cursor.append(block_number, StoredBlockOmmers { ommers: body.ommers })?; + // write transactions + for transaction in body.transactions() { + if let Some(writer) = tx_static_writer.as_mut() { + writer.append_transaction(next_tx_num, transaction)?; + } + if let Some(cursor) = tx_cursor.as_mut() { + cursor.append(next_tx_num, transaction.clone())?; + } + + // Increment transaction id for each transaction. 
+                next_tx_num += 1;
             }

-            // Write withdrawals if any
-            if let Some(withdrawals) = body.withdrawals {
-                if !withdrawals.is_empty() {
-                    withdrawals_cursor
-                        .append(block_number, StoredBlockWithdrawals { withdrawals })?;
-                }
+            debug!(
+                target: "providers::db",
+                ?block_number,
+                actions = ?durations_recorder.actions,
+                "Inserted block body"
+            );
         }
+
+        self.storage.writer().write_block_bodies(self, bodies)?;
+
+        Ok(())
+    }
+
+    fn remove_blocks_above(
+        &self,
+        block: BlockNumber,
+        remove_transactions_from: StorageLocation,
+    ) -> ProviderResult<()> {
+        let mut canonical_headers_cursor = self.tx.cursor_write::<tables::CanonicalHeaders>()?;
+        let mut rev_headers = canonical_headers_cursor.walk_back(None)?;
+
+        while let Some(Ok((number, hash))) = rev_headers.next() {
+            if number <= block {
+                break
+            }
+            self.tx.delete::<tables::HeaderNumbers>(hash, None)?;
+            rev_headers.delete_current()?;
+        }
+        self.remove::<tables::Headers>(block + 1..)?;
+        self.remove::<tables::HeaderTerminalDifficulties>(block + 1..)?;
+
+        // First transaction to be removed
+        let unwind_tx_from = self
+            .tx
+            .get::<tables::BlockBodyIndices>(block)?
+            .map(|b| b.next_tx_num())
+            .ok_or(ProviderError::BlockBodyIndicesNotFound(block))?;
+
+        // Last transaction to be removed
+        let unwind_tx_to = self
+            .tx
+            .cursor_read::<tables::BlockBodyIndices>()?
+            .last()?
+            // shouldn't happen because this was OK above
+            .ok_or(ProviderError::BlockBodyIndicesNotFound(block))?
+            .1
+            .last_tx_num();
+
+        if unwind_tx_from < unwind_tx_to {
+            for (hash, _) in self.transaction_hashes_by_range(unwind_tx_from..(unwind_tx_to + 1))? {
+                self.tx.delete::<tables::TransactionHashNumbers>(hash, None)?;
+            }
+        }
+
+        self.remove::<tables::TransactionSenders>(unwind_tx_from..)?;
+
+        self.remove_bodies_above(block, remove_transactions_from)?;
+
+        Ok(())
+    }
+
+    fn remove_bodies_above(
+        &self,
+        block: BlockNumber,
+        remove_transactions_from: StorageLocation,
+    ) -> ProviderResult<()> {
+        self.storage.writer().remove_block_bodies_above(self, block)?;
+
+        // First transaction to be removed
+        let unwind_tx_from = self
+            .tx
+            .get::<tables::BlockBodyIndices>(block)?
+            .map(|b| b.next_tx_num())
+            .ok_or(ProviderError::BlockBodyIndicesNotFound(block))?;
+
+        self.remove::<tables::BlockBodyIndices>(block + 1..)?;
+        self.remove::<tables::TransactionBlocks>(unwind_tx_from..)?;
+
+        if remove_transactions_from.database() {
+            self.remove::<tables::Transactions>(unwind_tx_from..)?;
+        }
+
+        if remove_transactions_from.static_files() {
+            let static_file_tx_num = self
+                .static_file_provider
+                .get_highest_static_file_tx(StaticFileSegment::Transactions);
+
+            let to_delete = static_file_tx_num
+                .map(|static_tx| (static_tx + 1).saturating_sub(unwind_tx_from))
+                .unwrap_or_default();
+
+            self.static_file_provider
+                .latest_writer(StaticFileSegment::Transactions)?
+                .prune_transactions(to_delete, block)?;
+        }
+        Ok(())
     }

     /// TODO(joshie): this fn should be moved to `UnifiedStorageWriter` eventually
     fn append_blocks_with_state(
         &self,
-        blocks: Vec<SealedBlockWithSenders>,
+        blocks: Vec>,
         execution_outcome: ExecutionOutcome,
         hashed_state: HashedPostStateSorted,
         trie_updates: TrieUpdates,
@@ -3330,7 +3036,7 @@ impl
         // Insert the blocks
         for block in blocks {
-            self.insert_block(block)?;
+            self.insert_block(block, StorageLocation::Database)?;
             durations_recorder.record_relative(metrics::Action::InsertBlock);
         }

@@ -3459,79 +3165,3 @@ impl<TX: DbTx + 'static, N: NodeTypes> DBProvider for DatabaseProvider<TX, N>
         self.prune_modes_ref()
     }
 }
-
-/// Helper method to recover senders for any blocks in the db which do not have senders. This
-/// compares the length of the input senders [`Vec`], with the length of given transactions [`Vec`],
-/// and will add to the input senders vec if there are more transactions.
-///
-/// NOTE: This will modify the input senders list, which is why a mutable reference is required.
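The helper removed below is a merge over two `TxNumber`-sorted sequences: the stored-senders cursor only advances when its key matches the current transaction, and every unmatched transaction is queued for signer recovery. A standalone sketch of that walk, with `&str` standing in for the sender and transaction types:

fn find_missing(senders: &[(u64, &str)], transactions: &[(u64, &str)]) -> Vec<usize> {
    let mut missing = Vec::new();
    let mut senders_iter = senders.iter().peekable();
    for (i, (tx_number, _)) in transactions.iter().enumerate() {
        match senders_iter.peek() {
            // Keys match: this transaction already has a stored sender.
            Some((sender_tx, _)) if sender_tx == tx_number => {
                senders_iter.next();
            }
            // No match, or senders exhausted: the sender must be recovered.
            _ => missing.push(i),
        }
    }
    missing
}

// e.g. senders at tx 0 and 2, transactions at tx 0, 1, 2 -> only index 1 is missing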
-fn recover_block_senders( - senders: &mut Vec<(u64, Address)>, - transactions: &[(u64, TransactionSigned)], - first_transaction: u64, - last_transaction: u64, -) -> ProviderResult<()> { - // Recover senders manually if not found in db - // NOTE: Transactions are always guaranteed to be in the database whereas - // senders might be pruned. - if senders.len() != transactions.len() { - if senders.len() > transactions.len() { - error!(target: "providers::db", senders=%senders.len(), transactions=%transactions.len(), - first_tx=%first_transaction, last_tx=%last_transaction, - "unexpected senders and transactions mismatch"); - } - let missing = transactions.len().saturating_sub(senders.len()); - senders.reserve(missing); - // Find all missing senders, their corresponding tx numbers and indexes to the original - // `senders` vector at which the recovered senders will be inserted. - let mut missing_senders = Vec::with_capacity(missing); - { - let mut senders = senders.iter().peekable(); - - // `transactions` contain all entries. `senders` contain _some_ of the senders for - // these transactions. Both are sorted and indexed by `TxNumber`. - // - // The general idea is to iterate on both `transactions` and `senders`, and advance - // the `senders` iteration only if it matches the current `transactions` entry's - // `TxNumber`. Otherwise, add the transaction to the list of missing senders. - for (i, (tx_number, transaction)) in transactions.iter().enumerate() { - if let Some((sender_tx_number, _)) = senders.peek() { - if sender_tx_number == tx_number { - // If current sender's `TxNumber` matches current transaction's - // `TxNumber`, advance the senders iterator. - senders.next(); - } else { - // If current sender's `TxNumber` doesn't match current transaction's - // `TxNumber`, add it to missing senders. 
-                        missing_senders.push((i, tx_number, transaction));
-                    }
-                } else {
-                    // If there's no more senders left, but we're still iterating over
-                    // transactions, add them to missing senders
-                    missing_senders.push((i, tx_number, transaction));
-                }
-            }
-        }
-
-        // Recover senders
-        let recovered_senders = TransactionSigned::recover_signers(
-            missing_senders.iter().map(|(_, _, tx)| *tx).collect::<Vec<_>>(),
-            missing_senders.len(),
-        )
-        .ok_or(ProviderError::SenderRecoveryError)?;
-
-        // Insert recovered senders along with tx numbers at the corresponding indexes to the
-        // original `senders` vector
-        for ((i, tx_number, _), sender) in missing_senders.into_iter().zip(recovered_senders) {
-            // Insert will put recovered senders at necessary positions and shift the rest
-            senders.insert(i, (*tx_number, sender));
-        }
-
-        // Debug assertions which are triggered during the test to ensure that all senders are
-        // present and sorted
-        debug_assert_eq!(senders.len(), transactions.len(), "missing one or more senders");
-        debug_assert!(senders.iter().tuple_windows().all(|(a, b)| a.0 < b.0), "senders not sorted");
-    }
-
-    Ok(())
-}
diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs
index 3bf3e7b247f..68d1a168f15 100644
--- a/crates/storage/provider/src/providers/mod.rs
+++ b/crates/storage/provider/src/providers/mod.rs
@@ -20,12 +20,13 @@ use reth_blockchain_tree_api::{
 };
 use reth_chain_state::{ChainInfoTracker, ForkChoiceNotifications, ForkChoiceSubscriptions};
 use reth_chainspec::{ChainInfo, EthereumHardforks};
+use reth_db::table::Value;
 use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices};
 use reth_evm::ConfigureEvmEnv;
-use reth_node_types::NodeTypesWithDB;
+use reth_node_types::{FullNodePrimitives, NodeTypes, NodeTypesWithDB, TxTy};
 use reth_primitives::{
     Account, Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader,
-    TransactionMeta, TransactionSigned, TransactionSignedNoHash,
+    TransactionMeta, TransactionSigned,
 };
 use reth_prune_types::{PruneCheckpoint, PruneSegment};
 use reth_stages_types::{StageCheckpoint, StageId};
@@ -37,6 +38,7 @@ use std::{
     sync::Arc,
     time::Instant,
 };
+
 use tracing::trace;

 mod database;
@@ -67,10 +69,39 @@ pub use blockchain_provider::BlockchainProvider2;
 mod consistent;
 pub use consistent::ConsistentProvider;

+/// Helper trait to bound [`NodeTypes`] so that combined with database they satisfy
+/// [`ProviderNodeTypes`].
+pub trait NodeTypesForProvider
+where
+    Self: NodeTypes<
+        ChainSpec: EthereumHardforks,
+        Storage: ChainStorage<Self::Primitives>,
+        Primitives: FullNodePrimitives<
+            SignedTx: Value + From<TransactionSigned> + Into<TransactionSigned>,
+        >,
+    >,
+{
+}
+
+impl<T> NodeTypesForProvider for T where
+    T: NodeTypes<
+        ChainSpec: EthereumHardforks,
+        Storage: ChainStorage<T::Primitives>,
+        Primitives: FullNodePrimitives<
+            SignedTx: Value + From<TransactionSigned> + Into<TransactionSigned>,
+        >,
+    >
+{
+}
+
 /// Helper trait keeping common requirements of providers for [`NodeTypesWithDB`].
-pub trait ProviderNodeTypes: NodeTypesWithDB<ChainSpec: EthereumHardforks> {}
+pub trait ProviderNodeTypes
+where
+    Self: NodeTypesForProvider + NodeTypesWithDB,
+{
+}

-impl<T> ProviderNodeTypes for T where T: NodeTypesWithDB<ChainSpec: EthereumHardforks> {}
+impl<T> ProviderNodeTypes for T where T: NodeTypesForProvider + NodeTypesWithDB {}

 /// The main type for interacting with the blockchain.
/// @@ -390,29 +421,31 @@ impl BlockReader for BlockchainProvider { } impl TransactionsProvider for BlockchainProvider { + type Transaction = TxTy; + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { self.database.transaction_id(tx_hash) } - fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { self.database.transaction_by_id(id) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, id: TxNumber, - ) -> ProviderResult> { - self.database.transaction_by_id_no_hash(id) + ) -> ProviderResult> { + self.database.transaction_by_id_unhashed(id) } - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { self.database.transaction_by_hash(hash) } fn transaction_by_hash_with_meta( &self, tx_hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { self.database.transaction_by_hash_with_meta(tx_hash) } @@ -423,21 +456,21 @@ impl TransactionsProvider for BlockchainProvider { fn transactions_by_block( &self, id: BlockHashOrNumber, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.database.transactions_by_block(id) } fn transactions_by_block_range( &self, range: impl RangeBounds, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.database.transactions_by_block_range(range) } fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.database.transactions_by_tx_range(range) } diff --git a/crates/storage/provider/src/providers/static_file/jar.rs b/crates/storage/provider/src/providers/static_file/jar.rs index e87829b1133..e04d46312f6 100644 --- a/crates/storage/provider/src/providers/static_file/jar.rs +++ b/crates/storage/provider/src/providers/static_file/jar.rs @@ -7,15 +7,19 @@ use crate::{ TransactionsProvider, }; use alloy_consensus::Header; -use alloy_eips::BlockHashOrNumber; +use alloy_eips::{eip2718::Encodable2718, BlockHashOrNumber}; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use reth_chainspec::ChainInfo; -use reth_db::static_file::{HeaderMask, ReceiptMask, StaticFileCursor, TransactionMask}; -use reth_db_api::models::CompactU256; -use reth_node_types::NodePrimitives; -use reth_primitives::{ - Receipt, SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, +use reth_db::{ + static_file::{ + BlockHashMask, HeaderMask, HeaderWithHashMask, ReceiptMask, StaticFileCursor, + TDWithHashMask, TotalDifficultyMask, TransactionMask, + }, + table::Decompress, }; +use reth_node_types::NodePrimitives; +use reth_primitives::{transaction::recover_signers, Receipt, SealedHeader, TransactionMeta}; +use reth_primitives_traits::SignedTransaction; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ fmt::Debug, @@ -90,7 +94,7 @@ impl HeaderProvider for StaticFileJarProvider<'_, N> { fn header(&self, block_hash: &BlockHash) -> ProviderResult> { Ok(self .cursor()? - .get_two::>(block_hash.into())? + .get_two::>(block_hash.into())? .filter(|(_, hash)| hash == block_hash) .map(|(header, _)| header)) } @@ -102,13 +106,13 @@ impl HeaderProvider for StaticFileJarProvider<'_, N> { fn header_td(&self, block_hash: &BlockHash) -> ProviderResult> { Ok(self .cursor()? - .get_two::>(block_hash.into())? + .get_two::(block_hash.into())? 
.filter(|(_, hash)| hash == block_hash) .map(|(td, _)| td.into())) } fn header_td_by_number(&self, num: BlockNumber) -> ProviderResult> { - Ok(self.cursor()?.get_one::>(num.into())?.map(Into::into)) + Ok(self.cursor()?.get_one::(num.into())?.map(Into::into)) } fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { @@ -129,7 +133,7 @@ impl HeaderProvider for StaticFileJarProvider<'_, N> { fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { Ok(self .cursor()? - .get_two::>(number.into())? + .get_two::>(number.into())? .map(|(header, hash)| SealedHeader::new(header, hash))) } @@ -145,7 +149,7 @@ impl HeaderProvider for StaticFileJarProvider<'_, N> { for number in range { if let Some((header, hash)) = - cursor.get_two::>(number.into())? + cursor.get_two::>(number.into())? { let sealed = SealedHeader::new(header, hash); if !predicate(&sealed) { @@ -160,7 +164,7 @@ impl HeaderProvider for StaticFileJarProvider<'_, N> { impl BlockHashReader for StaticFileJarProvider<'_, N> { fn block_hash(&self, number: u64) -> ProviderResult> { - self.cursor()?.get_one::>(number.into()) + self.cursor()?.get_one::(number.into()) } fn canonical_hashes_range( @@ -172,7 +176,7 @@ impl BlockHashReader for StaticFileJarProvider<'_, N> { let mut hashes = Vec::with_capacity((end - start) as usize); for number in start..end { - if let Some(hash) = cursor.get_one::>(number.into())? { + if let Some(hash) = cursor.get_one::(number.into())? { hashes.push(hash) } } @@ -200,45 +204,43 @@ impl BlockNumReader for StaticFileJarProvider<'_, N> { let mut cursor = self.cursor()?; Ok(cursor - .get_one::>((&hash).into())? + .get_one::((&hash).into())? .and_then(|res| (res == hash).then(|| cursor.number()).flatten())) } } -impl TransactionsProvider for StaticFileJarProvider<'_, N> { +impl> TransactionsProvider + for StaticFileJarProvider<'_, N> +{ + type Transaction = N::SignedTx; + fn transaction_id(&self, hash: TxHash) -> ProviderResult> { let mut cursor = self.cursor()?; Ok(cursor - .get_one::>((&hash).into())? - .and_then(|res| (res.hash() == hash).then(|| cursor.number()).flatten())) + .get_one::>((&hash).into())? + .and_then(|res| (res.trie_hash() == hash).then(|| cursor.number()).flatten())) } - fn transaction_by_id(&self, num: TxNumber) -> ProviderResult> { - Ok(self - .cursor()? - .get_one::>(num.into())? - .map(|tx| tx.with_hash())) + fn transaction_by_id(&self, num: TxNumber) -> ProviderResult> { + self.cursor()?.get_one::>(num.into()) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, num: TxNumber, - ) -> ProviderResult> { - self.cursor()?.get_one::>(num.into()) + ) -> ProviderResult> { + self.cursor()?.get_one::>(num.into()) } - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { - Ok(self - .cursor()? - .get_one::>((&hash).into())? - .map(|tx| tx.with_hash())) + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + self.cursor()?.get_one::>((&hash).into()) } fn transaction_by_hash_with_meta( &self, _hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { // Information required on indexing table [`tables::TransactionBlocks`] Err(ProviderError::UnsupportedProvider) } @@ -251,7 +253,7 @@ impl TransactionsProvider for StaticFileJarProvider<'_, N> { fn transactions_by_block( &self, _block_id: BlockHashOrNumber, - ) -> ProviderResult>> { + ) -> ProviderResult>> { // Related to indexing tables. Live database should get the tx_range and call static file // provider with `transactions_by_tx_range` instead. 
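The comment above describes the intended division of labor; here is a sketch of the caller-side pattern with hypothetical stand-in traits (these are not reth's actual provider APIs): resolve the block's transaction range from the database-side body indices, then query the static files by `TxNumber`:

struct Tx;

trait BlockIndexSource {
    /// Returns (first_tx_num, tx_count) for a block, as stored in `BlockBodyIndices`.
    fn body_indices(&self, block: u64) -> (u64, u64);
}

trait TxSource {
    fn transactions_by_tx_range(&self, range: std::ops::Range<u64>) -> Vec<Tx>;
}

/// Block-to-transactions lookup: the database resolves the range, static files serve it.
fn txs_for_block(db: &impl BlockIndexSource, sf: &impl TxSource, block: u64) -> Vec<Tx> {
    let (first, count) = db.body_indices(block);
    sf.transactions_by_tx_range(first..first + count)
}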
Err(ProviderError::UnsupportedProvider) @@ -260,7 +262,7 @@ impl TransactionsProvider for StaticFileJarProvider<'_, N> { fn transactions_by_block_range( &self, _range: impl RangeBounds, - ) -> ProviderResult>> { + ) -> ProviderResult>> { // Related to indexing tables. Live database should get the tx_range and call static file // provider with `transactions_by_tx_range` instead. Err(ProviderError::UnsupportedProvider) @@ -269,15 +271,13 @@ impl TransactionsProvider for StaticFileJarProvider<'_, N> { fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { let range = to_range(range); let mut cursor = self.cursor()?; let mut txes = Vec::with_capacity((range.end - range.start) as usize); for num in range { - if let Some(tx) = - cursor.get_one::>(num.into())? - { + if let Some(tx) = cursor.get_one::>(num.into())? { txes.push(tx) } } @@ -289,19 +289,20 @@ impl TransactionsProvider for StaticFileJarProvider<'_, N> { range: impl RangeBounds, ) -> ProviderResult> { let txs = self.transactions_by_tx_range(range)?; - TransactionSignedNoHash::recover_signers(&txs, txs.len()) - .ok_or(ProviderError::SenderRecoveryError) + recover_signers(&txs, txs.len()).ok_or(ProviderError::SenderRecoveryError) } fn transaction_sender(&self, num: TxNumber) -> ProviderResult> { Ok(self .cursor()? - .get_one::>(num.into())? + .get_one::>(num.into())? .and_then(|tx| tx.recover_signer())) } } -impl ReceiptProvider for StaticFileJarProvider<'_, N> { +impl> ReceiptProvider + for StaticFileJarProvider<'_, N> +{ fn receipt(&self, num: TxNumber) -> ProviderResult> { self.cursor()?.get_one::>(num.into()) } diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index bee42fdac83..14821fde547 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -9,6 +9,7 @@ use crate::{ }; use alloy_consensus::Header; use alloy_eips::{ + eip2718::Encodable2718, eip4895::{Withdrawal, Withdrawals}, BlockHashOrNumber, }; @@ -19,14 +20,15 @@ use parking_lot::RwLock; use reth_chainspec::{ChainInfo, ChainSpecProvider}; use reth_db::{ lockfile::StorageLock, - static_file::{iter_static_files, HeaderMask, ReceiptMask, StaticFileCursor, TransactionMask}, + static_file::{ + iter_static_files, BlockHashMask, HeaderMask, HeaderWithHashMask, ReceiptMask, + StaticFileCursor, TDWithHashMask, TransactionMask, + }, + table::{Decompress, Value}, tables, }; use reth_db_api::{ - cursor::DbCursorRO, - models::{CompactU256, StoredBlockBodyIndices}, - table::Table, - transaction::DbTx, + cursor::DbCursorRO, models::StoredBlockBodyIndices, table::Table, transaction::DbTx, }; use reth_nippy_jar::{NippyJar, NippyJarChecker, CONFIG_FILE_EXTENSION}; use reth_node_types::NodePrimitives; @@ -35,9 +37,11 @@ use reth_primitives::{ find_fixed_range, HighestStaticFiles, SegmentHeader, SegmentRangeInclusive, DEFAULT_BLOCKS_PER_STATIC_FILE, }, + transaction::recover_signers, Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, - StaticFileSegment, TransactionMeta, TransactionSigned, TransactionSignedNoHash, + StaticFileSegment, TransactionMeta, TransactionSignedNoHash, }; +use reth_primitives_traits::SignedTransaction; use reth_stages_types::{PipelineTarget, StageId}; use reth_storage_api::DBProvider; use reth_storage_errors::provider::{ProviderError, ProviderResult}; @@ -1236,7 +1240,7 @@ impl HeaderProvider for 
StaticFileProvider { self.find_static_file(StaticFileSegment::Headers, |jar_provider| { Ok(jar_provider .cursor()? - .get_two::>(block_hash.into())? + .get_two::>(block_hash.into())? .and_then(|(header, hash)| { if &hash == block_hash { return Some(header) @@ -1262,7 +1266,7 @@ impl HeaderProvider for StaticFileProvider { self.find_static_file(StaticFileSegment::Headers, |jar_provider| { Ok(jar_provider .cursor()? - .get_two::>(block_hash.into())? + .get_two::(block_hash.into())? .and_then(|(td, hash)| (&hash == block_hash).then_some(td.0))) }) } @@ -1310,7 +1314,7 @@ impl HeaderProvider for StaticFileProvider { to_range(range), |cursor, number| { Ok(cursor - .get_two::>(number.into())? + .get_two::>(number.into())? .map(|(header, hash)| SealedHeader::new(header, hash))) }, predicate, @@ -1331,13 +1335,15 @@ impl BlockHashReader for StaticFileProvider { self.fetch_range_with_predicate( StaticFileSegment::Headers, start..end, - |cursor, number| cursor.get_one::>(number.into()), + |cursor, number| cursor.get_one::(number.into()), |_| true, ) } } -impl ReceiptProvider for StaticFileProvider { +impl> ReceiptProvider + for StaticFileProvider +{ fn receipt(&self, num: TxNumber) -> ProviderResult> { self.get_segment_provider_from_transaction(StaticFileSegment::Receipts, num, None) .and_then(|provider| provider.receipt(num)) @@ -1374,7 +1380,9 @@ impl ReceiptProvider for StaticFileProvider { } } -impl TransactionsProviderExt for StaticFileProvider { +impl> TransactionsProviderExt + for StaticFileProvider +{ fn transaction_hashes_by_range( &self, tx_range: Range, @@ -1435,13 +1443,17 @@ impl TransactionsProviderExt for StaticFileProvider { } } -impl TransactionsProvider for StaticFileProvider { +impl> TransactionsProvider + for StaticFileProvider +{ + type Transaction = N::SignedTx; + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { self.find_static_file(StaticFileSegment::Transactions, |jar_provider| { let mut cursor = jar_provider.cursor()?; if cursor - .get_one::>((&tx_hash).into())? - .and_then(|tx| (tx.hash() == tx_hash).then_some(tx)) + .get_one::>((&tx_hash).into())? + .and_then(|tx| (tx.trie_hash() == tx_hash).then_some(tx)) .is_some() { Ok(cursor.number()) @@ -1451,7 +1463,7 @@ impl TransactionsProvider for StaticFileProvider { }) } - fn transaction_by_id(&self, num: TxNumber) -> ProviderResult> { + fn transaction_by_id(&self, num: TxNumber) -> ProviderResult> { self.get_segment_provider_from_transaction(StaticFileSegment::Transactions, num, None) .and_then(|provider| provider.transaction_by_id(num)) .or_else(|err| { @@ -1463,12 +1475,12 @@ impl TransactionsProvider for StaticFileProvider { }) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, num: TxNumber, - ) -> ProviderResult> { + ) -> ProviderResult> { self.get_segment_provider_from_transaction(StaticFileSegment::Transactions, num, None) - .and_then(|provider| provider.transaction_by_id_no_hash(num)) + .and_then(|provider| provider.transaction_by_id_unhashed(num)) .or_else(|err| { if let ProviderError::MissingStaticFileTx(_, _) = err { Ok(None) @@ -1478,20 +1490,19 @@ impl TransactionsProvider for StaticFileProvider { }) } - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { self.find_static_file(StaticFileSegment::Transactions, |jar_provider| { Ok(jar_provider .cursor()? - .get_one::>((&hash).into())? - .map(|tx| tx.with_hash()) - .and_then(|tx| (tx.hash_ref() == &hash).then_some(tx))) + .get_one::>((&hash).into())? 
+ .and_then(|tx| (tx.trie_hash() == hash).then_some(tx))) }) } fn transaction_by_hash_with_meta( &self, _hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } @@ -1504,7 +1515,7 @@ impl TransactionsProvider for StaticFileProvider { fn transactions_by_block( &self, _block_id: BlockHashOrNumber, - ) -> ProviderResult>> { + ) -> ProviderResult>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } @@ -1512,7 +1523,7 @@ impl TransactionsProvider for StaticFileProvider { fn transactions_by_block_range( &self, _range: impl RangeBounds, - ) -> ProviderResult>> { + ) -> ProviderResult>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } @@ -1520,13 +1531,11 @@ impl TransactionsProvider for StaticFileProvider { fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.fetch_range_with_predicate( StaticFileSegment::Transactions, to_range(range), - |cursor, number| { - cursor.get_one::>(number.into()) - }, + |cursor, number| cursor.get_one::>(number.into()), |_| true, ) } @@ -1536,12 +1545,11 @@ impl TransactionsProvider for StaticFileProvider { range: impl RangeBounds, ) -> ProviderResult> { let txes = self.transactions_by_tx_range(range)?; - TransactionSignedNoHash::recover_signers(&txes, txes.len()) - .ok_or(ProviderError::SenderRecoveryError) + recover_signers(&txes, txes.len()).ok_or(ProviderError::SenderRecoveryError) } fn transaction_sender(&self, id: TxNumber) -> ProviderResult> { - Ok(self.transaction_by_id_no_hash(id)?.and_then(|tx| tx.recover_signer())) + Ok(self.transaction_by_id_unhashed(id)?.and_then(|tx| tx.recover_signer())) } } @@ -1569,7 +1577,7 @@ impl BlockNumReader for StaticFileProvider { } } -impl BlockReader for StaticFileProvider { +impl> BlockReader for StaticFileProvider { fn find_block_by_hash( &self, _hash: B256, diff --git a/crates/storage/provider/src/providers/static_file/mod.rs b/crates/storage/provider/src/providers/static_file/mod.rs index 30b8d0344da..673451de65f 100644 --- a/crates/storage/provider/src/providers/static_file/mod.rs +++ b/crates/storage/provider/src/providers/static_file/mod.rs @@ -68,7 +68,7 @@ mod tests { use reth_db_api::transaction::DbTxMut; use reth_primitives::{ static_file::{find_fixed_range, SegmentRangeInclusive, DEFAULT_BLOCKS_PER_STATIC_FILE}, - Receipt, TransactionSignedNoHash, + EthPrimitives, Receipt, TransactionSigned, }; use reth_storage_api::{ReceiptProvider, TransactionsProvider}; use reth_testing_utils::generators::{self, random_header_range}; @@ -304,20 +304,20 @@ mod tests { /// * `10..=19`: no txs/receipts /// * `20..=29`: only one tx/receipt fn setup_tx_based_scenario( - sf_rw: &StaticFileProvider<()>, + sf_rw: &StaticFileProvider, segment: StaticFileSegment, blocks_per_file: u64, ) { fn setup_block_ranges( - writer: &mut StaticFileProviderRWRefMut<'_, ()>, - sf_rw: &StaticFileProvider<()>, + writer: &mut StaticFileProviderRWRefMut<'_, EthPrimitives>, + sf_rw: &StaticFileProvider, segment: StaticFileSegment, block_range: &Range, mut tx_count: u64, next_tx_num: &mut u64, ) { let mut receipt = Receipt::default(); - let mut tx = TransactionSignedNoHash::default(); + let mut tx = TransactionSigned::default(); for block in block_range.clone() { writer.increment_block(block).unwrap(); @@ -415,7 +415,7 @@ mod tests { #[allow(clippy::too_many_arguments)] fn prune_and_validate( - sf_rw: &StaticFileProvider<()>, 
+ sf_rw: &StaticFileProvider, static_dir: impl AsRef, segment: StaticFileSegment, prune_count: u64, diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index 5951dbb751f..83954bde352 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -558,7 +558,10 @@ impl StaticFileProviderRW { /// empty blocks and this function wouldn't be called. /// /// Returns the current [`TxNumber`] as seen in the static file. - pub fn append_transaction(&mut self, tx_num: TxNumber, tx: impl Compact) -> ProviderResult<()> { + pub fn append_transaction(&mut self, tx_num: TxNumber, tx: &N::SignedTx) -> ProviderResult<()> + where + N::SignedTx: Compact, + { let start = Instant::now(); self.ensure_no_queued_prune()?; diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 3259eee2bfb..fdded2807aa 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -88,9 +88,14 @@ pub(crate) static TEST_BLOCK: LazyLock = LazyLock::new(|| SealedBlo hex!("cf7b274520720b50e6a4c3e5c4d553101f44945396827705518ce17cb7219a42").into(), ), body: BlockBody { - transactions: vec![TransactionSigned { - hash: hex!("3541dd1d17e76adeb25dcf2b0a9b60a1669219502e58dcf26a2beafbfb550397").into(), - signature: Signature::new( + transactions: vec![TransactionSigned::new( + Transaction::Legacy(TxLegacy { + gas_price: 10, + gas_limit: 400_000, + to: TxKind::Call(hex!("095e7baea6a6c7c4c2dfeb977efac326af552d87").into()), + ..Default::default() + }), + Signature::new( U256::from_str( "51983300959770368863831494747186777928121405155922056726144551509338672451120", ) @@ -101,13 +106,8 @@ pub(crate) static TEST_BLOCK: LazyLock = LazyLock::new(|| SealedBlo .unwrap(), false, ), - transaction: Transaction::Legacy(TxLegacy { - gas_price: 10, - gas_limit: 400_000, - to: TxKind::Call(hex!("095e7baea6a6c7c4c2dfeb977efac326af552d87").into()), - ..Default::default() - }), - }], + b256!("3541dd1d17e76adeb25dcf2b0a9b60a1669219502e58dcf26a2beafbfb550397"), + )], ..Default::default() }, }); diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 772e7a32967..8924ee55b5b 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -1,9 +1,9 @@ use crate::{ traits::{BlockSource, ReceiptProvider}, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, - ChainSpecProvider, ChangeSetReader, DatabaseProvider, EvmEnvProvider, HeaderProvider, - ReceiptProviderIdExt, StateProvider, StateProviderBox, StateProviderFactory, StateReader, - StateRootProvider, TransactionVariant, TransactionsProvider, WithdrawalsProvider, + ChainSpecProvider, ChangeSetReader, DatabaseProvider, EthStorage, EvmEnvProvider, + HeaderProvider, ReceiptProviderIdExt, StateProvider, StateProviderBox, StateProviderFactory, + StateReader, StateRootProvider, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; use alloy_consensus::{constants::EMPTY_ROOT_HASH, Header}; use alloy_eips::{ @@ -23,9 +23,8 @@ use reth_evm::ConfigureEvmEnv; use reth_execution_types::ExecutionOutcome; use reth_node_types::NodeTypes; use reth_primitives::{ - Account, Block, BlockWithSenders, Bytecode, GotExpected, Receipt, SealedBlock, + Account, Block, BlockWithSenders, Bytecode, 
EthPrimitives, GotExpected, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, - TransactionSignedNoHash, }; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ @@ -169,9 +168,10 @@ impl MockEthProvider { pub struct MockNode; impl NodeTypes for MockNode { - type Primitives = (); + type Primitives = EthPrimitives; type ChainSpec = ChainSpec; type StateCommitment = MerklePatriciaTrie; + type Storage = EthStorage; } impl DatabaseProviderFactory for MockEthProvider { @@ -254,6 +254,8 @@ impl ChainSpecProvider for MockEthProvider { } impl TransactionsProvider for MockEthProvider { + type Transaction = TransactionSigned; + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { let lock = self.blocks.lock(); let tx_number = lock @@ -265,7 +267,7 @@ impl TransactionsProvider for MockEthProvider { Ok(tx_number) } - fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { let lock = self.blocks.lock(); let transaction = lock.values().flat_map(|block| &block.body.transactions).nth(id as usize).cloned(); @@ -273,16 +275,13 @@ impl TransactionsProvider for MockEthProvider { Ok(transaction) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, id: TxNumber, - ) -> ProviderResult> { + ) -> ProviderResult> { let lock = self.blocks.lock(); - let transaction = lock - .values() - .flat_map(|block| &block.body.transactions) - .nth(id as usize) - .map(|tx| Into::::into(tx.clone())); + let transaction = + lock.values().flat_map(|block| &block.body.transactions).nth(id as usize).cloned(); Ok(transaction) } @@ -296,7 +295,7 @@ impl TransactionsProvider for MockEthProvider { fn transaction_by_hash_with_meta( &self, hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { let lock = self.blocks.lock(); for (block_hash, block) in lock.iter() { for (index, tx) in block.body.transactions.iter().enumerate() { @@ -332,14 +331,14 @@ impl TransactionsProvider for MockEthProvider { fn transactions_by_block( &self, id: BlockHashOrNumber, - ) -> ProviderResult>> { + ) -> ProviderResult>> { Ok(self.block(id)?.map(|b| b.body.transactions)) } fn transactions_by_block_range( &self, range: impl RangeBounds, - ) -> ProviderResult>> { + ) -> ProviderResult>> { // init btreemap so we can return in order let mut map = BTreeMap::new(); for (_, block) in self.blocks.lock().iter() { @@ -354,14 +353,14 @@ impl TransactionsProvider for MockEthProvider { fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { let lock = self.blocks.lock(); let transactions = lock .values() .flat_map(|block| &block.body.transactions) .enumerate() .filter(|&(tx_number, _)| range.contains(&(tx_number as TxNumber))) - .map(|(_, tx)| tx.clone().into()) + .map(|(_, tx)| tx.clone()) .collect(); Ok(transactions) diff --git a/crates/storage/provider/src/test_utils/mod.rs b/crates/storage/provider/src/test_utils/mod.rs index c0e80930b31..2c3795573c2 100644 --- a/crates/storage/provider/src/test_utils/mod.rs +++ b/crates/storage/provider/src/test_utils/mod.rs @@ -1,4 +1,7 @@ -use crate::{providers::StaticFileProvider, HashingWriter, ProviderFactory, TrieWriter}; +use crate::{ + providers::{ProviderNodeTypes, StaticFileProvider}, + HashingWriter, ProviderFactory, TrieWriter, +}; use alloy_primitives::B256; use reth_chainspec::{ChainSpec, MAINNET}; use reth_db::{ @@ -6,7 +9,7 @@ use reth_db::{ DatabaseEnv, }; use reth_errors::ProviderResult; 
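For context on why the mock types change: under the new `NodeTypesForProvider` bound a unit `Primitives` type no longer qualifies, so node types must name concrete primitives and a storage implementation. A simplified mirror of the wiring, with stand-in traits and types rather than reth's:

trait NodeTypesLike {
    type Primitives; // reth additionally requires FullNodePrimitives here
    type Storage;    // reth additionally requires ChainStorage here
}

struct EthPrimitivesLike;
struct EthStorageLike;
struct TestNode;

impl NodeTypesLike for TestNode {
    type Primitives = EthPrimitivesLike; // previously `()` in the mocks
    type Storage = EthStorageLike;       // newly required associated type
}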
-use reth_node_types::{NodeTypesWithDB, NodeTypesWithDBAdapter}; +use reth_node_types::NodeTypesWithDBAdapter; use reth_primitives::{Account, StorageEntry}; use reth_trie::StateRoot; use reth_trie_db::DatabaseStateRoot; @@ -22,10 +25,11 @@ pub use reth_chain_state::test_utils::TestCanonStateSubscriptions; /// Mock [`reth_node_types::NodeTypes`] for testing. pub type MockNodeTypes = reth_node_types::AnyNodeTypesWithEngine< - (), + reth_primitives::EthPrimitives, reth_ethereum_engine_primitives::EthEngineTypes, reth_chainspec::ChainSpec, reth_trie_db::MerklePatriciaTrie, + crate::EthStorage, >; /// Mock [`reth_node_types::NodeTypesWithDB`] for testing. @@ -51,7 +55,7 @@ pub fn create_test_provider_factory_with_chain_spec( } /// Inserts the genesis alloc from the provided chain spec into the trie. -pub fn insert_genesis>( +pub fn insert_genesis>( provider_factory: &ProviderFactory, chain_spec: Arc, ) -> ProviderResult { diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index d12539a2c27..9a88c8c9ab7 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -23,7 +23,7 @@ use reth_errors::ProviderError; use reth_evm::ConfigureEvmEnv; use reth_primitives::{ Account, Block, BlockWithSenders, Bytecode, Receipt, SealedBlock, SealedBlockWithSenders, - SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, + SealedHeader, TransactionMeta, TransactionSigned, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; @@ -192,29 +192,31 @@ impl BlockIdReader for NoopProvider { } impl TransactionsProvider for NoopProvider { + type Transaction = TransactionSigned; + fn transaction_id(&self, _tx_hash: TxHash) -> ProviderResult> { Ok(None) } - fn transaction_by_id(&self, _id: TxNumber) -> ProviderResult> { + fn transaction_by_id(&self, _id: TxNumber) -> ProviderResult> { Ok(None) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, _id: TxNumber, - ) -> ProviderResult> { + ) -> ProviderResult> { Ok(None) } - fn transaction_by_hash(&self, _hash: TxHash) -> ProviderResult> { + fn transaction_by_hash(&self, _hash: TxHash) -> ProviderResult> { Ok(None) } fn transaction_by_hash_with_meta( &self, _hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { Ok(None) } @@ -225,21 +227,21 @@ impl TransactionsProvider for NoopProvider { fn transactions_by_block( &self, _block_id: BlockHashOrNumber, - ) -> ProviderResult>> { + ) -> ProviderResult>> { Ok(None) } fn transactions_by_block_range( &self, _range: impl RangeBounds, - ) -> ProviderResult>> { + ) -> ProviderResult>> { Ok(Vec::default()) } fn transactions_by_tx_range( &self, _range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { Ok(Vec::default()) } diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index 50fb032923d..c2ce477051d 100644 --- a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -1,24 +1,53 @@ +use alloy_consensus::Header; use alloy_primitives::BlockNumber; use reth_db_api::models::StoredBlockBodyIndices; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::SealedBlockWithSenders; use reth_storage_errors::provider::ProviderResult; use reth_trie::{updates::TrieUpdates, HashedPostStateSorted}; -use std::ops::RangeInclusive; + +/// An enum that represents the storage location for a piece of data. 
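Usage sketch for the `StorageLocation` enum this diff introduces (see its definition in the `traits/block.rs` hunk below): callers branch on the two predicates rather than matching variants directly. The definition is mirrored here only to keep the example self-contained:

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum StorageLocation { StaticFiles, Database, Both }

impl StorageLocation {
    const fn static_files(&self) -> bool { matches!(self, Self::StaticFiles | Self::Both) }
    const fn database(&self) -> bool { matches!(self, Self::Database | Self::Both) }
}

fn main() {
    // A writer inspects the predicates to decide which backends to initialize.
    let loc = StorageLocation::Both;
    assert!(loc.static_files() && loc.database());
    assert!(StorageLocation::StaticFiles.static_files());
    assert!(!StorageLocation::Database.static_files());
}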
+#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum StorageLocation { + /// Write only to static files. + StaticFiles, + /// Write only to the database. + Database, + /// Write to both the database and static files. + Both, +} + +impl StorageLocation { + /// Returns true if the storage location includes static files. + pub const fn static_files(&self) -> bool { + matches!(self, Self::StaticFiles | Self::Both) + } + + /// Returns true if the storage location includes the database. + pub const fn database(&self) -> bool { + matches!(self, Self::Database | Self::Both) + } +} /// BlockExecution Writer #[auto_impl::auto_impl(&, Arc, Box)] pub trait BlockExecutionWriter: BlockWriter + Send + Sync { - /// Take range of blocks and its execution result - fn take_block_and_execution_range( + /// Take all of the blocks above the provided number and their execution result + /// + /// The passed block number will stay in the database. + fn take_block_and_execution_above( &self, - range: RangeInclusive, + block: BlockNumber, + remove_transactions_from: StorageLocation, ) -> ProviderResult; - /// Remove range of blocks and its execution result - fn remove_block_and_execution_range( + /// Remove all of the blocks above the provided number and their execution result + /// + /// The passed block number will stay in the database. + fn remove_block_and_execution_above( &self, - range: RangeInclusive, + block: BlockNumber, + remove_transactions_from: StorageLocation, ) -> ProviderResult<()>; } @@ -40,8 +69,11 @@ pub trait BlockWriter: Send + Sync { /// /// Return [StoredBlockBodyIndices] that contains indices of the first and last transactions and /// transition in the block. - fn insert_block(&self, block: SealedBlockWithSenders) - -> ProviderResult; + fn insert_block( + &self, + block: SealedBlockWithSenders, + write_transactions_to: StorageLocation, + ) -> ProviderResult; /// Appends a batch of block bodies extending the canonical chain. This is invoked during /// `Bodies` stage and does not write to `TransactionHashNumbers` and `TransactionSenders` @@ -50,7 +82,24 @@ pub trait BlockWriter: Send + Sync { /// Bodies are passed as [`Option`]s, if body is `None` the corresponding block is empty. fn append_block_bodies( &self, - bodies: impl Iterator)>, + bodies: Vec<(BlockNumber, Option)>, + write_transactions_to: StorageLocation, + ) -> ProviderResult<()>; + + /// Removes all blocks above the given block number from the database. + /// + /// Note: This does not remove state or execution data. + fn remove_blocks_above( + &self, + block: BlockNumber, + remove_transactions_from: StorageLocation, + ) -> ProviderResult<()>; + + /// Removes all block bodies above the given block number from the database. + fn remove_bodies_above( + &self, + block: BlockNumber, + remove_transactions_from: StorageLocation, ) -> ProviderResult<()>; /// Appends a batch of sealed blocks to the blockchain, including sender information, and @@ -69,7 +118,7 @@ pub trait BlockWriter: Send + Sync { /// Returns `Ok(())` on success, or an error if any operation fails. 
fn append_blocks_with_state( &self, - blocks: Vec, + blocks: Vec>, execution_outcome: ExecutionOutcome, hashed_state: HashedPostStateSorted, trie_updates: TrieUpdates, diff --git a/crates/storage/provider/src/traits/finalized_block.rs b/crates/storage/provider/src/traits/finalized_block.rs deleted file mode 100644 index 98a6d9d0e34..00000000000 --- a/crates/storage/provider/src/traits/finalized_block.rs +++ /dev/null @@ -1,23 +0,0 @@ -use alloy_primitives::BlockNumber; -use reth_errors::ProviderResult; - -/// Functionality to read the last known chain blocks from the database. -pub trait ChainStateBlockReader: Send + Sync { - /// Returns the last finalized block number. - /// - /// If no finalized block has been written yet, this returns `None`. - fn last_finalized_block_number(&self) -> ProviderResult>; - /// Returns the last safe block number. - /// - /// If no safe block has been written yet, this returns `None`. - fn last_safe_block_number(&self) -> ProviderResult>; -} - -/// Functionality to write the last known chain blocks to the database. -pub trait ChainStateBlockWriter: Send + Sync { - /// Saves the given finalized block number in the DB. - fn save_finalized_block_number(&self, block_number: BlockNumber) -> ProviderResult<()>; - - /// Saves the given safe block number in the DB. - fn save_safe_block_number(&self, block_number: BlockNumber) -> ProviderResult<()>; -} diff --git a/crates/storage/provider/src/traits/full.rs b/crates/storage/provider/src/traits/full.rs index 4998e974165..9bb357e33a3 100644 --- a/crates/storage/provider/src/traits/full.rs +++ b/crates/storage/provider/src/traits/full.rs @@ -7,13 +7,13 @@ use crate::{ }; use reth_chain_state::{CanonStateSubscriptions, ForkChoiceSubscriptions}; use reth_chainspec::EthereumHardforks; -use reth_node_types::NodeTypesWithDB; +use reth_node_types::{NodeTypesWithDB, TxTy}; /// Helper trait to unify all provider traits for simplicity. 
pub trait FullProvider: DatabaseProviderFactory - + StaticFileProviderFactory - + BlockReaderIdExt + + StaticFileProviderFactory + + BlockReaderIdExt> + AccountReader + StateProviderFactory + EvmEnvProvider @@ -30,8 +30,8 @@ pub trait FullProvider: impl FullProvider for T where T: DatabaseProviderFactory - + StaticFileProviderFactory - + BlockReaderIdExt + + StaticFileProviderFactory + + BlockReaderIdExt> + AccountReader + StateProviderFactory + EvmEnvProvider diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index 69f053936bb..a772204d0c1 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -25,6 +25,3 @@ pub use full::{FullProvider, FullRpcProvider}; mod tree_viewer; pub use tree_viewer::TreeViewer; - -mod finalized_block; -pub use finalized_block::{ChainStateBlockReader, ChainStateBlockWriter}; diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index e329e44bb5b..c26623487f6 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -2,7 +2,7 @@ use crate::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter as SfWriter}, writer::static_file::StaticFileWriter, BlockExecutionWriter, BlockWriter, HistoryWriter, StateChangeWriter, StateWriter, - StaticFileProviderFactory, TrieWriter, + StaticFileProviderFactory, StorageLocation, TrieWriter, }; use alloy_consensus::Header; use alloy_primitives::{BlockNumber, B256, U256}; @@ -15,7 +15,7 @@ use reth_db::{ }; use reth_errors::{ProviderError, ProviderResult}; use reth_execution_types::ExecutionOutcome; -use reth_primitives::{SealedBlock, StaticFileSegment, TransactionSignedNoHash}; +use reth_primitives::{BlockBody, SealedBlock, StaticFileSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ DBProvider, HeaderProvider, ReceiptWriter, StageCheckpointWriter, TransactionsProviderExt, @@ -148,7 +148,7 @@ impl UnifiedStorageWriter<'_, (), ()> { impl UnifiedStorageWriter<'_, ProviderDB, &StaticFileProvider> where ProviderDB: DBProvider - + BlockWriter + + BlockWriter + TransactionsProviderExt + StateChangeWriter + TrieWriter @@ -195,7 +195,7 @@ where for block in blocks { let sealed_block = block.block().clone().try_with_senders_unchecked(block.senders().clone()).unwrap(); - self.database().insert_block(sealed_block)?; + self.database().insert_block(sealed_block, StorageLocation::Both)?; self.save_header_and_transactions(block.block.clone())?; // Write state and changesets to the database. 
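Call-site sketch of the pattern the next hunks adopt, with simplified stand-in signatures: passing `StorageLocation::Both` makes a single `insert_block` call fan transactions out to both the database and static files, which is what allows the separate static-file append step to be deleted below:

enum StorageLocation { StaticFiles, Database, Both }
struct BlockLike;

trait BlockWriterLike {
    fn insert_block(&self, block: BlockLike, loc: StorageLocation) -> Result<(), String>;
}

// Before this change the caller did insert_block(block) and then separately
// appended the block's transactions to static files; now the destination
// travels with the call.
fn save_block(writer: &impl BlockWriterLike, block: BlockLike) -> Result<(), String> {
    writer.insert_block(block, StorageLocation::Both)
}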
@@ -246,25 +246,8 @@ where .save_stage_checkpoint(StageId::Headers, StageCheckpoint::new(block.number))?; } - { - let transactions_writer = - self.static_file().get_writer(block.number, StaticFileSegment::Transactions)?; - let mut storage_writer = - UnifiedStorageWriter::from(self.database(), transactions_writer); - let no_hash_transactions = block - .body - .transactions - .clone() - .into_iter() - .map(TransactionSignedNoHash::from) - .collect(); - storage_writer.append_transactions_from_blocks( - block.header().number, - std::iter::once(&no_hash_transactions), - )?; - self.database() - .save_stage_checkpoint(StageId::Bodies, StageCheckpoint::new(block.number))?; - } + self.database() + .save_stage_checkpoint(StageId::Bodies, StageCheckpoint::new(block.number))?; Ok(()) } @@ -285,13 +268,12 @@ where let tx_range = self .database() .transaction_range_by_block_range(block_number + 1..=highest_static_file_block)?; - let total_txs = tx_range.end().saturating_sub(*tx_range.start()); + // We are using end + 1 - start here because the returned range is inclusive. + let total_txs = (tx_range.end() + 1).saturating_sub(*tx_range.start()); // IMPORTANT: we use `block_number+1` to make sure we remove only what is ABOVE the block debug!(target: "provider::storage_writer", ?block_number, "Removing blocks from database above block_number"); - self.database().remove_block_and_execution_range( - block_number + 1..=self.database().last_block_number()?, - )?; + self.database().remove_block_and_execution_above(block_number, StorageLocation::Both)?; // IMPORTANT: we use `highest_static_file_block.saturating_sub(block_number)` to make sure // we remove only what is ABOVE the block. @@ -303,10 +285,6 @@ where .get_writer(block_number, StaticFileSegment::Headers)? .prune_headers(highest_static_file_block.saturating_sub(block_number))?; - self.static_file() - .get_writer(block_number, StaticFileSegment::Transactions)? - .prune_transactions(total_txs, block_number)?; - if !self.database().prune_modes_ref().has_receipts_pruning() { self.static_file() .get_writer(block_number, StaticFileSegment::Receipts)? @@ -377,56 +355,6 @@ where Ok(td) } - - /// Appends transactions to static files, using the - /// [`BlockBodyIndices`](tables::BlockBodyIndices) table to determine the transaction number - /// when appending to static files. - /// - /// NOTE: The static file writer used to construct this [`UnifiedStorageWriter`] MUST be a - /// writer for the Transactions segment. 
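A quick worked check of the `total_txs` off-by-one fix in the hunk above: an inclusive range `start..=end` contains `end + 1 - start` elements, so the old `end - start` undercounted by one transaction:

fn main() {
    let (start, end) = (10u64, 12u64); // tx_range covering tx 10, 11, 12
    let old_total = end.saturating_sub(start);       // 2, wrong
    let new_total = (end + 1).saturating_sub(start); // 3, correct
    assert_eq!((start..=end).count() as u64, new_total);
    assert_ne!(old_total, new_total);
}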
- pub fn append_transactions_from_blocks( - &mut self, - initial_block_number: BlockNumber, - transactions: impl Iterator, - ) -> ProviderResult<()> - where - T: Borrow>, - { - self.ensure_static_file_segment(StaticFileSegment::Transactions)?; - - let mut bodies_cursor = - self.database().tx_ref().cursor_read::()?; - - let mut last_tx_idx = None; - for (idx, transactions) in transactions.enumerate() { - let block_number = initial_block_number + idx as u64; - - let mut first_tx_index = - bodies_cursor.seek_exact(block_number)?.map(|(_, indices)| indices.first_tx_num()); - - // If there are no indices, that means there have been no transactions - // - // So instead of returning an error, use zero - if block_number == initial_block_number && first_tx_index.is_none() { - first_tx_index = Some(0); - } - - let mut tx_index = first_tx_index - .or(last_tx_idx) - .ok_or(ProviderError::BlockBodyIndicesNotFound(block_number))?; - - for tx in transactions.borrow() { - self.static_file_mut().append_transaction(tx_index, tx)?; - tx_index += 1; - } - - self.static_file_mut().increment_block(block_number)?; - - // update index - last_tx_idx = Some(tx_index); - } - Ok(()) - } } impl diff --git a/crates/storage/storage-api/Cargo.toml b/crates/storage/storage-api/Cargo.toml index 2b13f6332f8..c059eb0d6e9 100644 --- a/crates/storage/storage-api/Cargo.toml +++ b/crates/storage/storage-api/Cargo.toml @@ -18,6 +18,7 @@ reth-db-models.workspace = true reth-db-api.workspace = true reth-execution-types.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-prune-types.workspace = true reth-stages-types.workspace = true reth-storage-errors.workspace = true diff --git a/crates/storage/storage-api/src/block.rs b/crates/storage/storage-api/src/block.rs index 929f7ecca43..37c7857f1c2 100644 --- a/crates/storage/storage-api/src/block.rs +++ b/crates/storage/storage-api/src/block.rs @@ -267,3 +267,24 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { /// Returns `None` if block is not found. fn ommers_by_id(&self, id: BlockId) -> ProviderResult>>; } + +/// Functionality to read the last known chain blocks from the database. +pub trait ChainStateBlockReader: Send + Sync { + /// Returns the last finalized block number. + /// + /// If no finalized block has been written yet, this returns `None`. + fn last_finalized_block_number(&self) -> ProviderResult>; + /// Returns the last safe block number. + /// + /// If no safe block has been written yet, this returns `None`. + fn last_safe_block_number(&self) -> ProviderResult>; +} + +/// Functionality to write the last known chain blocks to the database. +pub trait ChainStateBlockWriter: Send + Sync { + /// Saves the given finalized block number in the DB. + fn save_finalized_block_number(&self, block_number: BlockNumber) -> ProviderResult<()>; + + /// Saves the given safe block number in the DB. 
+    fn save_safe_block_number(&self, block_number: BlockNumber) -> ProviderResult<()>;
+}
diff --git a/crates/storage/storage-api/src/chain.rs b/crates/storage/storage-api/src/chain.rs
new file mode 100644
index 00000000000..d5228bdddf7
--- /dev/null
+++ b/crates/storage/storage-api/src/chain.rs
@@ -0,0 +1,91 @@
+use crate::DBProvider;
+use alloy_primitives::BlockNumber;
+use reth_db::{
+    cursor::DbCursorRW,
+    models::{StoredBlockOmmers, StoredBlockWithdrawals},
+    tables,
+    transaction::DbTxMut,
+    DbTxUnwindExt,
+};
+use reth_primitives_traits::{Block, BlockBody, FullNodePrimitives};
+use reth_storage_errors::provider::ProviderResult;
+
+/// Trait that implements how block bodies are written to the storage.
+///
+/// Note: Within the current abstraction, this should only write to tables unrelated to
+/// transactions. Writing of transactions is handled separately.
+#[auto_impl::auto_impl(&, Arc)]
+pub trait BlockBodyWriter<Provider, Body: BlockBody> {
+    /// Writes a set of block bodies to the storage.
+    fn write_block_bodies(
+        &self,
+        provider: &Provider,
+        bodies: Vec<(BlockNumber, Option<Body>)>,
+    ) -> ProviderResult<()>;
+
+    /// Removes all block bodies above the given block number from the database.
+    fn remove_block_bodies_above(
+        &self,
+        provider: &Provider,
+        block: BlockNumber,
+    ) -> ProviderResult<()>;
+}
+
+/// Trait that implements how chain-specific types are written to the storage.
+pub trait ChainStorageWriter<Provider, Primitives: FullNodePrimitives>:
+    BlockBodyWriter<Provider, <Primitives::Block as Block>::Body>
+{
+}
+impl<T, Provider, Primitives: FullNodePrimitives> ChainStorageWriter<Provider, Primitives> for T where
+    T: BlockBodyWriter<Provider, <Primitives::Block as Block>::Body>
+{
+}
+
+/// Ethereum storage implementation.
+#[derive(Debug, Default, Clone, Copy)]
+pub struct EthStorage;
+
+impl<Provider> BlockBodyWriter<Provider, reth_primitives::BlockBody> for EthStorage
+where
+    Provider: DBProvider<Tx: DbTxMut>,
+{
+    fn write_block_bodies(
+        &self,
+        provider: &Provider,
+        bodies: Vec<(u64, Option<reth_primitives::BlockBody>)>,
+    ) -> ProviderResult<()> {
+        let mut ommers_cursor = provider.tx_ref().cursor_write::<tables::BlockOmmers>()?;
+        let mut withdrawals_cursor =
+            provider.tx_ref().cursor_write::<tables::BlockWithdrawals>()?;
+
+        for (block_number, body) in bodies {
+            let Some(body) = body else { continue };
+
+            // Write ommers if any
+            if !body.ommers.is_empty() {
+                ommers_cursor.append(block_number, StoredBlockOmmers { ommers: body.ommers })?;
+            }
+
+            // Write withdrawals if any
+            if let Some(withdrawals) = body.withdrawals {
+                if !withdrawals.is_empty() {
+                    withdrawals_cursor
+                        .append(block_number, StoredBlockWithdrawals { withdrawals })?;
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    fn remove_block_bodies_above(
+        &self,
+        provider: &Provider,
+        block: BlockNumber,
+    ) -> ProviderResult<()> {
+        provider.tx_ref().unwind_table_by_num::<tables::BlockOmmers>(block)?;
+        provider.tx_ref().unwind_table_by_num::<tables::BlockWithdrawals>(block)?;
+
+        Ok(())
+    }
+}
diff --git a/crates/storage/storage-api/src/legacy.rs b/crates/storage/storage-api/src/legacy.rs
new file mode 100644
index 00000000000..e53a5d8bfa2
--- /dev/null
+++ b/crates/storage/storage-api/src/legacy.rs
@@ -0,0 +1,83 @@
+//! Traits used by the legacy execution engine.
+//!
+//! This module is scheduled for removal in the future.
+
+use alloy_eips::BlockNumHash;
+use alloy_primitives::{BlockHash, BlockNumber};
+use auto_impl::auto_impl;
+use reth_execution_types::ExecutionOutcome;
+use reth_storage_errors::provider::{ProviderError, ProviderResult};
+
+/// Blockchain trait provider that gives access to the blockchain state that is not yet committed
+/// (pending).
+pub trait BlockchainTreePendingStateProvider: Send + Sync {
+    /// Returns a state provider that includes all state changes of the given (pending) block hash.
+ /// + /// In other words, the state provider will return the state after all transactions of the given + /// hash have been executed. + fn pending_state_provider( + &self, + block_hash: BlockHash, + ) -> ProviderResult> { + self.find_pending_state_provider(block_hash) + .ok_or(ProviderError::StateForHashNotFound(block_hash)) + } + + /// Returns state provider if a matching block exists. + fn find_pending_state_provider( + &self, + block_hash: BlockHash, + ) -> Option>; +} + +/// Provides data required for post-block execution. +/// +/// This trait offers methods to access essential post-execution data, including the state changes +/// in accounts and storage, as well as block hashes for both the pending and canonical chains. +/// +/// The trait includes: +/// * [`ExecutionOutcome`] - Captures all account and storage changes in the pending chain. +/// * Block hashes - Provides access to the block hashes of both the pending chain and canonical +/// blocks. +#[auto_impl(&, Box)] +pub trait ExecutionDataProvider: Send + Sync { + /// Return the execution outcome. + fn execution_outcome(&self) -> &ExecutionOutcome; + /// Return block hash by block number of pending or canonical chain. + fn block_hash(&self, block_number: BlockNumber) -> Option; +} + +impl ExecutionDataProvider for ExecutionOutcome { + fn execution_outcome(&self) -> &ExecutionOutcome { + self + } + + /// Always returns [None] because we don't have any information about the block header. + fn block_hash(&self, _block_number: BlockNumber) -> Option { + None + } +} + +/// Fork data needed for execution on it. +/// +/// It contains a canonical fork, the block on what pending chain was forked from. +#[auto_impl(&, Box)] +pub trait BlockExecutionForkProvider { + /// Return canonical fork, the block on what post state was forked from. + /// + /// Needed to create state provider. + fn canonical_fork(&self) -> BlockNumHash; +} + +/// Provides comprehensive post-execution state data required for further execution. +/// +/// This trait is used to create a state provider over the pending state and is a combination of +/// [`ExecutionDataProvider`] and [`BlockExecutionForkProvider`]. +/// +/// The pending state includes: +/// * `ExecutionOutcome`: Contains all changes to accounts and storage within the pending chain. +/// * Block hashes: Represents hashes of both the pending chain and canonical blocks. +/// * Canonical fork: Denotes the block from which the pending chain forked. 
+pub trait FullExecutionDataProvider: ExecutionDataProvider + BlockExecutionForkProvider {} + +impl FullExecutionDataProvider for T where T: ExecutionDataProvider + BlockExecutionForkProvider {} diff --git a/crates/storage/storage-api/src/lib.rs b/crates/storage/storage-api/src/lib.rs index 7b7ad761476..de09e66f128 100644 --- a/crates/storage/storage-api/src/lib.rs +++ b/crates/storage/storage-api/src/lib.rs @@ -22,6 +22,9 @@ pub use block_id::*; mod block_hash; pub use block_hash::*; +mod chain; +pub use chain::*; + mod header; pub use header::*; @@ -64,3 +67,6 @@ mod hashing; pub use hashing::*; mod stats; pub use stats::*; + +mod legacy; +pub use legacy::*; diff --git a/crates/storage/storage-api/src/state.rs b/crates/storage/storage-api/src/state.rs index d37940f0478..3174489fc4a 100644 --- a/crates/storage/storage-api/src/state.rs +++ b/crates/storage/storage-api/src/state.rs @@ -3,12 +3,11 @@ use super::{ StorageRootProvider, }; use alloy_consensus::constants::KECCAK_EMPTY; -use alloy_eips::{BlockId, BlockNumHash, BlockNumberOrTag}; +use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::{Address, BlockHash, BlockNumber, StorageKey, StorageValue, B256, U256}; use auto_impl::auto_impl; -use reth_execution_types::ExecutionOutcome; use reth_primitives::Bytecode; -use reth_storage_errors::provider::{ProviderError, ProviderResult}; +use reth_storage_errors::provider::ProviderResult; /// Type alias of boxed [`StateProvider`]. pub type StateProviderBox = Box; @@ -167,77 +166,3 @@ pub trait StateProviderFactory: BlockIdReader + Send + Sync { /// If the block couldn't be found, returns `None`. fn pending_state_by_hash(&self, block_hash: B256) -> ProviderResult>; } - -/// Blockchain trait provider that gives access to the blockchain state that is not yet committed -/// (pending). -pub trait BlockchainTreePendingStateProvider: Send + Sync { - /// Returns a state provider that includes all state changes of the given (pending) block hash. - /// - /// In other words, the state provider will return the state after all transactions of the given - /// hash have been executed. - fn pending_state_provider( - &self, - block_hash: BlockHash, - ) -> ProviderResult> { - self.find_pending_state_provider(block_hash) - .ok_or(ProviderError::StateForHashNotFound(block_hash)) - } - - /// Returns state provider if a matching block exists. - fn find_pending_state_provider( - &self, - block_hash: BlockHash, - ) -> Option>; -} - -/// Provides data required for post-block execution. -/// -/// This trait offers methods to access essential post-execution data, including the state changes -/// in accounts and storage, as well as block hashes for both the pending and canonical chains. -/// -/// The trait includes: -/// * [`ExecutionOutcome`] - Captures all account and storage changes in the pending chain. -/// * Block hashes - Provides access to the block hashes of both the pending chain and canonical -/// blocks. -#[auto_impl(&, Box)] -pub trait ExecutionDataProvider: Send + Sync { - /// Return the execution outcome. - fn execution_outcome(&self) -> &ExecutionOutcome; - /// Return block hash by block number of pending or canonical chain. - fn block_hash(&self, block_number: BlockNumber) -> Option; -} - -impl ExecutionDataProvider for ExecutionOutcome { - fn execution_outcome(&self) -> &ExecutionOutcome { - self - } - - /// Always returns [None] because we don't have any information about the block header. 
- fn block_hash(&self, _block_number: BlockNumber) -> Option { - None - } -} - -/// Fork data needed for execution on it. -/// -/// It contains a canonical fork, the block on what pending chain was forked from. -#[auto_impl(&, Box)] -pub trait BlockExecutionForkProvider { - /// Return canonical fork, the block on what post state was forked from. - /// - /// Needed to create state provider. - fn canonical_fork(&self) -> BlockNumHash; -} - -/// Provides comprehensive post-execution state data required for further execution. -/// -/// This trait is used to create a state provider over the pending state and is a combination of -/// [`ExecutionDataProvider`] and [`BlockExecutionForkProvider`]. -/// -/// The pending state includes: -/// * `ExecutionOutcome`: Contains all changes to accounts and storage within the pending chain. -/// * Block hashes: Represents hashes of both the pending chain and canonical blocks. -/// * Canonical fork: Denotes the block from which the pending chain forked. -pub trait FullExecutionDataProvider: ExecutionDataProvider + BlockExecutionForkProvider {} - -impl FullExecutionDataProvider for T where T: ExecutionDataProvider + BlockExecutionForkProvider {} diff --git a/crates/storage/storage-api/src/transactions.rs b/crates/storage/storage-api/src/transactions.rs index f2c44e9e140..ca2bcaeb469 100644 --- a/crates/storage/storage-api/src/transactions.rs +++ b/crates/storage/storage-api/src/transactions.rs @@ -1,7 +1,8 @@ use crate::{BlockNumReader, BlockReader}; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{Address, BlockNumber, TxHash, TxNumber}; -use reth_primitives::{TransactionMeta, TransactionSigned, TransactionSignedNoHash}; +use reth_primitives::TransactionMeta; +use reth_primitives_traits::SignedTransaction; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::ops::{Range, RangeBounds, RangeInclusive}; @@ -18,9 +19,12 @@ pub enum TransactionVariant { WithHash, } -/// Client trait for fetching [TransactionSigned] related data. +/// Client trait for fetching transactions related data. #[auto_impl::auto_impl(&, Arc)] pub trait TransactionsProvider: BlockNumReader + Send + Sync { + /// The transaction type this provider reads. + type Transaction: Send + Sync + SignedTransaction; + /// Get internal transaction identifier by transaction hash. /// /// This is the inverse of [TransactionsProvider::transaction_by_id]. @@ -28,23 +32,21 @@ pub trait TransactionsProvider: BlockNumReader + Send + Sync { fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult>; /// Get transaction by id, computes hash every time so more expensive. - fn transaction_by_id(&self, id: TxNumber) -> ProviderResult>; + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult>; /// Get transaction by id without computing the hash. - fn transaction_by_id_no_hash( - &self, - id: TxNumber, - ) -> ProviderResult>; + fn transaction_by_id_unhashed(&self, id: TxNumber) + -> ProviderResult>; /// Get transaction by transaction hash. 
- fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult>; + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult>; /// Get transaction by transaction hash and additional metadata of the block the transaction was /// mined in fn transaction_by_hash_with_meta( &self, hash: TxHash, - ) -> ProviderResult>; + ) -> ProviderResult>; /// Get transaction block number fn transaction_block(&self, id: TxNumber) -> ProviderResult>; @@ -53,19 +55,19 @@ pub trait TransactionsProvider: BlockNumReader + Send + Sync { fn transactions_by_block( &self, block: BlockHashOrNumber, - ) -> ProviderResult>>; + ) -> ProviderResult>>; /// Get transactions by block range. fn transactions_by_block_range( &self, range: impl RangeBounds, - ) -> ProviderResult>>; + ) -> ProviderResult>>; /// Get transactions by tx range. fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult>; + ) -> ProviderResult>; /// Get Senders from a tx range. fn senders_by_tx_range( @@ -79,7 +81,10 @@ pub trait TransactionsProvider: BlockNumReader + Send + Sync { fn transaction_sender(&self, id: TxNumber) -> ProviderResult>; } -/// Client trait for fetching additional [TransactionSigned] related data. +/// A helper type alias to access [`TransactionsProvider::Transaction`]. +pub type ProviderTx

<P> = <P as TransactionsProvider>

::Transaction; + +/// Client trait for fetching additional transactions related data. #[auto_impl::auto_impl(&, Arc)] pub trait TransactionsProviderExt: BlockReader + Send + Sync { /// Get transactions range by block range. diff --git a/crates/tasks/Cargo.toml b/crates/tasks/Cargo.toml index 82c80c0932b..68d8e958979 100644 --- a/crates/tasks/Cargo.toml +++ b/crates/tasks/Cargo.toml @@ -15,7 +15,7 @@ workspace = true # async tokio = { workspace = true, features = ["sync", "rt"] } -tracing-futures = "0.2" +tracing-futures.workspace = true futures-util.workspace = true # metrics diff --git a/crates/tasks/src/lib.rs b/crates/tasks/src/lib.rs index 28b5eaba9ff..340e925ec56 100644 --- a/crates/tasks/src/lib.rs +++ b/crates/tasks/src/lib.rs @@ -111,6 +111,13 @@ dyn_clone::clone_trait_object!(TaskSpawner); #[non_exhaustive] pub struct TokioTaskExecutor; +impl TokioTaskExecutor { + /// Converts the instance to a boxed [`TaskSpawner`]. + pub fn boxed(self) -> Box { + Box::new(self) + } +} + impl TaskSpawner for TokioTaskExecutor { fn spawn(&self, fut: BoxFuture<'static, ()>) -> JoinHandle<()> { tokio::task::spawn(fut) diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index 22df8253682..7c0f3476559 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -88,6 +88,7 @@ serde = [ "rand?/serde", "revm/serde", "smallvec/serde", + "reth-primitives-traits/serde", ] test-utils = [ "rand", diff --git a/crates/transaction-pool/src/blobstore/disk.rs b/crates/transaction-pool/src/blobstore/disk.rs index 9d02276db85..67c36a65998 100644 --- a/crates/transaction-pool/src/blobstore/disk.rs +++ b/crates/transaction-pool/src/blobstore/disk.rs @@ -75,10 +75,7 @@ impl BlobStore for DiskFileBlobStore { } fn cleanup(&self) -> BlobStoreCleanupStat { - let txs_to_delete = { - let mut txs_to_delete = self.inner.txs_to_delete.write(); - std::mem::take(&mut *txs_to_delete) - }; + let txs_to_delete = std::mem::take(&mut *self.inner.txs_to_delete.write()); let mut stat = BlobStoreCleanupStat::default(); let mut subsize = 0; debug!(target:"txpool::blob", num_blobs=%txs_to_delete.len(), "Removing blobs from disk"); @@ -554,4 +551,136 @@ mod tests { assert_eq!(store.data_size_hint(), Some(0)); assert_eq!(store.inner.size_tracker.num_blobs.load(Ordering::Relaxed), 0); } + + #[test] + fn disk_insert_and_retrieve() { + let (store, _dir) = tmp_store(); + + let (tx, blob) = rng_blobs(1).into_iter().next().unwrap(); + store.insert(tx, blob.clone()).unwrap(); + + assert!(store.is_cached(&tx)); + let retrieved_blob = store.get(tx).unwrap().map(Arc::unwrap_or_clone).unwrap(); + assert_eq!(retrieved_blob, blob); + } + + #[test] + fn disk_delete_blob() { + let (store, _dir) = tmp_store(); + + let (tx, blob) = rng_blobs(1).into_iter().next().unwrap(); + store.insert(tx, blob).unwrap(); + assert!(store.is_cached(&tx)); + + store.delete(tx).unwrap(); + assert!(store.inner.txs_to_delete.read().contains(&tx)); + store.cleanup(); + + let result = store.get(tx).unwrap(); + assert_eq!( + result, + Some(Arc::new(BlobTransactionSidecar { + blobs: vec![], + commitments: vec![], + proofs: vec![] + })) + ); + } + + #[test] + fn disk_insert_all_and_delete_all() { + let (store, _dir) = tmp_store(); + + let blobs = rng_blobs(5); + let txs = blobs.iter().map(|(tx, _)| *tx).collect::>(); + store.insert_all(blobs.clone()).unwrap(); + + for (tx, _) in &blobs { + assert!(store.is_cached(tx)); + } + + store.delete_all(txs.clone()).unwrap(); + store.cleanup(); + + for tx in txs { + let result = 
store.get(tx).unwrap(); + assert_eq!( + result, + Some(Arc::new(BlobTransactionSidecar { + blobs: vec![], + commitments: vec![], + proofs: vec![] + })) + ); + } + } + + #[test] + fn disk_get_all_blobs() { + let (store, _dir) = tmp_store(); + + let blobs = rng_blobs(3); + let txs = blobs.iter().map(|(tx, _)| *tx).collect::>(); + store.insert_all(blobs.clone()).unwrap(); + + let retrieved_blobs = store.get_all(txs.clone()).unwrap(); + for (tx, blob) in retrieved_blobs { + assert!(blobs.contains(&(tx, Arc::unwrap_or_clone(blob)))); + } + + store.delete_all(txs).unwrap(); + store.cleanup(); + } + + #[test] + fn disk_get_exact_blobs_success() { + let (store, _dir) = tmp_store(); + + let blobs = rng_blobs(3); + let txs = blobs.iter().map(|(tx, _)| *tx).collect::>(); + store.insert_all(blobs.clone()).unwrap(); + + let retrieved_blobs = store.get_exact(txs).unwrap(); + for (retrieved_blob, (_, original_blob)) in retrieved_blobs.into_iter().zip(blobs) { + assert_eq!(Arc::unwrap_or_clone(retrieved_blob), original_blob); + } + } + + #[test] + fn disk_get_exact_blobs_failure() { + let (store, _dir) = tmp_store(); + + let blobs = rng_blobs(2); + let txs = blobs.iter().map(|(tx, _)| *tx).collect::>(); + store.insert_all(blobs).unwrap(); + + // Try to get a blob that was never inserted + let missing_tx = TxHash::random(); + let result = store.get_exact(vec![txs[0], missing_tx]); + assert!(result.is_err()); + } + + #[test] + fn disk_data_size_hint() { + let (store, _dir) = tmp_store(); + assert_eq!(store.data_size_hint(), Some(0)); + + let blobs = rng_blobs(2); + store.insert_all(blobs).unwrap(); + assert!(store.data_size_hint().unwrap() > 0); + } + + #[test] + fn disk_cleanup_stat() { + let (store, _dir) = tmp_store(); + + let blobs = rng_blobs(3); + let txs = blobs.iter().map(|(tx, _)| *tx).collect::>(); + store.insert_all(blobs).unwrap(); + + store.delete_all(txs).unwrap(); + let stat = store.cleanup(); + assert_eq!(stat.delete_succeed, 3); + assert_eq!(stat.delete_failed, 0); + } } diff --git a/crates/transaction-pool/src/blobstore/mod.rs b/crates/transaction-pool/src/blobstore/mod.rs index f1612bcd022..a21cea6e06c 100644 --- a/crates/transaction-pool/src/blobstore/mod.rs +++ b/crates/transaction-pool/src/blobstore/mod.rs @@ -152,7 +152,7 @@ impl PartialEq for BlobStoreSize { } /// Statistics for the cleanup operation. 
-#[derive(Debug, Clone, Default)] +#[derive(Debug, Clone, Default, PartialEq, Eq)] pub struct BlobStoreCleanupStat { /// the number of successfully deleted blobs pub delete_succeed: usize, diff --git a/crates/transaction-pool/src/blobstore/tracker.rs b/crates/transaction-pool/src/blobstore/tracker.rs index f22dcf5706e..0f48c89a499 100644 --- a/crates/transaction-pool/src/blobstore/tracker.rs +++ b/crates/transaction-pool/src/blobstore/tracker.rs @@ -43,7 +43,7 @@ impl BlobStoreCanonTracker { .body .transactions() .filter(|tx| tx.transaction.is_eip4844()) - .map(|tx| tx.hash); + .map(|tx| tx.hash()); (*num, iter) }); self.add_blocks(blob_txs); @@ -82,6 +82,7 @@ pub enum BlobStoreUpdates { #[cfg(test)] mod tests { use alloy_consensus::Header; + use alloy_primitives::PrimitiveSignature as Signature; use reth_execution_types::Chain; use reth_primitives::{ BlockBody, SealedBlock, SealedBlockWithSenders, SealedHeader, Transaction, @@ -127,22 +128,22 @@ mod tests { ), body: BlockBody { transactions: vec![ - TransactionSigned { - hash: tx1_hash, - transaction: Transaction::Eip4844(Default::default()), - ..Default::default() - }, - TransactionSigned { - hash: tx2_hash, - transaction: Transaction::Eip4844(Default::default()), - ..Default::default() - }, + TransactionSigned::new( + Transaction::Eip4844(Default::default()), + Signature::test_signature(), + tx1_hash, + ), + TransactionSigned::new( + Transaction::Eip4844(Default::default()), + Signature::test_signature(), + tx2_hash, + ), // Another transaction that is not EIP-4844 - TransactionSigned { - hash: B256::random(), - transaction: Transaction::Eip7702(Default::default()), - ..Default::default() - }, + TransactionSigned::new( + Transaction::Eip7702(Default::default()), + Signature::test_signature(), + B256::random(), + ), ], ..Default::default() }, @@ -160,16 +161,16 @@ mod tests { ), body: BlockBody { transactions: vec![ - TransactionSigned { - hash: tx3_hash, - transaction: Transaction::Eip1559(Default::default()), - ..Default::default() - }, - TransactionSigned { - hash: tx2_hash, - transaction: Transaction::Eip2930(Default::default()), - ..Default::default() - }, + TransactionSigned::new( + Transaction::Eip1559(Default::default()), + Signature::test_signature(), + tx3_hash, + ), + TransactionSigned::new( + Transaction::Eip2930(Default::default()), + Signature::test_signature(), + tx2_hash, + ), ], ..Default::default() }, @@ -178,7 +179,7 @@ mod tests { }; // Extract blocks from the chain - let chain = Chain::new(vec![block1, block2], Default::default(), None); + let chain: Chain = Chain::new(vec![block1, block2], Default::default(), None); let blocks = chain.into_inner().0; // Add new chain blocks to the tracker diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 8d11d7595b1..3194ebba6f8 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -409,7 +409,7 @@ where &self, max: usize, ) -> Vec>> { - self.pooled_transactions().into_iter().take(max).collect() + self.pool.pooled_transactions_max(max) } fn get_pooled_transaction_elements( diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 271c63a388a..47e70e91433 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -317,7 +317,7 @@ pub async fn maintain_transaction_pool( // find all transactions that were mined in the old chain but not in the new chain let pruned_old_transactions = old_blocks .transactions_ecrecovered() - 
.filter(|tx| !new_mined_transactions.contains(&tx.hash)) + .filter(|tx| !new_mined_transactions.contains(tx.hash_ref())) .filter_map(|tx| { if tx.is_eip4844() { // reorged blobs no longer include the blob, which is necessary for @@ -325,7 +325,7 @@ pub async fn maintain_transaction_pool( // been validated previously, we still need the blob in order to // accurately set the transaction's // encoded-length which is propagated over the network. - pool.get_blob(tx.hash) + pool.get_blob(TransactionSigned::hash(&tx)) .ok() .flatten() .map(Arc::unwrap_or_clone) diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index 7c2e5a025b7..171faccf7c2 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -401,7 +401,7 @@ mod tests { use crate::{ pool::pending::PendingPool, test_utils::{MockOrdering, MockTransaction, MockTransactionFactory}, - Priority, + BestTransactions, Priority, }; use alloy_primitives::U256; use reth_payload_util::{PayloadTransactionsChain, PayloadTransactionsFixed}; @@ -897,5 +897,127 @@ mod tests { assert_eq!(block.next(()).unwrap().signer(), address_regular); } + #[test] + fn test_best_with_fees_iter_no_blob_fee_required() { + // Tests transactions without blob fees where base fees are checked. + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + let base_fee: u64 = 10; + let base_fee_per_blob_gas: u64 = 0; // No blob fee requirement + + // Insert transactions with max_fee_per_gas above the base fee + for nonce in 0..5 { + let tx = MockTransaction::eip1559() + .rng_hash() + .with_nonce(nonce) + .with_max_fee(base_fee as u128 + 5); + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + } + + let mut best = pool.best_with_basefee_and_blobfee(base_fee, base_fee_per_blob_gas); + + // All transactions should be returned as no blob fee requirement is imposed + for nonce in 0..5 { + let tx = best.next().expect("Transaction should be returned"); + assert_eq!(tx.nonce(), nonce); + } + + // Ensure no more transactions are left + assert!(best.next().is_none()); + } + + #[test] + fn test_best_with_fees_iter_mix_of_blob_and_non_blob_transactions() { + // Tests mixed scenarios with both blob and non-blob transactions. 
+ let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + let base_fee: u64 = 10; + let base_fee_per_blob_gas: u64 = 15; + + // Add a non-blob transaction that satisfies the base fee + let tx_non_blob = + MockTransaction::eip1559().rng_hash().with_nonce(0).with_max_fee(base_fee as u128 + 5); + pool.add_transaction(Arc::new(f.validated(tx_non_blob.clone())), 0); + + // Add a blob transaction that satisfies both base fee and blob fee + let tx_blob = MockTransaction::eip4844() + .rng_hash() + .with_nonce(1) + .with_max_fee(base_fee as u128 + 5) + .with_blob_fee(base_fee_per_blob_gas as u128 + 5); + pool.add_transaction(Arc::new(f.validated(tx_blob.clone())), 0); + + let mut best = pool.best_with_basefee_and_blobfee(base_fee, base_fee_per_blob_gas); + + // Verify both transactions are returned + let tx = best.next().expect("Transaction should be returned"); + assert_eq!(tx.transaction, tx_non_blob); + + let tx = best.next().expect("Transaction should be returned"); + assert_eq!(tx.transaction, tx_blob); + + // Ensure no more transactions are left + assert!(best.next().is_none()); + } + + #[test] + fn test_best_transactions_with_skipping_blobs() { + // Tests the skip_blobs functionality to ensure blob transactions are skipped. + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + // Add a blob transaction + let tx_blob = MockTransaction::eip4844().rng_hash().with_nonce(0).with_blob_fee(100); + let valid_blob_tx = f.validated(tx_blob); + pool.add_transaction(Arc::new(valid_blob_tx), 0); + + // Add a non-blob transaction + let tx_non_blob = MockTransaction::eip1559().rng_hash().with_nonce(1).with_max_fee(200); + let valid_non_blob_tx = f.validated(tx_non_blob.clone()); + pool.add_transaction(Arc::new(valid_non_blob_tx), 0); + + let mut best = pool.best(); + best.skip_blobs(); + + // Only the non-blob transaction should be returned + let tx = best.next().expect("Transaction should be returned"); + assert_eq!(tx.transaction, tx_non_blob); + + // Ensure no more transactions are left + assert!(best.next().is_none()); + } + + #[test] + fn test_best_transactions_no_updates() { + // Tests the no_updates functionality to ensure it properly clears the + // new_transaction_receiver. 
+ let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + // Add a transaction + let tx = MockTransaction::eip1559().rng_hash().with_nonce(0).with_max_fee(100); + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + + let mut best = pool.best(); + + // Use a broadcast channel for transaction updates + let (_tx_sender, tx_receiver) = + tokio::sync::broadcast::channel::>(1000); + best.new_transaction_receiver = Some(tx_receiver); + + // Ensure receiver is set + assert!(best.new_transaction_receiver.is_some()); + + // Call no_updates to clear the receiver + best.no_updates(); + + // Ensure receiver is cleared + assert!(best.new_transaction_receiver.is_none()); + } + // TODO: Same nonce test } diff --git a/crates/transaction-pool/src/pool/blob.rs b/crates/transaction-pool/src/pool/blob.rs index ac39c6ab781..e6c0cb245c3 100644 --- a/crates/transaction-pool/src/pool/blob.rs +++ b/crates/transaction-pool/src/pool/blob.rs @@ -693,4 +693,102 @@ mod tests { ); } } + + #[test] + fn test_empty_pool_operations() { + let mut pool: BlobTransactions = BlobTransactions::default(); + + // Ensure pool is empty + assert!(pool.is_empty()); + assert_eq!(pool.len(), 0); + assert_eq!(pool.size(), 0); + + // Attempt to remove a non-existent transaction + let non_existent_id = TransactionId::new(0.into(), 0); + assert!(pool.remove_transaction(&non_existent_id).is_none()); + + // Check contains method on empty pool + assert!(!pool.contains(&non_existent_id)); + } + + #[test] + fn test_transaction_removal() { + let mut factory = MockTransactionFactory::default(); + let mut pool = BlobTransactions::default(); + + // Add a transaction + let tx = factory.validated_arc(MockTransaction::eip4844()); + let tx_id = *tx.id(); + pool.add_transaction(tx); + + // Remove the transaction + let removed = pool.remove_transaction(&tx_id); + assert!(removed.is_some()); + assert_eq!(*removed.unwrap().id(), tx_id); + assert!(pool.is_empty()); + } + + #[test] + fn test_satisfy_attributes_empty_pool() { + let pool: BlobTransactions = BlobTransactions::default(); + let attributes = BestTransactionsAttributes { blob_fee: Some(100), basefee: 100 }; + // Satisfy attributes on an empty pool should return an empty vector + let satisfied = pool.satisfy_attributes(attributes); + assert!(satisfied.is_empty()); + } + + #[test] + #[should_panic(expected = "transaction is not a blob tx")] + fn test_add_non_blob_transaction() { + // Ensure that adding a non-blob transaction causes a panic + let mut factory = MockTransactionFactory::default(); + let mut pool = BlobTransactions::default(); + let tx = factory.validated_arc(MockTransaction::eip1559()); // Not a blob transaction + pool.add_transaction(tx); + } + + #[test] + #[should_panic(expected = "transaction already included")] + fn test_add_duplicate_blob_transaction() { + // Ensure that adding a duplicate blob transaction causes a panic + let mut factory = MockTransactionFactory::default(); + let mut pool = BlobTransactions::default(); + let tx = factory.validated_arc(MockTransaction::eip4844()); + pool.add_transaction(tx.clone()); // First addition + pool.add_transaction(tx); // Attempt to add the same transaction again + } + + #[test] + fn test_remove_transactions_until_limit() { + // Test truncating the pool until it satisfies the given size limit + let mut factory = MockTransactionFactory::default(); + let mut pool = BlobTransactions::default(); + let tx1 = 
factory.validated_arc(MockTransaction::eip4844().with_size(100)); + let tx2 = factory.validated_arc(MockTransaction::eip4844().with_size(200)); + let tx3 = factory.validated_arc(MockTransaction::eip4844().with_size(300)); + + // Add transactions to the pool + pool.add_transaction(tx1); + pool.add_transaction(tx2); + pool.add_transaction(tx3); + + // Set a size limit that requires truncation + let limit = SubPoolLimit { max_txs: 2, max_size: 300 }; + let removed = pool.truncate_pool(limit); + + // Check that only one transaction was removed to satisfy the limit + assert_eq!(removed.len(), 1); + assert_eq!(pool.len(), 2); + assert!(pool.size() <= limit.max_size); + } + + #[test] + fn test_empty_pool_invariants() { + // Ensure that the invariants hold for an empty pool + let pool: BlobTransactions = BlobTransactions::default(); + pool.assert_invariants(); + assert!(pool.is_empty()); + assert_eq!(pool.size(), 0); + assert_eq!(pool.len(), 0); + } } diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 6441ed687f2..1a23bf3e07c 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -78,7 +78,8 @@ use crate::{ PoolTransaction, PropagatedTransactions, TransactionOrigin, }, validate::{TransactionValidationOutcome, ValidPoolTransaction}, - CanonicalStateUpdate, PoolConfig, TransactionOrdering, TransactionValidator, + CanonicalStateUpdate, EthPoolTransaction, PoolConfig, TransactionOrdering, + TransactionValidator, }; use alloy_primitives::{Address, TxHash, B256}; use best::BestTransactions; @@ -87,9 +88,7 @@ use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; use alloy_eips::eip4844::BlobTransactionSidecar; -use reth_primitives::{ - BlobTransaction, PooledTransactionsElement, TransactionSigned, TransactionSignedEcRecovered, -}; +use reth_primitives::PooledTransactionsElement; use std::{ collections::{HashMap, HashSet}, fmt, @@ -304,18 +303,41 @@ where self.get_pool_data().all().transactions_iter().filter(|tx| tx.propagate).collect() } - /// Returns the [`BlobTransaction`] for the given transaction if the sidecar exists. + /// Returns only the first `max` transactions in the pool. + pub(crate) fn pooled_transactions_max( + &self, + max: usize, + ) -> Vec>> { + self.get_pool_data().all().transactions_iter().filter(|tx| tx.propagate).take(max).collect() + } + + /// Converts the internally tracked transaction to the pooled format. /// - /// Caution: this assumes the given transaction is eip-4844 - fn get_blob_transaction(&self, transaction: TransactionSigned) -> Option { - if let Ok(Some(sidecar)) = self.blob_store.get(transaction.hash()) { - if let Ok(blob) = - BlobTransaction::try_from_signed(transaction, Arc::unwrap_or_clone(sidecar)) - { - return Some(blob) - } + /// If the transaction is an EIP-4844 transaction, the blob sidecar is fetched from the blob + /// store and attached to the transaction. 
+ fn to_pooled_transaction( + &self, + transaction: Arc>, + ) -> Option<<::Transaction as PoolTransaction>::Pooled> + where + ::Transaction: EthPoolTransaction, + { + if transaction.is_eip4844() { + let sidecar = self.blob_store.get(*transaction.hash()).ok()??; + transaction.transaction.clone().try_into_pooled_eip4844(sidecar) + } else { + transaction + .transaction + .clone() + .try_into_pooled() + .inspect_err(|err| { + debug!( + target: "txpool", %err, + "failed to convert transaction to pooled element; skipping", + ); + }) + .ok() } - None } /// Returns converted [`PooledTransactionsElement`] for the given transaction hashes. @@ -325,39 +347,19 @@ where limit: GetPooledTransactionLimit, ) -> Vec where - ::Transaction: - PoolTransaction>, + ::Transaction: EthPoolTransaction, { let transactions = self.get_all(tx_hashes); let mut elements = Vec::with_capacity(transactions.len()); let mut size = 0; for transaction in transactions { let encoded_len = transaction.encoded_length(); - let recovered: TransactionSignedEcRecovered = - transaction.transaction.clone().into_consensus().into(); - let tx = recovered.into_signed(); - let pooled = if tx.is_eip4844() { - // for EIP-4844 transactions, we need to fetch the blob sidecar from the blob store - if let Some(blob) = self.get_blob_transaction(tx) { - PooledTransactionsElement::BlobTransaction(blob) - } else { - continue - } - } else { - match PooledTransactionsElement::try_from(tx) { - Ok(element) => element, - Err(err) => { - debug!( - target: "txpool", %err, - "failed to convert transaction to pooled element; skipping", - ); - continue - } - } + let Some(pooled) = self.to_pooled_transaction(transaction) else { + continue; }; size += encoded_len; - elements.push(pooled); + elements.push(pooled.into()); if limit.exceeds(size) { break @@ -373,19 +375,9 @@ where tx_hash: TxHash, ) -> Option where - ::Transaction: - PoolTransaction>, + ::Transaction: EthPoolTransaction, { - self.get(&tx_hash).and_then(|transaction| { - let recovered: TransactionSignedEcRecovered = - transaction.transaction.clone().into_consensus().into(); - let tx = recovered.into_signed(); - if tx.is_eip4844() { - self.get_blob_transaction(tx).map(PooledTransactionsElement::BlobTransaction) - } else { - PooledTransactionsElement::try_from(tx).ok() - } - }) + self.get(&tx_hash).and_then(|tx| self.to_pooled_transaction(tx).map(Into::into)) } /// Updates the entire pool after a new block was executed. @@ -1302,7 +1294,7 @@ mod tests { // Insert the sidecar into the blob store if the current index is within the blob limit. if n < blob_limit.max_txs { - blob_store.insert(tx.get_hash(), sidecar.clone()).unwrap(); + blob_store.insert(*tx.get_hash(), sidecar.clone()).unwrap(); } // Add the transaction to the pool with external origin and valid outcome. diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index f4bce8c85a6..89e673aad99 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -8,7 +8,7 @@ use crate::{ }; use std::{ cmp::Ordering, - collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap}, + collections::{hash_map::Entry, BTreeMap, HashMap}, ops::Bound::Unbounded, sync::Arc, }; @@ -34,8 +34,6 @@ pub struct PendingPool { submission_id: u64, /// _All_ Transactions that are currently inside the pool grouped by their identifier. 
by_id: BTreeMap>, - /// _All_ transactions sorted by priority - all: BTreeSet>, /// The highest nonce transactions for each sender - like the `independent` set, but the /// highest instead of lowest nonce. highest_nonces: HashMap>, @@ -61,7 +59,6 @@ impl PendingPool { ordering, submission_id: 0, by_id: Default::default(), - all: Default::default(), independent_transactions: Default::default(), highest_nonces: Default::default(), size_of: Default::default(), @@ -78,7 +75,6 @@ impl PendingPool { fn clear_transactions(&mut self) -> BTreeMap> { self.independent_transactions.clear(); self.highest_nonces.clear(); - self.all.clear(); self.size_of.reset(); std::mem::take(&mut self.by_id) } @@ -194,7 +190,6 @@ impl PendingPool { } else { self.size_of += tx.transaction.size(); self.update_independents_and_highest_nonces(&tx); - self.all.insert(tx.clone()); self.by_id.insert(id, tx); } } @@ -240,7 +235,6 @@ impl PendingPool { self.size_of += tx.transaction.size(); self.update_independents_and_highest_nonces(&tx); - self.all.insert(tx.clone()); self.by_id.insert(id, tx); } } @@ -307,7 +301,6 @@ impl PendingPool { let tx = PendingTransaction { submission_id, transaction: tx, priority }; self.update_independents_and_highest_nonces(&tx); - self.all.insert(tx.clone()); // send the new transaction to any existing pendingpool static file iterators if self.new_transaction_notifier.receiver_count() > 0 { @@ -337,7 +330,6 @@ impl PendingPool { let tx = self.by_id.remove(id)?; self.size_of -= tx.transaction.size(); - self.all.remove(&tx); if let Some(highest) = self.highest_nonces.get(&id.sender) { if highest.transaction.nonce() == id.nonce { @@ -538,13 +530,12 @@ impl PendingPool { /// Asserts that the bijection between `by_id` and `all` is valid. #[cfg(any(test, feature = "test-utils"))] pub(crate) fn assert_invariants(&self) { - assert_eq!(self.by_id.len(), self.all.len(), "by_id.len() != all.len()"); assert!( - self.independent_transactions.len() <= self.all.len(), + self.independent_transactions.len() <= self.by_id.len(), "independent.len() > all.len()" ); assert!( - self.highest_nonces.len() <= self.all.len(), + self.highest_nonces.len() <= self.by_id.len(), "independent_descendants.len() > all.len()" ); assert_eq!( @@ -880,4 +871,104 @@ mod tests { } } } + + #[test] + fn test_empty_pool_behavior() { + let mut pool = PendingPool::::new(MockOrdering::default()); + + // Ensure the pool is empty + assert!(pool.is_empty()); + assert_eq!(pool.len(), 0); + assert_eq!(pool.size(), 0); + + // Verify that attempting to truncate an empty pool does not panic and returns an empty vec + let removed = pool.truncate_pool(SubPoolLimit { max_txs: 10, max_size: 1000 }); + assert!(removed.is_empty()); + + // Verify that retrieving transactions from an empty pool yields nothing + let all_txs: Vec<_> = pool.all().collect(); + assert!(all_txs.is_empty()); + } + + #[test] + fn test_add_remove_transaction() { + let mut f = MockTransactionFactory::default(); + let mut pool = PendingPool::new(MockOrdering::default()); + + // Add a transaction and check if it's in the pool + let tx = f.validated_arc(MockTransaction::eip1559()); + pool.add_transaction(tx.clone(), 0); + assert!(pool.contains(tx.id())); + assert_eq!(pool.len(), 1); + + // Remove the transaction and ensure it's no longer in the pool + let removed_tx = pool.remove_transaction(tx.id()).unwrap(); + assert_eq!(removed_tx.id(), tx.id()); + assert!(!pool.contains(tx.id())); + assert_eq!(pool.len(), 0); + } + + #[test] + fn test_reorder_on_basefee_update() { + let mut f = 
MockTransactionFactory::default(); + let mut pool = PendingPool::new(MockOrdering::default()); + + // Add two transactions with different fees + let tx1 = f.validated_arc(MockTransaction::eip1559().inc_price()); + let tx2 = f.validated_arc(MockTransaction::eip1559().inc_price_by(20)); + pool.add_transaction(tx1.clone(), 0); + pool.add_transaction(tx2.clone(), 0); + + // Ensure the transactions are in the correct order + let mut best = pool.best(); + assert_eq!(best.next().unwrap().hash(), tx2.hash()); + assert_eq!(best.next().unwrap().hash(), tx1.hash()); + + // Update the base fee to a value higher than tx1's fee, causing it to be removed + let removed = pool.update_base_fee((tx1.max_fee_per_gas() + 1) as u64); + assert_eq!(removed.len(), 1); + assert_eq!(removed[0].hash(), tx1.hash()); + + // Verify that only tx2 remains in the pool + assert_eq!(pool.len(), 1); + assert!(pool.contains(tx2.id())); + assert!(!pool.contains(tx1.id())); + } + + #[test] + #[should_panic(expected = "transaction already included")] + fn test_handle_duplicates() { + let mut f = MockTransactionFactory::default(); + let mut pool = PendingPool::new(MockOrdering::default()); + + // Add the same transaction twice and ensure it only appears once + let tx = f.validated_arc(MockTransaction::eip1559()); + pool.add_transaction(tx.clone(), 0); + assert!(pool.contains(tx.id())); + assert_eq!(pool.len(), 1); + + // Attempt to add the same transaction again, which should be ignored + pool.add_transaction(tx, 0); + } + + #[test] + fn test_update_blob_fee() { + let mut f = MockTransactionFactory::default(); + let mut pool = PendingPool::new(MockOrdering::default()); + + // Add transactions with varying blob fees + let tx1 = f.validated_arc(MockTransaction::eip4844().set_blob_fee(50).clone()); + let tx2 = f.validated_arc(MockTransaction::eip4844().set_blob_fee(150).clone()); + pool.add_transaction(tx1.clone(), 0); + pool.add_transaction(tx2.clone(), 0); + + // Update the blob fee to a value that causes tx1 to be removed + let removed = pool.update_blob_fee(100); + assert_eq!(removed.len(), 1); + assert_eq!(removed[0].hash(), tx1.hash()); + + // Verify that only tx2 remains in the pool + assert!(pool.contains(tx2.id())); + assert!(!pool.contains(tx1.id())); + } } diff --git a/crates/transaction-pool/src/pool/state.rs b/crates/transaction-pool/src/pool/state.rs index d0a3b10f8cb..d65fc05b03f 100644 --- a/crates/transaction-pool/src/pool/state.rs +++ b/crates/transaction-pool/src/pool/state.rs @@ -46,8 +46,6 @@ bitflags::bitflags! { } } -// === impl TxState === - impl TxState { /// The state of a transaction is considered `pending`, if the transaction has: /// - _No_ parked ancestors @@ -89,8 +87,6 @@ pub enum SubPool { Pending, } -// === impl SubPool === - impl SubPool { /// Whether this transaction is to be moved to the pending sub-pool. 
#[inline] @@ -126,16 +122,15 @@ impl SubPool { impl From for SubPool { fn from(value: TxState) -> Self { if value.is_pending() { - return Self::Pending - } - if value.is_blob() { + Self::Pending + } else if value.is_blob() { // all _non-pending_ blob transactions are in the blob sub-pool - return Self::Blob + Self::Blob + } else if value.bits() < TxState::BASE_FEE_POOL_BITS.bits() { + Self::Queued + } else { + Self::BaseFee } - if value.bits() < TxState::BASE_FEE_POOL_BITS.bits() { - return Self::Queued - } - Self::BaseFee } } @@ -204,4 +199,61 @@ mod tests { assert!(state.is_blob()); assert!(!state.is_pending()); } + + #[test] + fn test_tx_state_no_nonce_gap() { + let mut state = TxState::default(); + state |= TxState::NO_NONCE_GAPS; + assert!(!state.has_nonce_gap()); + } + + #[test] + fn test_tx_state_with_nonce_gap() { + let state = TxState::default(); + assert!(state.has_nonce_gap()); + } + + #[test] + fn test_tx_state_enough_balance() { + let mut state = TxState::default(); + state.insert(TxState::ENOUGH_BALANCE); + assert!(state.contains(TxState::ENOUGH_BALANCE)); + } + + #[test] + fn test_tx_state_not_too_much_gas() { + let mut state = TxState::default(); + state.insert(TxState::NOT_TOO_MUCH_GAS); + assert!(state.contains(TxState::NOT_TOO_MUCH_GAS)); + } + + #[test] + fn test_tx_state_enough_fee_cap_block() { + let mut state = TxState::default(); + state.insert(TxState::ENOUGH_FEE_CAP_BLOCK); + assert!(state.contains(TxState::ENOUGH_FEE_CAP_BLOCK)); + } + + #[test] + fn test_tx_base_fee() { + let state = TxState::BASE_FEE_POOL_BITS; + assert_eq!(SubPool::BaseFee, state.into()); + } + + #[test] + fn test_blob_transaction_only() { + let state = TxState::BLOB_TRANSACTION; + assert_eq!(SubPool::Blob, state.into()); + assert!(state.is_blob()); + assert!(!state.is_pending()); + } + + #[test] + fn test_blob_transaction_with_base_fee_bits() { + let mut state = TxState::BASE_FEE_POOL_BITS; + state.insert(TxState::BLOB_TRANSACTION); + assert_eq!(SubPool::Blob, state.into()); + assert!(state.is_blob()); + assert!(!state.is_pending()); + } } diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 040deb15fcb..86bf5f741c3 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -657,7 +657,7 @@ impl TxPool { InsertErr::Overdraft { transaction } => Err(PoolError::new( *transaction.hash(), PoolErrorKind::InvalidTransaction(InvalidPoolTransactionError::Overdraft { - cost: transaction.cost(), + cost: *transaction.cost(), balance: on_chain_balance, }), )), @@ -1229,7 +1229,7 @@ impl AllTransactions { tx.state.insert(TxState::NO_NONCE_GAPS); tx.state.insert(TxState::NO_PARKED_ANCESTORS); tx.cumulative_cost = U256::ZERO; - if tx.transaction.cost() > info.balance { + if tx.transaction.cost() > &info.balance { // sender lacks sufficient funds to pay for this transaction tx.state.remove(TxState::ENOUGH_BALANCE); } else { @@ -1328,7 +1328,7 @@ impl AllTransactions { id: *tx.transaction.id(), hash: *tx.transaction.hash(), current: current_pool, - destination: Destination::Pool(tx.subpool), + destination: tx.subpool.into(), }) } } @@ -1542,7 +1542,7 @@ impl AllTransactions { } } } - } else if new_blob_tx.cost() > on_chain_balance { + } else if new_blob_tx.cost() > &on_chain_balance { // the transaction would go into overdraft return Err(InsertErr::Overdraft { transaction: Arc::new(new_blob_tx) }) } @@ -1738,7 +1738,7 @@ impl AllTransactions { id: *id, hash: *tx.transaction.hash(), current: current_pool, - 
destination: Destination::Pool(tx.subpool), + destination: tx.subpool.into(), }) } } @@ -2486,8 +2486,7 @@ mod tests { let tx = MockTransaction::eip1559().inc_price().inc_limit(); let first = f.validated(tx.clone()); pool.insert_tx(first, on_chain_balance, on_chain_nonce).unwrap(); - let tx = - MockTransaction::eip4844().set_sender(tx.get_sender()).inc_price_by(100).inc_limit(); + let tx = MockTransaction::eip4844().set_sender(tx.sender()).inc_price_by(100).inc_limit(); let blob = f.validated(tx); let err = pool.insert_tx(blob, on_chain_balance, on_chain_nonce).unwrap_err(); assert!(matches!(err, InsertErr::TxTypeConflict { .. }), "{err:?}"); @@ -2502,8 +2501,7 @@ mod tests { let tx = MockTransaction::eip4844().inc_price().inc_limit(); let first = f.validated(tx.clone()); pool.insert_tx(first, on_chain_balance, on_chain_nonce).unwrap(); - let tx = - MockTransaction::eip1559().set_sender(tx.get_sender()).inc_price_by(100).inc_limit(); + let tx = MockTransaction::eip1559().set_sender(tx.sender()).inc_price_by(100).inc_limit(); let tx = f.validated(tx); let err = pool.insert_tx(tx, on_chain_balance, on_chain_nonce).unwrap_err(); assert!(matches!(err, InsertErr::TxTypeConflict { .. }), "{err:?}"); @@ -2622,7 +2620,7 @@ mod tests { assert_eq!( pool.max_account_slots, - pool.tx_count(f.ids.sender_id(&tx.get_sender()).unwrap()) + pool.tx_count(f.ids.sender_id(tx.get_sender()).unwrap()) ); let err = @@ -2654,7 +2652,7 @@ mod tests { assert_eq!( pool.max_account_slots, - pool.tx_count(f.ids.sender_id(&tx.get_sender()).unwrap()) + pool.tx_count(f.ids.sender_id(tx.get_sender()).unwrap()) ); pool.insert_tx( @@ -2829,7 +2827,7 @@ mod tests { let mut changed_senders = HashMap::default(); changed_senders.insert( id.sender, - SenderInfo { state_nonce: next.get_nonce(), balance: U256::from(1_000) }, + SenderInfo { state_nonce: next.nonce(), balance: U256::from(1_000) }, ); let outcome = pool.update_accounts(changed_senders); assert_eq!(outcome.discarded.len(), 1); diff --git a/crates/transaction-pool/src/pool/update.rs b/crates/transaction-pool/src/pool/update.rs index a5cce8291fa..d62b1792e7b 100644 --- a/crates/transaction-pool/src/pool/update.rs +++ b/crates/transaction-pool/src/pool/update.rs @@ -26,3 +26,9 @@ pub(crate) enum Destination { /// Move transaction to pool Pool(SubPool), } + +impl From for Destination { + fn from(sub_pool: SubPool) -> Self { + Self::Pool(sub_pool) + } +} diff --git a/crates/transaction-pool/src/test_utils/gen.rs b/crates/transaction-pool/src/test_utils/gen.rs index 858098ec91a..95a179aec81 100644 --- a/crates/transaction-pool/src/test_utils/gen.rs +++ b/crates/transaction-pool/src/test_utils/gen.rs @@ -199,7 +199,7 @@ impl TransactionBuilder { /// Signs the provided transaction using the specified signer and returns a signed transaction. fn signed(transaction: Transaction, signer: B256) -> TransactionSigned { let signature = sign_message(signer, transaction.signature_hash()).unwrap(); - TransactionSigned::from_transaction_and_signature(transaction, signature) + TransactionSigned::new_unhashed(transaction, signature) } /// Sets the signer for the transaction builder. 
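Aside: with the new `From` impl in `pool/update.rs`, the `Destination::Pool(tx.subpool)` call sites in `txpool.rs` collapse to `tx.subpool.into()`. A minimal self-contained sketch of the pattern, using trimmed-down stand-ins rather than the real pool types (the real `Destination` enum has additional variants):

```rust
// Simplified stand-ins for the pool's SubPool/Destination pair.
#[derive(Debug, Clone, Copy, PartialEq)]
enum SubPool {
    Queued,
    BaseFee,
    Blob,
    Pending,
}

#[derive(Debug, PartialEq)]
enum Destination {
    /// Move the transaction to the given sub-pool.
    Pool(SubPool),
}

// The conversion added in pool/update.rs: any sub-pool is trivially a
// "move to this pool" destination.
impl From<SubPool> for Destination {
    fn from(sub_pool: SubPool) -> Self {
        Self::Pool(sub_pool)
    }
}

fn main() {
    // Call sites can now write `subpool.into()` instead of spelling out
    // `Destination::Pool(subpool)`.
    for sub in [SubPool::Queued, SubPool::BaseFee, SubPool::Blob, SubPool::Pending] {
        let dest: Destination = sub.into();
        assert_eq!(dest, Destination::Pool(sub));
    }
}
```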
diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index fc43349f3f1..afa1638c851 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -28,6 +28,7 @@ use reth_primitives::{ transaction::TryFromRecoveredTransactionError, PooledTransactionsElementEcRecovered, Transaction, TransactionSigned, TransactionSignedEcRecovered, TxType, }; +use reth_primitives_traits::InMemorySize; use std::{ops::Range, sync::Arc, time::Instant, vec::IntoIter}; /// A transaction pool implementation using [`MockOrdering`] for transaction ordering. @@ -58,6 +59,8 @@ macro_rules! set_value { *$field = new_value; } } + // Ensure the tx cost is always correct after each mutation. + $this.update_cost(); }; } @@ -68,7 +71,7 @@ macro_rules! get_value { MockTransaction::Legacy { $field, .. } | MockTransaction::Eip1559 { $field, .. } | MockTransaction::Eip4844 { $field, .. } | - MockTransaction::Eip2930 { $field, .. } => $field.clone(), + MockTransaction::Eip2930 { $field, .. } => $field, } }; } @@ -90,7 +93,7 @@ macro_rules! make_setters_getters { } /// Gets the value of the specified field. - pub fn [](&self) -> $t { + pub const fn [](&self) -> &$t { get_value!(self => $name) } )*} @@ -122,6 +125,8 @@ pub enum MockTransaction { input: Bytes, /// The size of the transaction, returned in the implementation of [`PoolTransaction`]. size: usize, + /// The cost of the transaction, returned in the implementation of [`PoolTransaction`]. + cost: U256, }, /// EIP-2930 transaction type. Eip2930 { @@ -147,6 +152,8 @@ pub enum MockTransaction { access_list: AccessList, /// The size of the transaction, returned in the implementation of [`PoolTransaction`]. size: usize, + /// The cost of the transaction, returned in the implementation of [`PoolTransaction`]. + cost: U256, }, /// EIP-1559 transaction type. Eip1559 { @@ -174,6 +181,8 @@ pub enum MockTransaction { input: Bytes, /// The size of the transaction, returned in the implementation of [`PoolTransaction`]. size: usize, + /// The cost of the transaction, returned in the implementation of [`PoolTransaction`]. + cost: U256, }, /// EIP-4844 transaction type. Eip4844 { @@ -205,6 +214,8 @@ pub enum MockTransaction { sidecar: BlobTransactionSidecar, /// The size of the transaction, returned in the implementation of [`PoolTransaction`]. size: usize, + /// The cost of the transaction, returned in the implementation of [`PoolTransaction`]. + cost: U256, }, } @@ -234,6 +245,7 @@ impl MockTransaction { value: Default::default(), input: Default::default(), size: Default::default(), + cost: U256::ZERO, } } @@ -251,6 +263,7 @@ impl MockTransaction { gas_price: 0, access_list: Default::default(), size: Default::default(), + cost: U256::ZERO, } } @@ -269,6 +282,7 @@ impl MockTransaction { input: Bytes::new(), access_list: Default::default(), size: Default::default(), + cost: U256::ZERO, } } @@ -289,6 +303,7 @@ impl MockTransaction { access_list: Default::default(), sidecar: Default::default(), size: Default::default(), + cost: U256::ZERO, } } @@ -559,6 +574,19 @@ impl MockTransaction { pub const fn is_eip2930(&self) -> bool { matches!(self, Self::Eip2930 { .. }) } + + fn update_cost(&mut self) { + match self { + Self::Legacy { cost, gas_limit, gas_price, value, .. } | + Self::Eip2930 { cost, gas_limit, gas_price, value, .. } => { + *cost = U256::from(*gas_limit) * U256::from(*gas_price) + *value + } + Self::Eip1559 { cost, gas_limit, max_fee_per_gas, value, .. 
} | + Self::Eip4844 { cost, gas_limit, max_fee_per_gas, value, .. } => { + *cost = U256::from(*gas_limit) * U256::from(*max_fee_per_gas) + *value + } + }; + } } impl PoolTransaction for MockTransaction { @@ -580,48 +608,39 @@ impl PoolTransaction for MockTransaction { pooled.into() } + fn try_consensus_into_pooled( + tx: Self::Consensus, + ) -> Result { + Self::Pooled::try_from(tx).map_err(|_| TryFromRecoveredTransactionError::BlobSidecarMissing) + } + fn hash(&self) -> &TxHash { - match self { - Self::Legacy { hash, .. } | - Self::Eip1559 { hash, .. } | - Self::Eip4844 { hash, .. } | - Self::Eip2930 { hash, .. } => hash, - } + self.get_hash() } fn sender(&self) -> Address { - match self { - Self::Legacy { sender, .. } | - Self::Eip1559 { sender, .. } | - Self::Eip4844 { sender, .. } | - Self::Eip2930 { sender, .. } => *sender, - } + *self.get_sender() } fn nonce(&self) -> u64 { - match self { - Self::Legacy { nonce, .. } | - Self::Eip1559 { nonce, .. } | - Self::Eip4844 { nonce, .. } | - Self::Eip2930 { nonce, .. } => *nonce, - } + *self.get_nonce() } - fn cost(&self) -> U256 { + // Having `get_cost` from `make_setters_getters` would be cleaner but we didn't + // want to also generate the error-prone cost setters. For now cost should be + // correct at construction and auto-updated per field update via `update_cost`, + // not to be manually set. + fn cost(&self) -> &U256 { match self { - Self::Legacy { gas_price, value, gas_limit, .. } | - Self::Eip2930 { gas_limit, gas_price, value, .. } => { - U256::from(*gas_limit) * U256::from(*gas_price) + *value - } - Self::Eip1559 { max_fee_per_gas, value, gas_limit, .. } | - Self::Eip4844 { max_fee_per_gas, value, gas_limit, .. } => { - U256::from(*gas_limit) * U256::from(*max_fee_per_gas) + *value - } + Self::Legacy { cost, .. } | + Self::Eip2930 { cost, .. } | + Self::Eip1559 { cost, .. } | + Self::Eip4844 { cost, .. } => cost, } } fn gas_limit(&self) -> u64 { - self.get_gas_limit() + *self.get_gas_limit() } fn max_fee_per_gas(&self) -> u128 { @@ -702,22 +721,12 @@ impl PoolTransaction for MockTransaction { /// Returns the input data associated with the transaction. fn input(&self) -> &[u8] { - match self { - Self::Legacy { .. } => &[], - Self::Eip1559 { input, .. } | - Self::Eip4844 { input, .. } | - Self::Eip2930 { input, .. } => input, - } + self.get_input() } /// Returns the size of the transaction. fn size(&self) -> usize { - match self { - Self::Legacy { size, .. } | - Self::Eip1559 { size, .. } | - Self::Eip4844 { size, .. } | - Self::Eip2930 { size, .. } => *size, - } + *self.get_size() } /// Returns the transaction type as a byte identifier. 
@@ -761,6 +770,14 @@ impl EthPoolTransaction for MockTransaction { } } + fn try_into_pooled_eip4844(self, sidecar: Arc) -> Option { + Self::Pooled::try_from_blob_transaction( + self.into_consensus(), + Arc::unwrap_or_clone(sidecar), + ) + .ok() + } + fn validate_blob( &self, _blob: &BlobTransactionSidecar, @@ -807,6 +824,7 @@ impl TryFrom for MockTransaction { value, input, size, + cost: U256::from(gas_limit) * U256::from(gas_price) + value, }), Transaction::Eip2930(TxEip2930 { chain_id, @@ -829,6 +847,7 @@ impl TryFrom for MockTransaction { input, access_list, size, + cost: U256::from(gas_limit) * U256::from(gas_price) + value, }), Transaction::Eip1559(TxEip1559 { chain_id, @@ -853,6 +872,7 @@ impl TryFrom for MockTransaction { input, access_list, size, + cost: U256::from(gas_limit) * U256::from(max_fee_per_gas) + value, }), Transaction::Eip4844(TxEip4844 { chain_id, @@ -881,6 +901,7 @@ impl TryFrom for MockTransaction { access_list, sidecar: BlobTransactionSidecar::default(), size, + cost: U256::from(gas_limit) * U256::from(max_fee_per_gas) + value, }), _ => unreachable!("Invalid transaction type"), } @@ -897,11 +918,8 @@ impl From for MockTransaction { impl From for TransactionSignedEcRecovered { fn from(tx: MockTransaction) -> Self { - let signed_tx = TransactionSigned { - hash: *tx.hash(), - signature: Signature::test_signature(), - transaction: tx.clone().into(), - }; + let signed_tx = + TransactionSigned::new(tx.clone().into(), Signature::test_signature(), *tx.hash()); Self::from_signed_transaction(signed_tx, tx.sender()) } @@ -912,28 +930,24 @@ impl From for Transaction { match mock { MockTransaction::Legacy { chain_id, - hash: _, - sender: _, nonce, gas_price, gas_limit, to, value, input, - size: _, + .. } => Self::Legacy(TxLegacy { chain_id, nonce, gas_price, gas_limit, to, value, input }), MockTransaction::Eip2930 { chain_id, - hash: _, - sender: _, nonce, - to, + gas_price, gas_limit, - input, + to, value, - gas_price, access_list, - size: _, + input, + .. } => Self::Eip2930(TxEip2930 { chain_id, nonce, @@ -946,17 +960,15 @@ impl From for Transaction { }), MockTransaction::Eip1559 { chain_id, - hash: _, - sender: _, nonce, + gas_limit, max_fee_per_gas, max_priority_fee_per_gas, - gas_limit, to, value, access_list, input, - size: _, + .. } => Self::Eip1559(TxEip1559 { chain_id, nonce, @@ -970,19 +982,17 @@ impl From for Transaction { }), MockTransaction::Eip4844 { chain_id, - hash: _, - sender: _, nonce, + gas_limit, max_fee_per_gas, max_priority_fee_per_gas, - max_fee_per_blob_gas, - gas_limit, to, value, access_list, - input, sidecar, - size: _, + max_fee_per_blob_gas, + input, + .. 
} => Self::Eip4844(TxEip4844 { chain_id, nonce, @@ -1006,109 +1016,14 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { fn arbitrary_with(_: Self::Parameters) -> Self::Strategy { use proptest::prelude::Strategy; use proptest_arbitrary_interop::arb; - use reth_primitives_traits::size::InMemorySize; - - arb::<(Transaction, Address, B256)>() - .prop_map(|(tx, sender, tx_hash)| match &tx { - Transaction::Legacy(TxLegacy { - chain_id, - nonce, - gas_price, - gas_limit, - to, - value, - input, - }) => Self::Legacy { - chain_id: *chain_id, - sender, - hash: tx_hash, - nonce: *nonce, - gas_price: *gas_price, - gas_limit: { *gas_limit }, - to: *to, - value: *value, - input: input.clone(), - size: tx.size(), - }, - - Transaction::Eip2930(TxEip2930 { - chain_id, - nonce, - gas_price, - gas_limit, - to, - value, - access_list, - input, - }) => Self::Eip2930 { - chain_id: *chain_id, - sender, - hash: tx_hash, - nonce: *nonce, - gas_price: *gas_price, - gas_limit: { *gas_limit }, - to: *to, - value: *value, - input: input.clone(), - access_list: access_list.clone(), - size: tx.size(), - }, - Transaction::Eip1559(TxEip1559 { - chain_id, - nonce, - gas_limit, - max_fee_per_gas, - max_priority_fee_per_gas, - to, - value, - input, - access_list, - }) => Self::Eip1559 { - chain_id: *chain_id, - sender, - hash: tx_hash, - nonce: *nonce, - max_fee_per_gas: *max_fee_per_gas, - max_priority_fee_per_gas: *max_priority_fee_per_gas, - gas_limit: { *gas_limit }, - to: *to, - value: *value, - input: input.clone(), - access_list: access_list.clone(), - size: tx.size(), - }, - Transaction::Eip4844(TxEip4844 { - chain_id, - nonce, - gas_limit, - max_fee_per_gas, - max_priority_fee_per_gas, - to, - value, - input, - max_fee_per_blob_gas, - access_list, - blob_versioned_hashes: _, - }) => Self::Eip4844 { - chain_id: *chain_id, - sender, - hash: tx_hash, - nonce: *nonce, - max_fee_per_gas: *max_fee_per_gas, - max_priority_fee_per_gas: *max_priority_fee_per_gas, - max_fee_per_blob_gas: *max_fee_per_blob_gas, - gas_limit: { *gas_limit }, - to: *to, - value: *value, - input: input.clone(), - access_list: access_list.clone(), - // only generate a sidecar if it is a 4844 tx - also for the sake of - // performance just use a default sidecar - sidecar: BlobTransactionSidecar::default(), - size: tx.size(), - }, - #[allow(unreachable_patterns)] - _ => unimplemented!(), + + arb::<(TransactionSigned, Address)>() + .prop_map(|(signed_transaction, signer)| { + TransactionSignedEcRecovered::from_signed_transaction(signed_transaction, signer) + .try_into() + .expect( + "Failed to create an Arbitrary MockTransaction via TransactionSignedEcRecovered", + ) }) .boxed() } @@ -1127,8 +1042,8 @@ pub struct MockTransactionFactory { impl MockTransactionFactory { /// Generates a transaction ID for the given [`MockTransaction`]. pub fn tx_id(&mut self, tx: &MockTransaction) -> TransactionId { - let sender = self.ids.sender_id_or_create(tx.get_sender()); - TransactionId::new(sender, tx.get_nonce()) + let sender = self.ids.sender_id_or_create(tx.sender()); + TransactionId::new(sender, tx.nonce()) } /// Validates a [`MockTransaction`] and returns a [`MockValidTx`]. 
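Aside: the `MockTransaction` changes above swap an on-demand `cost()` computation for a `cost` field that is set at construction and refreshed by `update_cost()` after every setter mutation, which is what lets `cost()` return `&U256`. A rough self-contained sketch of that cache-on-write pattern, with `u128` standing in for `U256` and a hypothetical `MockTx` type:

```rust
// Derived-field caching: recompute `cost` whenever an input changes so
// the getter can hand out a cheap reference.
struct MockTx {
    gas_limit: u64,
    max_fee_per_gas: u128,
    value: u128,
    cost: u128, // derived; kept in sync by update_cost()
}

impl MockTx {
    fn new(gas_limit: u64, max_fee_per_gas: u128, value: u128) -> Self {
        let mut tx = Self { gas_limit, max_fee_per_gas, value, cost: 0 };
        tx.update_cost(); // cost is correct from construction onward
        tx
    }

    // Mirrors the `set_value!` macro: every setter refreshes the cache.
    fn set_gas_limit(&mut self, gas_limit: u64) {
        self.gas_limit = gas_limit;
        self.update_cost();
    }

    // For dynamic-fee variants: cost = gas_limit * max_fee_per_gas + value.
    fn update_cost(&mut self) {
        self.cost = u128::from(self.gas_limit) * self.max_fee_per_gas + self.value;
    }

    // Analogous to `fn cost(&self) -> &U256` in the trait impl.
    fn cost(&self) -> &u128 {
        &self.cost
    }
}

fn main() {
    let mut tx = MockTx::new(21_000, 10, 5);
    assert_eq!(*tx.cost(), 21_000 * 10 + 5);
    tx.set_gas_limit(30_000);
    assert_eq!(*tx.cost(), 30_000 * 10 + 5);
}
```

The trade-off is the usual one for cached derived state: setters must never skip the refresh, which is why the mock routes every mutation through the macro.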
diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs
index 6c247a84cdb..9d19105b5da 100644
--- a/crates/transaction-pool/src/traits.rs
+++ b/crates/transaction-pool/src/traits.rs
@@ -20,7 +20,8 @@ use reth_eth_wire_types::HandleMempoolData;
 use reth_execution_types::ChangedAccount;
 use reth_primitives::{
     kzg::KzgSettings, transaction::TryFromRecoveredTransactionError, PooledTransactionsElement,
-    PooledTransactionsElementEcRecovered, SealedBlock, Transaction, TransactionSignedEcRecovered,
+    PooledTransactionsElementEcRecovered, SealedBlock, Transaction, TransactionSigned,
+    TransactionSignedEcRecovered,
 };
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
@@ -551,6 +552,11 @@ impl<T: PoolTransaction> AllPoolTransactions<T> {
     pub fn queued_recovered(&self) -> impl Iterator<Item = T::Consensus> + '_ {
         self.queued.iter().map(|tx| tx.transaction.clone().into())
     }
+
+    /// Returns an iterator over all transactions, both pending and queued.
+    pub fn all(&self) -> impl Iterator<Item = T::Consensus> + '_ {
+        self.pending.iter().chain(self.queued.iter()).map(|tx| tx.transaction.clone().into())
+    }
 }
 
 impl Default for AllPoolTransactions {
@@ -915,10 +921,18 @@ impl BestTransactionsAttributes {
     }
 }
 
-/// Trait for transaction types used inside the pool
+/// Trait for transaction types used inside the pool.
+///
+/// This supports two transaction formats:
+/// - Consensus format: the form the transaction takes when it is included in a block.
+/// - Pooled format: the form the transaction takes when it is gossiped around the network.
+///
+/// This distinction is necessary for EIP-4844 blob transactions, which require an additional
+/// sidecar when they are gossiped around the network. It is expected that the `Consensus` format is
+/// a subset of the `Pooled` format.
 pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone {
     /// Associated error type for the `try_from_consensus` method.
-    type TryFromConsensusError;
+    type TryFromConsensusError: fmt::Display;
 
     /// Associated type representing the raw consensus variant of the transaction.
     type Consensus: From<Self> + TryInto<Self, Error = Self::TryFromConsensusError>;
@@ -941,6 +955,16 @@ pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone {
         pooled.into()
     }
 
+    /// Tries to convert `self` into the `Pooled` type by way of the `Consensus` format.
+    fn try_into_pooled(self) -> Result<Self::Pooled, Self::TryFromConsensusError> {
+        Self::try_consensus_into_pooled(self.into_consensus())
+    }
+
+    /// Tries to convert the `Consensus` type into the `Pooled` type.
+    fn try_consensus_into_pooled(
+        tx: Self::Consensus,
+    ) -> Result<Self::Pooled, Self::TryFromConsensusError>;
+
     /// Hash of the transaction.
     fn hash(&self) -> &TxHash;
 
@@ -956,7 +980,7 @@ pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone {
     /// For legacy transactions: `gas_price * gas_limit + tx_value`.
     /// For EIP-4844 blob transactions: `max_fee_per_gas * gas_limit + tx_value +
     /// max_blob_fee_per_gas * blob_gas_used`.
-    fn cost(&self) -> U256;
+    fn cost(&self) -> &U256;
 
     /// Amount of gas that should be used in executing this transaction. This is paid up-front.
     fn gas_limit(&self) -> u64;
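To make the two formats concrete, here is a minimal editorial sketch, not part of the diff, assuming only the `PoolTransaction` surface shown above:

    // Hypothetical helper, generic over any pool transaction type.
    fn prepare_for_gossip<T: PoolTransaction>(tx: T) -> Result<T::Pooled, T::TryFromConsensusError> {
        // `into_consensus` is infallible, but the reverse direction can fail:
        // e.g. an EIP-4844 consensus transaction lacks the sidecar that the
        // pooled (network) format requires.
        tx.try_into_pooled()
    }

@@ -1055,11 +1079,19 @@ pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone {
     }
 }
 
-/// Super trait for transactions that can be converted to and from Eth transactions
+/// Super trait for transactions that can be converted to and from Eth transactions intended for the
+/// Ethereum-style pool.
+///
+/// This extends the [`PoolTransaction`] trait with additional methods that are specific to the
+/// Ethereum pool.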
 pub trait EthPoolTransaction:
     PoolTransaction<
-        Consensus: From<TransactionSignedEcRecovered> + Into<TransactionSignedEcRecovered>,
-        Pooled: From<PooledTransactionsElementEcRecovered> + Into<PooledTransactionsElementEcRecovered>,
+        Consensus: From<TransactionSignedEcRecovered>
+                       + Into<TransactionSignedEcRecovered>
+                       + Into<TransactionSigned>,
+        Pooled: From<PooledTransactionsElementEcRecovered>
+                    + Into<PooledTransactionsElementEcRecovered>
+                    + Into<PooledTransactionsElement>,
     >
 {
     /// Extracts the blob sidecar from the transaction.
@@ -1068,6 +1100,13 @@ pub trait EthPoolTransaction:
     /// Returns the number of blobs this transaction has.
     fn blob_count(&self) -> usize;
 
+    /// A specialization for the EIP-4844 transaction type that tries to reattach the blob sidecar
+    /// to the transaction.
+    ///
+    /// This returns an `Option`; callers should first ensure that the transaction is an EIP-4844
+    /// transaction, see [`PoolTransaction::is_eip4844`].
+    fn try_into_pooled_eip4844(self, sidecar: Arc<BlobTransactionSidecar>) -> Option<Self::Pooled>;
+
     /// Validates the blob sidecar of the transaction with the given settings.
     fn validate_blob(
         &self,
@@ -1084,9 +1123,9 @@ pub trait EthPoolTransaction:
 /// This type is essentially a wrapper around [`TransactionSignedEcRecovered`] with additional
 /// fields derived from the transaction that are frequently used by the pools for ordering.
 #[derive(Debug, Clone, PartialEq, Eq)]
-pub struct EthPooledTransaction {
-    /// `EcRecovered` transaction info
-    pub(crate) transaction: TransactionSignedEcRecovered,
+pub struct EthPooledTransaction<T = TransactionSignedEcRecovered> {
+    /// `EcRecovered` transaction, the consensus format.
+    pub(crate) transaction: T,
 
     /// For EIP-1559 transactions: `max_fee_per_gas * gas_limit + tx_value`.
     /// For legacy transactions: `gas_price * gas_limit + tx_value`.
@@ -1102,30 +1141,6 @@ pub struct EthPooledTransaction {
     pub(crate) blob_sidecar: EthBlobTransactionSidecar,
 }
 
-/// Represents the blob sidecar of the [`EthPooledTransaction`].
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub enum EthBlobTransactionSidecar {
-    /// This transaction does not have a blob sidecar
-    None,
-    /// This transaction has a blob sidecar (EIP-4844) but it is missing
-    ///
-    /// It was either extracted after being inserted into the pool or re-injected after reorg
-    /// without the blob sidecar
-    Missing,
-    /// The eip-4844 transaction was pulled from the network and still has its blob sidecar
-    Present(BlobTransactionSidecar),
-}
-
-impl EthBlobTransactionSidecar {
-    /// Returns the blob sidecar if it is present
-    pub const fn maybe_sidecar(&self) -> Option<&BlobTransactionSidecar> {
-        match self {
-            Self::Present(sidecar) => Some(sidecar),
-            _ => None,
-        }
-    }
-}
-
 impl EthPooledTransaction {
     /// Create new instance of [Self].
     ///
@@ -1134,34 +1149,20 @@ impl EthPooledTransaction {
     pub fn new(transaction: TransactionSignedEcRecovered, encoded_length: usize) -> Self {
         let mut blob_sidecar = EthBlobTransactionSidecar::None;
 
-        #[allow(unreachable_patterns)]
-        let gas_cost = match &transaction.transaction {
-            Transaction::Legacy(t) => {
-                U256::from(t.gas_price).saturating_mul(U256::from(t.gas_limit))
-            }
-            Transaction::Eip2930(t) => {
-                U256::from(t.gas_price).saturating_mul(U256::from(t.gas_limit))
-            }
-            Transaction::Eip1559(t) => {
-                U256::from(t.max_fee_per_gas).saturating_mul(U256::from(t.gas_limit))
-            }
-            Transaction::Eip4844(t) => {
-                blob_sidecar = EthBlobTransactionSidecar::Missing;
-                U256::from(t.max_fee_per_gas).saturating_mul(U256::from(t.gas_limit))
-            }
-            Transaction::Eip7702(t) => {
-                U256::from(t.max_fee_per_gas).saturating_mul(U256::from(t.gas_limit))
-            }
-            _ => U256::ZERO,
-        };
-        let mut cost = transaction.value();
-        cost = cost.saturating_add(gas_cost);
+        let gas_cost = U256::from(transaction.transaction.max_fee_per_gas())
+            .saturating_mul(U256::from(transaction.transaction.gas_limit()));
+
+        let mut cost = gas_cost.saturating_add(transaction.value());
 
         if let Some(blob_tx) = transaction.as_eip4844() {
             // Add max blob cost using saturating math to avoid overflow
             cost = cost.saturating_add(U256::from(
                 blob_tx.max_fee_per_blob_gas.saturating_mul(blob_tx.blob_gas() as u128),
            ));
+
+            // Because the blob sidecar is not included in this transaction variant, mark it as
+            // missing.
+            blob_sidecar = EthBlobTransactionSidecar::Missing;
         }
 
         Self { transaction, cost, encoded_length, blob_sidecar }
@@ -1202,6 +1203,12 @@ impl PoolTransaction for EthPooledTransaction {
 
     type Pooled = PooledTransactionsElementEcRecovered;
 
+    fn try_consensus_into_pooled(
+        tx: Self::Consensus,
+    ) -> Result<Self::Pooled, Self::TryFromConsensusError> {
+        Self::Pooled::try_from(tx).map_err(|_| TryFromRecoveredTransactionError::BlobSidecarMissing)
+    }
+
     /// Returns hash of the transaction.
     fn hash(&self) -> &TxHash {
         self.transaction.hash_ref()
@@ -1223,8 +1230,8 @@ impl PoolTransaction for EthPooledTransaction {
     /// For legacy transactions: `gas_price * gas_limit + tx_value`.
     /// For EIP-4844 blob transactions: `max_fee_per_gas * gas_limit + tx_value +
     /// max_blob_fee_per_gas * blob_gas_used`.
-    fn cost(&self) -> U256 {
-        self.cost
+    fn cost(&self) -> &U256 {
+        &self.cost
     }
 
     /// Amount of gas that should be used in executing this transaction. This is paid up-front.
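As a quick sanity check of the formula above (an editorial example, not part of the diff): for a blob transaction with `gas_limit = 21_000`, `max_fee_per_gas = 100 gwei`, `value = 1 ether`, one blob (131_072 blob gas) and `max_fee_per_blob_gas = 1 gwei`:

    use alloy_primitives::U256;

    fn main() {
        let gas_cost = U256::from(100_000_000_000u128) * U256::from(21_000u64); // 2.1e15
        let blob_cost = U256::from(1_000_000_000u128) * U256::from(131_072u64); // 1.31072e14
        let value = U256::from(10u128.pow(18));
        // max_fee_per_gas * gas_limit + value + max_fee_per_blob_gas * blob_gas
        let cost = gas_cost.saturating_add(value).saturating_add(blob_cost);
        assert_eq!(cost, U256::from(2_231_072_000_000_000u128 + 10u128.pow(18)));
    }

@@ -1238,15 +1245,7 @@ impl PoolTransaction for EthPooledTransaction {
     ///
     /// This is also commonly referred to as the "Gas Fee Cap" (`GasFeeCap`).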
     fn max_fee_per_gas(&self) -> u128 {
-        #[allow(unreachable_patterns)]
-        match &self.transaction.transaction {
-            Transaction::Legacy(tx) => tx.gas_price,
-            Transaction::Eip2930(tx) => tx.gas_price,
-            Transaction::Eip1559(tx) => tx.max_fee_per_gas,
-            Transaction::Eip4844(tx) => tx.max_fee_per_gas,
-            Transaction::Eip7702(tx) => tx.max_fee_per_gas,
-            _ => 0,
-        }
+        self.transaction.transaction.max_fee_per_gas()
     }
 
     fn access_list(&self) -> Option<&AccessList> {
@@ -1257,14 +1256,7 @@ impl PoolTransaction for EthPooledTransaction {
     ///
     /// This will return `None` for non-EIP-1559 transactions
     fn max_priority_fee_per_gas(&self) -> Option<u128> {
-        #[allow(unreachable_patterns, clippy::match_same_arms)]
-        match &self.transaction.transaction {
-            Transaction::Legacy(_) | Transaction::Eip2930(_) => None,
-            Transaction::Eip1559(tx) => Some(tx.max_priority_fee_per_gas),
-            Transaction::Eip4844(tx) => Some(tx.max_priority_fee_per_gas),
-            Transaction::Eip7702(tx) => Some(tx.max_priority_fee_per_gas),
-            _ => None,
-        }
+        self.transaction.transaction.max_priority_fee_per_gas()
     }
 
     fn max_fee_per_blob_gas(&self) -> Option<u128> {
@@ -1332,6 +1324,14 @@ impl EthPoolTransaction for EthPooledTransaction {
         }
     }
 
+    fn try_into_pooled_eip4844(self, sidecar: Arc<BlobTransactionSidecar>) -> Option<Self::Pooled> {
+        PooledTransactionsElementEcRecovered::try_from_blob_transaction(
+            self.into_consensus(),
+            Arc::unwrap_or_clone(sidecar),
+        )
+        .ok()
+    }
+
     fn validate_blob(
         &self,
         sidecar: &BlobTransactionSidecar,
@@ -1384,6 +1384,30 @@ impl From<EthPooledTransaction> for TransactionSignedEcRecovered {
     }
 }
 
+/// Represents the blob sidecar of the [`EthPooledTransaction`].
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum EthBlobTransactionSidecar {
+    /// This transaction does not have a blob sidecar
+    None,
+    /// This transaction has a blob sidecar (EIP-4844) but it is missing
+    ///
+    /// It was either extracted after being inserted into the pool or re-injected after reorg
+    /// without the blob sidecar
+    Missing,
+    /// The EIP-4844 transaction was pulled from the network and still has its blob sidecar
+    Present(BlobTransactionSidecar),
+}
+
+impl EthBlobTransactionSidecar {
+    /// Returns the blob sidecar if it is present
+    pub const fn maybe_sidecar(&self) -> Option<&BlobTransactionSidecar> {
+        match self {
+            Self::Present(sidecar) => Some(sidecar),
+            _ => None,
+        }
+    }
+}
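The sidecar handling above splits into two directions, shown here in a brief editorial sketch (not part of the diff; only the `EthPoolTransaction` surface and the `EthBlobTransactionSidecar` enum from this diff are assumed):

    // Re-attaching: needed when a pooled (network) representation must be
    // produced again, e.g. to answer a GetPooledTransactions request.
    fn to_gossip<T: EthPoolTransaction>(
        tx: T,
        sidecar: std::sync::Arc<BlobTransactionSidecar>,
    ) -> Option<T::Pooled> {
        // Callers should check `is_eip4844` first; `None` means the consensus
        // transaction and the sidecar could not be recombined.
        tx.is_eip4844().then(|| tx.try_into_pooled_eip4844(sidecar)).flatten()
    }

    // Reading: the enum records whether the sidecar is still attached.
    fn attached_blobs(sidecar: &EthBlobTransactionSidecar) -> usize {
        sidecar.maybe_sidecar().map_or(0, |s| s.blobs.len())
    }

+
 /// Represents the current status of the pool.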
 #[derive(Debug, Clone, Copy, Default)]
 pub struct PoolSize {
@@ -1565,7 +1589,7 @@ mod tests {
             ..Default::default()
         });
         let signature = Signature::test_signature();
-        let signed_tx = TransactionSigned::from_transaction_and_signature(tx, signature);
+        let signed_tx = TransactionSigned::new_unhashed(tx, signature);
         let transaction =
             TransactionSignedEcRecovered::from_signed_transaction(signed_tx, Default::default());
         let pooled_tx = EthPooledTransaction::new(transaction.clone(), 200);
@@ -1587,7 +1611,7 @@ mod tests {
             ..Default::default()
         });
         let signature = Signature::test_signature();
-        let signed_tx = TransactionSigned::from_transaction_and_signature(tx, signature);
+        let signed_tx = TransactionSigned::new_unhashed(tx, signature);
         let transaction =
             TransactionSignedEcRecovered::from_signed_transaction(signed_tx, Default::default());
         let pooled_tx = EthPooledTransaction::new(transaction.clone(), 200);
@@ -1609,7 +1633,7 @@ mod tests {
             ..Default::default()
        });
         let signature = Signature::test_signature();
-        let signed_tx = TransactionSigned::from_transaction_and_signature(tx, signature);
+        let signed_tx = TransactionSigned::new_unhashed(tx, signature);
         let transaction =
             TransactionSignedEcRecovered::from_signed_transaction(signed_tx, Default::default());
         let pooled_tx = EthPooledTransaction::new(transaction.clone(), 200);
@@ -1633,7 +1657,7 @@ mod tests {
             ..Default::default()
         });
         let signature = Signature::test_signature();
-        let signed_tx = TransactionSigned::from_transaction_and_signature(tx, signature);
+        let signed_tx = TransactionSigned::new_unhashed(tx, signature);
         let transaction =
             TransactionSignedEcRecovered::from_signed_transaction(signed_tx, Default::default());
         let pooled_tx = EthPooledTransaction::new(transaction.clone(), 300);
@@ -1657,7 +1681,7 @@ mod tests {
             ..Default::default()
         });
         let signature = Signature::test_signature();
-        let signed_tx = TransactionSigned::from_transaction_and_signature(tx, signature);
+        let signed_tx = TransactionSigned::new_unhashed(tx, signature);
         let transaction =
             TransactionSignedEcRecovered::from_signed_transaction(signed_tx, Default::default());
         let pooled_tx = EthPooledTransaction::new(transaction.clone(), 200);
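One consequence of `cost()` now returning a reference shows up in the validator diff below: the balance check compares by reference and only copies the amount when building the error. A small editorial sketch of that shape (names mirror the diff; `GotExpected` is reth's error payload):

    use alloy_primitives::U256;

    fn check_funds(cost: &U256, balance: U256) -> Result<(), (U256, U256)> {
        if cost > &balance {
            // (got, expected), mirroring the `GotExpected { got, expected }` used below
            return Err((balance, *cost));
        }
        Ok(())
    }

diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs
index d5f7101eb55..ca745222575 100644
--- a/crates/transaction-pool/src/validate/eth.rs
+++ b/crates/transaction-pool/src/validate/eth.rs
@@ -107,6 +107,19 @@ where
 }
 
 /// A [`TransactionValidator`] implementation that validates Ethereum transactions.
+///
+/// It supports all known Ethereum transaction types:
+/// - Legacy
+/// - EIP-2930
+/// - EIP-1559
+/// - EIP-4844
+/// - EIP-7702
+///
+/// And enforces additional constraints such as:
+/// - Maximum transaction size
+/// - Maximum gas limit
+///
+/// And adheres to the configured [`LocalTransactionConfig`].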
#[derive(Debug)] pub(crate) struct EthTransactionValidatorInner { /// Spec of the chain @@ -384,11 +397,12 @@ where let cost = transaction.cost(); // Checks for max cost - if cost > account.balance { + if cost > &account.balance { + let expected = *cost; return TransactionValidationOutcome::Invalid( transaction, InvalidTransactionError::InsufficientFunds( - GotExpected { got: account.balance, expected: cost }.into(), + GotExpected { got: account.balance, expected }.into(), ) .into(), ) diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 8a5ecc9c419..35e3a85537e 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -312,7 +312,7 @@ impl ValidPoolTransaction { /// /// For EIP-1559 transactions: `max_fee_per_gas * gas_limit + tx_value`. /// For legacy transactions: `gas_price * gas_limit + tx_value`. - pub fn cost(&self) -> U256 { + pub fn cost(&self) -> &U256 { self.transaction.cost() } diff --git a/crates/transaction-pool/tests/it/blobs.rs b/crates/transaction-pool/tests/it/blobs.rs index 0cdc6d088c0..9417c62278b 100644 --- a/crates/transaction-pool/tests/it/blobs.rs +++ b/crates/transaction-pool/tests/it/blobs.rs @@ -3,7 +3,7 @@ use reth_transaction_pool::{ error::PoolErrorKind, test_utils::{MockTransaction, MockTransactionFactory, TestPoolBuilder}, - TransactionOrigin, TransactionPool, + PoolTransaction, TransactionOrigin, TransactionPool, }; #[tokio::test(flavor = "multi_thread")] @@ -16,23 +16,22 @@ async fn blobs_exclusive() { .add_transaction(TransactionOrigin::External, blob_tx.transaction.clone()) .await .unwrap(); - assert_eq!(hash, blob_tx.transaction.get_hash()); + assert_eq!(hash, *blob_tx.transaction.get_hash()); let mut best_txns = txpool.best_transactions(); assert_eq!(best_txns.next().unwrap().transaction.get_hash(), blob_tx.transaction.get_hash()); assert!(best_txns.next().is_none()); - let eip1559_tx = MockTransaction::eip1559() - .set_sender(blob_tx.transaction.get_sender()) - .inc_price_by(10_000); + let eip1559_tx = + MockTransaction::eip1559().set_sender(blob_tx.transaction.sender()).inc_price_by(10_000); let res = txpool.add_transaction(TransactionOrigin::External, eip1559_tx.clone()).await.unwrap_err(); - assert_eq!(res.hash, eip1559_tx.get_hash()); + assert_eq!(res.hash, *eip1559_tx.get_hash()); match res.kind { PoolErrorKind::ExistingConflictingTransactionType(addr, tx_type) => { - assert_eq!(addr, eip1559_tx.get_sender()); + assert_eq!(addr, eip1559_tx.sender()); assert_eq!(tx_type, eip1559_tx.tx_type()); } _ => unreachable!(), diff --git a/crates/transaction-pool/tests/it/evict.rs b/crates/transaction-pool/tests/it/evict.rs index fea50962fd9..3b74b8cb230 100644 --- a/crates/transaction-pool/tests/it/evict.rs +++ b/crates/transaction-pool/tests/it/evict.rs @@ -8,7 +8,8 @@ use reth_transaction_pool::{ test_utils::{ MockFeeRange, MockTransactionDistribution, MockTransactionRatio, TestPool, TestPoolBuilder, }, - BlockInfo, PoolConfig, SubPoolLimit, TransactionOrigin, TransactionPool, TransactionPoolExt, + BlockInfo, PoolConfig, PoolTransaction, SubPoolLimit, TransactionOrigin, TransactionPool, + TransactionPoolExt, }; #[tokio::test(flavor = "multi_thread")] @@ -87,7 +88,7 @@ async fn only_blobs_eviction() { let set = set.into_vec(); // ensure that the first nonce is 0 - assert_eq!(set[0].get_nonce(), 0); + assert_eq!(set[0].nonce(), 0); // and finally insert it into the pool let results = pool.add_transactions(TransactionOrigin::External, set).await; @@ 
-194,7 +195,7 @@ async fn mixed_eviction() { ); let set = set.into_inner().into_vec(); - assert_eq!(set[0].get_nonce(), 0); + assert_eq!(set[0].nonce(), 0); let results = pool.add_transactions(TransactionOrigin::External, set).await; for (i, result) in results.iter().enumerate() { diff --git a/crates/transaction-pool/tests/it/listeners.rs b/crates/transaction-pool/tests/it/listeners.rs index ad13af22a6a..0f8a0b19e2b 100644 --- a/crates/transaction-pool/tests/it/listeners.rs +++ b/crates/transaction-pool/tests/it/listeners.rs @@ -33,11 +33,11 @@ async fn txpool_listener_all() { let added_result = txpool.add_transaction(TransactionOrigin::External, transaction.transaction.clone()).await; - assert_matches!(added_result, Ok(hash) if hash == transaction.transaction.get_hash()); + assert_matches!(added_result, Ok(hash) if hash == *transaction.transaction.get_hash()); assert_matches!( all_tx_events.next().await, - Some(FullTransactionEvent::Pending(hash)) if hash == transaction.transaction.get_hash() + Some(FullTransactionEvent::Pending(hash)) if hash == *transaction.transaction.get_hash() ); } diff --git a/crates/transaction-pool/tests/it/pending.rs b/crates/transaction-pool/tests/it/pending.rs index 0b6349b24cc..be559c71eec 100644 --- a/crates/transaction-pool/tests/it/pending.rs +++ b/crates/transaction-pool/tests/it/pending.rs @@ -12,7 +12,7 @@ async fn txpool_new_pending_txs() { let added_result = txpool.add_transaction(TransactionOrigin::External, transaction.transaction.clone()).await; - assert_matches!(added_result, Ok(hash) if hash == transaction.transaction.get_hash()); + assert_matches!(added_result, Ok(hash) if hash == *transaction.transaction.get_hash()); let mut best_txns = txpool.best_transactions(); assert_matches!(best_txns.next(), Some(tx) if tx.transaction.get_hash() == transaction.transaction.get_hash()); @@ -20,6 +20,6 @@ async fn txpool_new_pending_txs() { let transaction = mock_tx_factory.create_eip1559(); let added_result = txpool.add_transaction(TransactionOrigin::External, transaction.transaction.clone()).await; - assert_matches!(added_result, Ok(hash) if hash == transaction.transaction.get_hash()); + assert_matches!(added_result, Ok(hash) if hash == *transaction.transaction.get_hash()); assert_matches!(best_txns.next(), Some(tx) if tx.transaction.get_hash() == transaction.transaction.get_hash()); } diff --git a/crates/trie/common/Cargo.toml b/crates/trie/common/Cargo.toml index c0998764e40..0a3893beb20 100644 --- a/crates/trie/common/Cargo.toml +++ b/crates/trie/common/Cargo.toml @@ -12,7 +12,7 @@ description = "Commonly used types for trie usage in reth." workspace = true [dependencies] -reth-primitives-traits.workspace = true +reth-primitives-traits = { workspace = true, features = ["serde"] } reth-codecs.workspace = true alloy-primitives.workspace = true @@ -36,6 +36,7 @@ plain_hasher = { version = "0.2", optional = true } arbitrary = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] +alloy-primitives = { workspace = true, features = ["getrandom"] } arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-arbitrary-interop.workspace = true diff --git a/crates/trie/common/src/key.rs b/crates/trie/common/src/key.rs index 9e440d199fa..71f8019bff5 100644 --- a/crates/trie/common/src/key.rs +++ b/crates/trie/common/src/key.rs @@ -1,5 +1,4 @@ -use alloy_primitives::B256; -use revm_primitives::keccak256; +use alloy_primitives::{keccak256, B256}; /// Trait for hashing keys in state. 
pub trait KeyHasher: Default + Clone + Send + Sync + 'static { diff --git a/crates/trie/common/src/proofs.rs b/crates/trie/common/src/proofs.rs index 67c04e0cb75..e1bd85dde3d 100644 --- a/crates/trie/common/src/proofs.rs +++ b/crates/trie/common/src/proofs.rs @@ -12,7 +12,7 @@ use alloy_trie::{ use itertools::Itertools; use reth_primitives_traits::Account; use serde::{Deserialize, Serialize}; -use std::collections::HashMap; +use std::collections::{hash_map, HashMap}; /// The state multiproof of target accounts and multiproofs of their storage tries. /// Multiproof is effectively a state subtrie that only contains the nodes @@ -81,6 +81,24 @@ impl MultiProof { } Ok(AccountProof { address, info, proof, storage_root, storage_proofs }) } + + /// Extends this multiproof with another one, merging both account and storage + /// proofs. + pub fn extend(&mut self, other: Self) { + self.account_subtree.extend_from(other.account_subtree); + + for (hashed_address, storage) in other.storages { + match self.storages.entry(hashed_address) { + hash_map::Entry::Occupied(mut entry) => { + debug_assert_eq!(entry.get().root, storage.root); + entry.get_mut().subtree.extend_from(storage.subtree); + } + hash_map::Entry::Vacant(entry) => { + entry.insert(storage); + } + } + } + } } /// The merkle multiproof of storage trie. @@ -260,3 +278,61 @@ pub mod triehash { } } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_multiproof_extend_account_proofs() { + let mut proof1 = MultiProof::default(); + let mut proof2 = MultiProof::default(); + + let addr1 = B256::random(); + let addr2 = B256::random(); + + proof1.account_subtree.insert( + Nibbles::unpack(addr1), + alloy_rlp::encode_fixed_size(&U256::from(42)).to_vec().into(), + ); + proof2.account_subtree.insert( + Nibbles::unpack(addr2), + alloy_rlp::encode_fixed_size(&U256::from(43)).to_vec().into(), + ); + + proof1.extend(proof2); + + assert!(proof1.account_subtree.contains_key(&Nibbles::unpack(addr1))); + assert!(proof1.account_subtree.contains_key(&Nibbles::unpack(addr2))); + } + + #[test] + fn test_multiproof_extend_storage_proofs() { + let mut proof1 = MultiProof::default(); + let mut proof2 = MultiProof::default(); + + let addr = B256::random(); + let root = B256::random(); + + let mut subtree1 = ProofNodes::default(); + subtree1.insert( + Nibbles::from_nibbles(vec![0]), + alloy_rlp::encode_fixed_size(&U256::from(42)).to_vec().into(), + ); + proof1.storages.insert(addr, StorageMultiProof { root, subtree: subtree1 }); + + let mut subtree2 = ProofNodes::default(); + subtree2.insert( + Nibbles::from_nibbles(vec![1]), + alloy_rlp::encode_fixed_size(&U256::from(43)).to_vec().into(), + ); + proof2.storages.insert(addr, StorageMultiProof { root, subtree: subtree2 }); + + proof1.extend(proof2); + + let storage = proof1.storages.get(&addr).unwrap(); + assert_eq!(storage.root, root); + assert!(storage.subtree.contains_key(&Nibbles::from_nibbles(vec![0]))); + assert!(storage.subtree.contains_key(&Nibbles::from_nibbles(vec![1]))); + } +} diff --git a/crates/trie/common/src/root.rs b/crates/trie/common/src/root.rs index dbcbf4200d7..982dec98837 100644 --- a/crates/trie/common/src/root.rs +++ b/crates/trie/common/src/root.rs @@ -7,17 +7,6 @@ use alloy_trie::HashBuilder; use itertools::Itertools; use nybbles::Nibbles; -/// Adjust the index of an item for rlp encoding. 
-pub const fn adjust_index_for_rlp(i: usize, len: usize) -> usize { - if i > 0x7f { - i - } else if i == 0x7f || i + 1 == len { - 0 - } else { - i + 1 - } -} - /// Hashes and sorts account keys, then proceeds to calculating the root hash of the state /// represented as MPT. /// See [`state_root_unsorted`] for more info. diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index bafb9917c60..dcb1a0231dd 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -17,12 +17,12 @@ use reth_trie::{ proof::StorageProof, trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory}, walker::TrieWalker, - HashBuilder, MultiProof, Nibbles, TrieAccount, TrieInput, + HashBuilder, MultiProof, Nibbles, TrieAccount, TrieInput, TRIE_ACCOUNT_RLP_MAX_SIZE, }; use reth_trie_common::proof::ProofRetainer; use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; use std::sync::Arc; -use tracing::debug; +use tracing::{debug, error}; #[cfg(feature = "metrics")] use crate::metrics::ParallelStateRootMetrics; @@ -126,7 +126,9 @@ where )) }) })(); - let _ = tx.send(result); + if let Err(err) = tx.send(result) { + error!(target: "trie::parallel", ?hashed_address, err_content = ?err.0, "Failed to send proof result"); + } }); storage_proofs.insert(hashed_address, rx); } @@ -153,7 +155,7 @@ where let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); let mut storages = HashMap::default(); - let mut account_rlp = Vec::with_capacity(128); + let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); let mut account_node_iter = TrieNodeIter::new( walker, hashed_cursor_factory.hashed_account_cursor().map_err(ProviderError::Database)?, diff --git a/crates/trie/parallel/src/root.rs b/crates/trie/parallel/src/root.rs index e432b91062c..7a316d8b15f 100644 --- a/crates/trie/parallel/src/root.rs +++ b/crates/trie/parallel/src/root.rs @@ -14,7 +14,7 @@ use reth_trie::{ trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory}, updates::TrieUpdates, walker::TrieWalker, - HashBuilder, Nibbles, StorageRoot, TrieAccount, TrieInput, + HashBuilder, Nibbles, StorageRoot, TrieAccount, TrieInput, TRIE_ACCOUNT_RLP_MAX_SIZE, }; use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; use std::{collections::HashMap, sync::Arc}; @@ -149,7 +149,7 @@ where ); let mut hash_builder = HashBuilder::default().with_updates(retain_updates); - let mut account_rlp = Vec::with_capacity(128); + let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); while let Some(node) = account_node_iter.try_next().map_err(ProviderError::Database)? 
{ match node { TrieElement::Branch(node) => { diff --git a/crates/trie/sparse/Cargo.toml b/crates/trie/sparse/Cargo.toml index 1c5bb7d8a33..3301975961e 100644 --- a/crates/trie/sparse/Cargo.toml +++ b/crates/trie/sparse/Cargo.toml @@ -27,6 +27,7 @@ smallvec = { workspace = true, features = ["const_new"] } thiserror.workspace = true [dev-dependencies] +reth-primitives-traits = { workspace = true, features = ["arbitrary"] } reth-testing-utils.workspace = true reth-trie = { workspace = true, features = ["test-utils"] } reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] } @@ -35,6 +36,7 @@ assert_matches.workspace = true criterion.workspace = true itertools.workspace = true pretty_assertions = "1.4" +proptest-arbitrary-interop.workspace = true proptest.workspace = true rand.workspace = true diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index d7557a7a365..0b0db140115 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -42,6 +42,10 @@ impl SparseStateTrie { account: B256, proof: impl IntoIterator, ) -> SparseStateTrieResult<()> { + if self.revealed.contains_key(&account) { + return Ok(()); + } + let mut proof = proof.into_iter().peekable(); let Some(root_node) = self.validate_proof(&mut proof)? else { return Ok(()) }; @@ -69,6 +73,10 @@ impl SparseStateTrie { slot: B256, proof: impl IntoIterator, ) -> SparseStateTrieResult<()> { + if self.revealed.get(&account).is_some_and(|v| v.contains(&slot)) { + return Ok(()); + } + let mut proof = proof.into_iter().peekable(); let Some(root_node) = self.validate_proof(&mut proof)? else { return Ok(()) }; diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 696934d3edb..dff29027175 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -1,17 +1,21 @@ use crate::{SparseTrieError, SparseTrieResult}; -use alloy_primitives::{hex, keccak256, map::HashMap, B256}; +use alloy_primitives::{ + hex, keccak256, + map::{HashMap, HashSet}, + B256, +}; use alloy_rlp::Decodable; use reth_tracing::tracing::debug; use reth_trie::{ prefix_set::{PrefixSet, PrefixSetMut}, - RlpNode, + BranchNodeCompact, RlpNode, }; use reth_trie_common::{ BranchNodeRef, ExtensionNodeRef, LeafNodeRef, Nibbles, TrieMask, TrieNode, CHILD_INDEX_RANGE, EMPTY_ROOT_HASH, }; use smallvec::SmallVec; -use std::fmt; +use std::{borrow::Cow, fmt}; /// Inner representation of the sparse trie. /// Sparse trie is blind by default until nodes are revealed. @@ -21,13 +25,13 @@ pub enum SparseTrie { #[default] Blind, /// The trie nodes have been revealed. - Revealed(RevealedSparseTrie), + Revealed(Box), } impl SparseTrie { /// Creates new revealed empty trie. pub fn revealed_empty() -> Self { - Self::Revealed(RevealedSparseTrie::default()) + Self::Revealed(Box::default()) } /// Returns `true` if the sparse trie has no revealed nodes. @@ -51,7 +55,7 @@ impl SparseTrie { /// Mutable reference to [`RevealedSparseTrie`]. pub fn reveal_root(&mut self, root: TrieNode) -> SparseTrieResult<&mut RevealedSparseTrie> { if self.is_blind() { - *self = Self::Revealed(RevealedSparseTrie::from_root(root)?) + *self = Self::Revealed(Box::new(RevealedSparseTrie::from_root(root)?)) } Ok(self.as_revealed_mut().unwrap()) } @@ -87,6 +91,7 @@ pub struct RevealedSparseTrie { prefix_set: PrefixSetMut, /// Reusable buffer for RLP encoding of nodes. 
     rlp_buf: Vec<u8>,
+    /// Retained branch node updates, present only when update retention is enabled.
+    updates: Option<SparseTrieUpdates>,
 }
 
 impl fmt::Debug for RevealedSparseTrie {
@@ -96,6 +101,7 @@ impl fmt::Debug for RevealedSparseTrie {
             .field("values", &self.values)
             .field("prefix_set", &self.prefix_set)
             .field("rlp_buf", &hex::encode(&self.rlp_buf))
+            .field("updates", &self.updates)
             .finish()
     }
 }
@@ -107,6 +113,7 @@ impl Default for RevealedSparseTrie {
             values: HashMap::default(),
             prefix_set: PrefixSetMut::default(),
             rlp_buf: Vec::new(),
+            updates: None,
         }
     }
 }
@@ -119,11 +126,30 @@ impl RevealedSparseTrie {
             values: HashMap::default(),
             prefix_set: PrefixSetMut::default(),
             rlp_buf: Vec::new(),
+            updates: None,
         };
         this.reveal_node(Nibbles::default(), node)?;
         Ok(this)
     }
 
+    /// Makes the sparse trie store updated branch nodes.
+    pub fn with_updates(mut self, retain_updates: bool) -> Self {
+        if retain_updates {
+            self.updates = Some(SparseTrieUpdates::default());
+        }
+        self
+    }
+
+    /// Returns a reference to the retained sparse node updates without taking them.
+    pub fn updates_ref(&self) -> Cow<'_, SparseTrieUpdates> {
+        self.updates.as_ref().map_or(Cow::Owned(SparseTrieUpdates::default()), Cow::Borrowed)
+    }
+
+    /// Takes and returns the retained sparse node updates.
+    pub fn take_updates(&mut self) -> SparseTrieUpdates {
+        self.updates.take().unwrap_or_default()
+    }
+
     /// Reveal the trie node only if it was not known already.
     pub fn reveal_node(&mut self, path: Nibbles, node: TrieNode) -> SparseTrieResult<()> {
         // TODO: revise all inserts to not overwrite existing entries
@@ -146,10 +172,7 @@ impl RevealedSparseTrie {
             match self.nodes.get(&path) {
                 // Blinded and non-existent nodes can be replaced.
                 Some(SparseNode::Hash(_)) | None => {
-                    self.nodes.insert(
-                        path,
-                        SparseNode::Branch { state_mask: branch.state_mask, hash: None },
-                    );
+                    self.nodes.insert(path, SparseNode::new_branch(branch.state_mask));
                 }
                 // Branch node already exists, or an extension node was placed where a
                 // branch node was before.
@@ -165,7 +188,7 @@ impl RevealedSparseTrie {
                 let mut child_path = path.clone();
                 child_path.extend_from_slice_unchecked(&ext.key);
                 self.reveal_node_or_hash(child_path, &ext.child)?;
-                self.nodes.insert(path, SparseNode::Extension { key: ext.key, hash: None });
+                self.nodes.insert(path, SparseNode::new_ext(ext.key));
             }
             // Extension node already exists, or an extension node was placed where a branch
             // node was before.
@@ -390,7 +413,7 @@ impl RevealedSparseTrie {
                     SparseNode::Branch { .. } => removed_node.node,
                 }
             }
-            SparseNode::Branch { mut state_mask, hash: _ } => {
+            SparseNode::Branch { mut state_mask, hash: _, store_in_db_trie: _ } => {
                 // If the node is a branch node, we need to check the number of children left
                 // after deleting the child at the given nibble.
@@ -452,6 +475,10 @@ impl RevealedSparseTrie {
                         self.nodes.remove(&child_path);
                     }
 
+                    if let Some(updates) = self.updates.as_mut() {
+                        updates.removed_nodes.insert(removed_path.clone());
+                    }
+
                    new_node
                 }
                 // If more than one child is left set in the branch, we just re-insert it
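A brief editorial note on the accessor pair above: `updates_ref` hands out a `Cow` so callers can read updates uniformly whether or not retention is enabled, while `take_updates` drains them. A minimal sketch of the same pattern (standalone, with `Vec<u32>` standing in for `SparseTrieUpdates`):

    use std::borrow::Cow;

    fn updates_ref(updates: &Option<Vec<u32>>) -> Cow<'_, Vec<u32>> {
        // Borrow when retention is on; otherwise return a cheap owned default.
        updates.as_ref().map_or(Cow::Owned(Vec::new()), Cow::Borrowed)
    }

    fn take_updates(updates: &mut Option<Vec<u32>>) -> Vec<u32> {
        updates.take().unwrap_or_default()
    }

@@ -558,11 +585,11 @@ pub fn root(&mut self) -> B256 {
         // take the current prefix set.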
let mut prefix_set = std::mem::take(&mut self.prefix_set).freeze(); - let root_rlp = self.rlp_node_allocate(Nibbles::default(), &mut prefix_set); - if let Some(root_hash) = root_rlp.as_hash() { + let rlp_node = self.rlp_node_allocate(Nibbles::default(), &mut prefix_set); + if let Some(root_hash) = rlp_node.as_hash() { root_hash } else { - keccak256(root_rlp) + keccak256(rlp_node) } } @@ -608,7 +635,7 @@ impl RevealedSparseTrie { paths.push((path, level + 1)); } } - SparseNode::Branch { state_mask, hash } => { + SparseNode::Branch { state_mask, hash, .. } => { if hash.is_some() && !prefix_set.contains(&path) { continue } @@ -644,48 +671,70 @@ impl RevealedSparseTrie { let mut prefix_set_contains = |path: &Nibbles| *is_in_prefix_set.get_or_insert_with(|| prefix_set.contains(path)); - let rlp_node = match self.nodes.get_mut(&path).unwrap() { - SparseNode::Empty => RlpNode::word_rlp(&EMPTY_ROOT_HASH), - SparseNode::Hash(hash) => RlpNode::word_rlp(hash), + let (rlp_node, calculated, node_type) = match self.nodes.get_mut(&path).unwrap() { + SparseNode::Empty => { + (RlpNode::word_rlp(&EMPTY_ROOT_HASH), false, SparseNodeType::Empty) + } + SparseNode::Hash(hash) => (RlpNode::word_rlp(hash), false, SparseNodeType::Hash), SparseNode::Leaf { key, hash } => { self.rlp_buf.clear(); let mut path = path.clone(); path.extend_from_slice_unchecked(key); if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) { - RlpNode::word_rlp(&hash) + (RlpNode::word_rlp(&hash), false, SparseNodeType::Leaf) } else { let value = self.values.get(&path).unwrap(); let rlp_node = LeafNodeRef { key, value }.rlp(&mut self.rlp_buf); *hash = rlp_node.as_hash(); - rlp_node + (rlp_node, true, SparseNodeType::Leaf) } } SparseNode::Extension { key, hash } => { let mut child_path = path.clone(); child_path.extend_from_slice_unchecked(key); if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) { - RlpNode::word_rlp(&hash) + ( + RlpNode::word_rlp(&hash), + false, + SparseNodeType::Extension { store_in_db_trie: true }, + ) } else if buffers.rlp_node_stack.last().is_some_and(|e| e.0 == child_path) { - let (_, child) = buffers.rlp_node_stack.pop().unwrap(); + let (_, child, _, node_type) = buffers.rlp_node_stack.pop().unwrap(); self.rlp_buf.clear(); let rlp_node = ExtensionNodeRef::new(key, &child).rlp(&mut self.rlp_buf); *hash = rlp_node.as_hash(); - rlp_node + + ( + rlp_node, + true, + SparseNodeType::Extension { + // Inherit the `store_in_db_trie` flag from the child node, which is + // always the branch node + store_in_db_trie: node_type.store_in_db_trie(), + }, + ) } else { // need to get rlp node for child first buffers.path_stack.extend([(path, is_in_prefix_set), (child_path, None)]); continue } } - SparseNode::Branch { state_mask, hash } => { - if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) { - buffers.rlp_node_stack.push((path, RlpNode::word_rlp(&hash))); + SparseNode::Branch { state_mask, hash, store_in_db_trie } => { + if let Some((hash, store_in_db_trie)) = + hash.zip(*store_in_db_trie).filter(|_| !prefix_set_contains(&path)) + { + buffers.rlp_node_stack.push(( + path, + RlpNode::word_rlp(&hash), + false, + SparseNodeType::Branch { store_in_db_trie }, + )); continue } buffers.branch_child_buf.clear(); // Walk children in a reverse order from `f` to `0`, so we pop the `0` first - // from the stack. + // from the stack and keep walking in the sorted order. 
for bit in CHILD_INDEX_RANGE.rev() { if state_mask.is_bit_set(bit) { let mut child = path.clone(); @@ -698,11 +747,43 @@ impl RevealedSparseTrie { .branch_value_stack_buf .resize(buffers.branch_child_buf.len(), Default::default()); let mut added_children = false; + + // TODO(alexey): set the `TrieMask` bits directly + let mut tree_mask_values = Vec::new(); + let mut hash_mask_values = Vec::new(); + let mut hashes = Vec::new(); for (i, child_path) in buffers.branch_child_buf.iter().enumerate() { if buffers.rlp_node_stack.last().is_some_and(|e| &e.0 == child_path) { - let (_, child) = buffers.rlp_node_stack.pop().unwrap(); - // Insert children in the resulting buffer in a normal order, because - // initially we iterated in reverse. + let (_, child, calculated, node_type) = + buffers.rlp_node_stack.pop().unwrap(); + + // Update the masks only if we need to retain trie updates + if self.updates.is_some() { + // Set the trie mask + if node_type.store_in_db_trie() { + // A branch or an extension node explicitly set the + // `store_in_db_trie` flag + tree_mask_values.push(true); + } else { + // Set the flag according to whether a child node was + // pre-calculated + // (`calculated = false`), meaning that it wasn't in the + // database + tree_mask_values.push(!calculated); + } + + // Set the hash mask. If a child node has a hash value AND is a + // branch node, set the hash mask + // and save the hash. + let hash = child.as_hash().filter(|_| node_type.is_branch()); + hash_mask_values.push(hash.is_some()); + if let Some(hash) = hash { + hashes.push(hash); + } + } + + // Insert children in the resulting buffer in a normal order, + // because initially we iterated in reverse. buffers.branch_value_stack_buf [buffers.branch_child_buf.len() - i - 1] = child; added_children = true; @@ -717,21 +798,101 @@ impl RevealedSparseTrie { } self.rlp_buf.clear(); - let rlp_node = BranchNodeRef::new(&buffers.branch_value_stack_buf, *state_mask) - .rlp(&mut self.rlp_buf); + let branch_node_ref = + BranchNodeRef::new(&buffers.branch_value_stack_buf, *state_mask); + let rlp_node = branch_node_ref.rlp(&mut self.rlp_buf); *hash = rlp_node.as_hash(); - rlp_node + + let store_in_db_trie_value = if let Some(updates) = self.updates.as_mut() { + let mut tree_mask_values = tree_mask_values.into_iter().rev(); + let mut hash_mask_values = hash_mask_values.into_iter().rev(); + let mut tree_mask = TrieMask::default(); + let mut hash_mask = TrieMask::default(); + for (i, child) in branch_node_ref.children() { + if child.is_some() { + if hash_mask_values.next().unwrap() { + hash_mask.set_bit(i); + } + if tree_mask_values.next().unwrap() { + tree_mask.set_bit(i); + } + } + } + + // Store in DB trie if there are either any children that are stored in the + // DB trie, or any children represent hashed values + let store_in_db_trie = !tree_mask.is_empty() || !hash_mask.is_empty(); + if store_in_db_trie { + hashes.reverse(); + let branch_node = BranchNodeCompact::new( + *state_mask, + tree_mask, + hash_mask, + hashes, + hash.filter(|_| path.len() == 0), + ); + updates.updated_nodes.insert(path.clone(), branch_node); + } + + store_in_db_trie + } else { + false + }; + *store_in_db_trie = Some(store_in_db_trie_value); + + ( + rlp_node, + true, + SparseNodeType::Branch { store_in_db_trie: store_in_db_trie_value }, + ) } }; - buffers.rlp_node_stack.push((path, rlp_node)); + buffers.rlp_node_stack.push((path, rlp_node, calculated, node_type)); } + debug_assert_eq!(buffers.rlp_node_stack.len(), 1); buffers.rlp_node_stack.pop().unwrap().1 } } 
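The mask bookkeeping above mirrors how the hash builder decides what belongs in the database trie. A compact editorial example of the `TrieMask` arithmetic it relies on (only `set_bit` and `is_empty`, both used in the diff, are assumed):

    use reth_trie_common::TrieMask;

    fn demo() {
        let mut tree_mask = TrieMask::default();
        let mut hash_mask = TrieMask::default();
        // Children at nibbles 0x1 and 0xa; only the branch child at 0xa is
        // itself stored in the DB trie and has a known hash.
        tree_mask.set_bit(0xa);
        hash_mask.set_bit(0xa);
        // Store in the DB trie if either mask is non-empty.
        assert!(!tree_mask.is_empty() || !hash_mask.is_empty());
    }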
+/// Enum representing sparse trie node type.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+enum SparseNodeType {
+    /// Empty trie node.
+    Empty,
+    /// The hash of the node that was not revealed.
+    Hash,
+    /// Sparse leaf node.
+    Leaf,
+    /// Sparse extension node.
+    Extension {
+        /// A flag indicating whether the extension node should be stored in the database.
+        store_in_db_trie: bool,
+    },
+    /// Sparse branch node.
+    Branch {
+        /// A flag indicating whether the branch node should be stored in the database.
+        store_in_db_trie: bool,
+    },
+}
+
+impl SparseNodeType {
+    const fn is_branch(&self) -> bool {
+        matches!(self, Self::Branch { .. })
+    }
+
+    const fn store_in_db_trie(&self) -> bool {
+        match *self {
+            Self::Extension { store_in_db_trie } | Self::Branch { store_in_db_trie } => {
+                store_in_db_trie
+            }
+            _ => false,
+        }
+    }
+}
+
 /// Enum representing trie nodes in sparse trie.
-#[derive(PartialEq, Eq, Clone, Debug)]
+#[derive(Debug, Clone, PartialEq, Eq)]
 pub enum SparseNode {
     /// Empty trie node.
     Empty,
@@ -760,6 +921,9 @@ pub enum SparseNode {
         /// Pre-computed hash of the sparse node.
         /// Can be reused unless this trie path has been updated.
         hash: Option<B256>,
+        /// Pre-computed flag indicating whether the trie node should be stored in the database.
+        /// Can be reused unless this trie path has been updated.
+        store_in_db_trie: Option<bool>,
     },
 }
 
@@ -776,7 +940,7 @@ impl SparseNode {
 
     /// Create new [`SparseNode::Branch`] from state mask.
     pub const fn new_branch(state_mask: TrieMask) -> Self {
-        Self::Branch { state_mask, hash: None }
+        Self::Branch { state_mask, hash: None, store_in_db_trie: None }
     }
 
     /// Create new [`SparseNode::Branch`] with two bits set.
@@ -785,7 +949,7 @@ impl SparseNode {
             // set bits for both children
             (1u16 << bit_a) | (1u16 << bit_b),
         );
-        Self::Branch { state_mask, hash: None }
+        Self::Branch { state_mask, hash: None, store_in_db_trie: None }
     }
 
     /// Create new [`SparseNode::Extension`] from the key slice.
@@ -812,7 +976,7 @@ struct RlpNodeBuffers {
     /// Stack of paths we need rlp nodes for and whether the path is in the prefix set.
     path_stack: Vec<(Nibbles, Option<bool>)>,
     /// Stack of rlp nodes
-    rlp_node_stack: Vec<(Nibbles, RlpNode)>,
+    rlp_node_stack: Vec<(Nibbles, RlpNode, bool, SparseNodeType)>,
     /// Reusable branch child path
     branch_child_buf: SmallVec<[Nibbles; 16]>,
     /// Reusable branch value stack
@@ -831,37 +995,101 @@ impl RlpNodeBuffers {
     }
 }
 
+/// The aggregation of sparse trie updates.
+#[derive(Debug, Clone, Default, PartialEq, Eq)]
+pub struct SparseTrieUpdates {
+    updated_nodes: HashMap<Nibbles, BranchNodeCompact>,
+    removed_nodes: HashSet<Nibbles>,
+}
+
 #[cfg(test)]
 mod tests {
     use std::collections::BTreeMap;
 
     use super::*;
     use alloy_primitives::{map::HashSet, U256};
+    use alloy_rlp::Encodable;
     use assert_matches::assert_matches;
     use itertools::Itertools;
     use prop::sample::SizeRange;
     use proptest::prelude::*;
+    use proptest_arbitrary_interop::arb;
     use rand::seq::IteratorRandom;
-    use reth_trie::{BranchNode, ExtensionNode, LeafNode};
+    use reth_primitives_traits::Account;
+    use reth_trie::{
+        hashed_cursor::{noop::NoopHashedAccountCursor, HashedPostStateAccountCursor},
+        node_iter::{TrieElement, TrieNodeIter},
+        trie_cursor::noop::NoopAccountTrieCursor,
+        walker::TrieWalker,
+        BranchNode, ExtensionNode, HashedPostState, LeafNode, TrieAccount,
+    };
     use reth_trie_common::{
         proof::{ProofNodes, ProofRetainer},
         HashBuilder,
     };
 
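A quick editorial illustration of the `SparseNodeType` helpers above (extension nodes inherit `store_in_db_trie` from their child branch; leaf and hash nodes never set it):

    fn demo() {
        let ext = SparseNodeType::Extension { store_in_db_trie: true };
        let branch = SparseNodeType::Branch { store_in_db_trie: false };
        assert!(ext.store_in_db_trie() && !ext.is_branch());
        assert!(branch.is_branch() && !branch.store_in_db_trie());
        assert!(!SparseNodeType::Leaf.store_in_db_trie());
    }

+    /// Pad nibbles to the length of a B256 hash with zeros on the left.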
+    fn pad_nibbles_left(nibbles: Nibbles) -> Nibbles {
+        let mut base =
+            Nibbles::from_nibbles_unchecked(vec![0; B256::len_bytes() * 2 - nibbles.len()]);
+        base.extend_from_slice_unchecked(&nibbles);
+        base
+    }
+
+    /// Pad nibbles to the length of a B256 hash with zeros on the right.
+    fn pad_nibbles_right(mut nibbles: Nibbles) -> Nibbles {
+        nibbles.extend_from_slice_unchecked(&vec![0; B256::len_bytes() * 2 - nibbles.len()]);
+        nibbles
+    }
+
     /// Calculate the state root by feeding the provided state to the hash builder and retaining the
     /// proofs for the provided targets.
     ///
-    /// Returns the state root and the retained proof nodes.
-    fn hash_builder_root_with_proofs<V: AsRef<[u8]>>(
-        state: impl IntoIterator<Item = (Nibbles, V)>,
+    /// Returns the hash builder, from which the root, the retained proof nodes and the updated
+    /// branch nodes can be taken.
+    fn run_hash_builder(
+        state: impl IntoIterator<Item = (Nibbles, Account)> + Clone,
         proof_targets: impl IntoIterator<Item = Nibbles>,
-    ) -> (B256, ProofNodes) {
-        let mut hash_builder =
-            HashBuilder::default().with_proof_retainer(ProofRetainer::from_iter(proof_targets));
-        for (key, value) in state {
-            hash_builder.add_leaf(key, value.as_ref());
+    ) -> HashBuilder {
+        let mut account_rlp = Vec::new();
+
+        let mut hash_builder = HashBuilder::default()
+            .with_updates(true)
+            .with_proof_retainer(ProofRetainer::from_iter(proof_targets));
+
+        let mut prefix_set = PrefixSetMut::default();
+        prefix_set.extend_keys(state.clone().into_iter().map(|(nibbles, _)| nibbles));
+        let walker = TrieWalker::new(NoopAccountTrieCursor::default(), prefix_set.freeze())
+            .with_deletions_retained(true);
+        let hashed_post_state = HashedPostState::default()
+            .with_accounts(state.into_iter().map(|(nibbles, account)| {
+                (nibbles.pack().into_inner().unwrap().into(), Some(account))
+            }))
+            .into_sorted();
+        let mut node_iter = TrieNodeIter::new(
+            walker,
+            HashedPostStateAccountCursor::new(
+                NoopHashedAccountCursor::default(),
+                hashed_post_state.accounts(),
+            ),
+        );
+
+        while let Some(node) = node_iter.try_next().unwrap() {
+            match node {
+                TrieElement::Branch(branch) => {
+                    hash_builder.add_branch(branch.key, branch.value, branch.children_are_in_trie);
+                }
+                TrieElement::Leaf(key, account) => {
+                    let account = TrieAccount::from((account, EMPTY_ROOT_HASH));
+                    account.encode(&mut account_rlp);
+
+                    hash_builder.add_leaf(Nibbles::unpack(key), &account_rlp);
+                    account_rlp.clear();
+                }
+            }
         }
-        (hash_builder.root(), hash_builder.take_proof_nodes())
+        hash_builder.root();
+
+        hash_builder
     }
 
     /// Assert that the sparse trie nodes and the proof nodes from the hash builder are equal.
@@ -915,58 +1143,80 @@ mod tests { #[test] fn sparse_trie_empty_update_one() { - let path = Nibbles::unpack(B256::with_last_byte(42)); - let value = alloy_rlp::encode_fixed_size(&U256::from(1)); + let key = Nibbles::unpack(B256::with_last_byte(42)); + let value = || Account::default(); + let value_encoded = || { + let mut account_rlp = Vec::new(); + TrieAccount::from((value(), EMPTY_ROOT_HASH)).encode(&mut account_rlp); + account_rlp + }; - let (hash_builder_root, hash_builder_proof_nodes) = - hash_builder_root_with_proofs([(path.clone(), &value)], [path.clone()]); + let mut hash_builder = run_hash_builder([(key.clone(), value())], [key.clone()]); - let mut sparse = RevealedSparseTrie::default(); - sparse.update_leaf(path, value.to_vec()).unwrap(); + let mut sparse = RevealedSparseTrie::default().with_updates(true); + sparse.update_leaf(key, value_encoded()).unwrap(); let sparse_root = sparse.root(); + let sparse_updates = sparse.take_updates(); - assert_eq!(sparse_root, hash_builder_root); - assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); + assert_eq!(sparse_root, hash_builder.root()); + assert_eq!(sparse_updates.updated_nodes, hash_builder.updated_branch_nodes.take().unwrap()); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder.take_proof_nodes()); } #[test] fn sparse_trie_empty_update_multiple_lower_nibbles() { + reth_tracing::init_test_tracing(); + let paths = (0..=16).map(|b| Nibbles::unpack(B256::with_last_byte(b))).collect::>(); - let value = alloy_rlp::encode_fixed_size(&U256::from(1)); + let value = || Account::default(); + let value_encoded = || { + let mut account_rlp = Vec::new(); + TrieAccount::from((value(), EMPTY_ROOT_HASH)).encode(&mut account_rlp); + account_rlp + }; - let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs( - paths.iter().cloned().zip(std::iter::repeat_with(|| value.clone())), + let mut hash_builder = run_hash_builder( + paths.iter().cloned().zip(std::iter::repeat_with(value)), paths.clone(), ); - let mut sparse = RevealedSparseTrie::default(); + let mut sparse = RevealedSparseTrie::default().with_updates(true); for path in &paths { - sparse.update_leaf(path.clone(), value.to_vec()).unwrap(); + sparse.update_leaf(path.clone(), value_encoded()).unwrap(); } let sparse_root = sparse.root(); + let sparse_updates = sparse.take_updates(); - assert_eq!(sparse_root, hash_builder_root); - assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); + assert_eq!(sparse_root, hash_builder.root()); + assert_eq!(sparse_updates.updated_nodes, hash_builder.updated_branch_nodes.take().unwrap()); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder.take_proof_nodes()); } #[test] fn sparse_trie_empty_update_multiple_upper_nibbles() { let paths = (239..=255).map(|b| Nibbles::unpack(B256::repeat_byte(b))).collect::>(); - let value = alloy_rlp::encode_fixed_size(&U256::from(1)); + let value = || Account::default(); + let value_encoded = || { + let mut account_rlp = Vec::new(); + TrieAccount::from((value(), EMPTY_ROOT_HASH)).encode(&mut account_rlp); + account_rlp + }; - let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs( - paths.iter().cloned().zip(std::iter::repeat_with(|| value.clone())), + let mut hash_builder = run_hash_builder( + paths.iter().cloned().zip(std::iter::repeat_with(value)), paths.clone(), ); - let mut sparse = RevealedSparseTrie::default(); + let mut sparse = RevealedSparseTrie::default().with_updates(true); for path in &paths { - 
sparse.update_leaf(path.clone(), value.to_vec()).unwrap(); + sparse.update_leaf(path.clone(), value_encoded()).unwrap(); } let sparse_root = sparse.root(); + let sparse_updates = sparse.take_updates(); - assert_eq!(sparse_root, hash_builder_root); - assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); + assert_eq!(sparse_root, hash_builder.root()); + assert_eq!(sparse_updates.updated_nodes, hash_builder.updated_branch_nodes.take().unwrap()); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder.take_proof_nodes()); } #[test] @@ -980,55 +1230,79 @@ mod tests { }) }) .collect::>(); - let value = alloy_rlp::encode_fixed_size(&U256::from(1)); + let value = || Account::default(); + let value_encoded = || { + let mut account_rlp = Vec::new(); + TrieAccount::from((value(), EMPTY_ROOT_HASH)).encode(&mut account_rlp); + account_rlp + }; - let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs( - paths.iter().sorted_unstable().cloned().zip(std::iter::repeat_with(|| value.clone())), + let mut hash_builder = run_hash_builder( + paths.iter().sorted_unstable().cloned().zip(std::iter::repeat_with(value)), paths.clone(), ); - let mut sparse = RevealedSparseTrie::default(); + let mut sparse = RevealedSparseTrie::default().with_updates(true); for path in &paths { - sparse.update_leaf(path.clone(), value.to_vec()).unwrap(); + sparse.update_leaf(path.clone(), value_encoded()).unwrap(); } let sparse_root = sparse.root(); + let sparse_updates = sparse.take_updates(); - assert_eq!(sparse_root, hash_builder_root); - assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); + assert_eq!(sparse_root, hash_builder.root()); + pretty_assertions::assert_eq!( + BTreeMap::from_iter(sparse_updates.updated_nodes), + BTreeMap::from_iter(hash_builder.updated_branch_nodes.take().unwrap()) + ); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder.take_proof_nodes()); } #[test] fn sparse_trie_empty_update_repeated() { let paths = (0..=255).map(|b| Nibbles::unpack(B256::repeat_byte(b))).collect::>(); - let old_value = alloy_rlp::encode_fixed_size(&U256::from(1)); - let new_value = alloy_rlp::encode_fixed_size(&U256::from(2)); + let old_value = Account { nonce: 1, ..Default::default() }; + let old_value_encoded = { + let mut account_rlp = Vec::new(); + TrieAccount::from((old_value, EMPTY_ROOT_HASH)).encode(&mut account_rlp); + account_rlp + }; + let new_value = Account { nonce: 2, ..Default::default() }; + let new_value_encoded = { + let mut account_rlp = Vec::new(); + TrieAccount::from((new_value, EMPTY_ROOT_HASH)).encode(&mut account_rlp); + account_rlp + }; - let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs( - paths.iter().cloned().zip(std::iter::repeat_with(|| old_value.clone())), + let mut hash_builder = run_hash_builder( + paths.iter().cloned().zip(std::iter::repeat_with(|| old_value)), paths.clone(), ); - let mut sparse = RevealedSparseTrie::default(); + let mut sparse = RevealedSparseTrie::default().with_updates(true); for path in &paths { - sparse.update_leaf(path.clone(), old_value.to_vec()).unwrap(); + sparse.update_leaf(path.clone(), old_value_encoded.clone()).unwrap(); } let sparse_root = sparse.root(); + let sparse_updates = sparse.updates_ref(); - assert_eq!(sparse_root, hash_builder_root); - assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); + assert_eq!(sparse_root, hash_builder.root()); + assert_eq!(sparse_updates.updated_nodes, hash_builder.updated_branch_nodes.take().unwrap()); + 
assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder.take_proof_nodes()); - let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs( - paths.iter().cloned().zip(std::iter::repeat_with(|| new_value.clone())), + let mut hash_builder = run_hash_builder( + paths.iter().cloned().zip(std::iter::repeat_with(|| new_value)), paths.clone(), ); for path in &paths { - sparse.update_leaf(path.clone(), new_value.to_vec()).unwrap(); + sparse.update_leaf(path.clone(), new_value_encoded.clone()).unwrap(); } let sparse_root = sparse.root(); + let sparse_updates = sparse.take_updates(); - assert_eq!(sparse_root, hash_builder_root); - assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); + assert_eq!(sparse_root, hash_builder.root()); + assert_eq!(sparse_updates.updated_nodes, hash_builder.updated_branch_nodes.take().unwrap()); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder.take_proof_nodes()); } #[test] @@ -1311,30 +1585,43 @@ mod tests { // to test the sparse trie updates. const KEY_NIBBLES_LEN: usize = 3; - fn test(updates: Vec<(HashMap>, HashSet)>) { + fn test(updates: Vec<(HashMap, HashSet)>) { { let mut state = BTreeMap::default(); - let mut sparse = RevealedSparseTrie::default(); + let mut sparse = RevealedSparseTrie::default().with_updates(true); for (update, keys_to_delete) in updates { // Insert state updates into the sparse trie and calculate the root - for (key, value) in update.clone() { - sparse.update_leaf(key, value).unwrap(); + for (key, account) in update.clone() { + let account = TrieAccount::from((account, EMPTY_ROOT_HASH)); + let mut account_rlp = Vec::new(); + account.encode(&mut account_rlp); + sparse.update_leaf(key, account_rlp).unwrap(); } - let sparse_root = sparse.root(); + // We need to clone the sparse trie, so that all updated branch nodes are + // preserved, and not only those that were changed after the last call to + // `root()`. + let mut updated_sparse = sparse.clone(); + let sparse_root = updated_sparse.root(); + let sparse_updates = updated_sparse.take_updates(); // Insert state updates into the hash builder and calculate the root state.extend(update); - let (hash_builder_root, hash_builder_proof_nodes) = - hash_builder_root_with_proofs( - state.clone(), - state.keys().cloned().collect::>(), - ); + let mut hash_builder = + run_hash_builder(state.clone(), state.keys().cloned().collect::>()); // Assert that the sparse trie root matches the hash builder root - assert_eq!(sparse_root, hash_builder_root); + assert_eq!(sparse_root, hash_builder.root()); + // Assert that the sparse trie updates match the hash builder updates + pretty_assertions::assert_eq!( + sparse_updates.updated_nodes, + hash_builder.updated_branch_nodes.take().unwrap() + ); // Assert that the sparse trie nodes match the hash builder proof nodes - assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); + assert_eq_sparse_trie_proof_nodes( + &updated_sparse, + hash_builder.take_proof_nodes(), + ); // Delete some keys from both the hash builder and the sparse trie and check // that the sparse trie root still matches the hash builder root @@ -1343,34 +1630,36 @@ mod tests { sparse.remove_leaf(&key).unwrap(); } - let sparse_root = sparse.root(); + // We need to clone the sparse trie, so that all updated branch nodes are + // preserved, and not only those that were changed after the last call to + // `root()`. 
+ let mut updated_sparse = sparse.clone(); + let sparse_root = updated_sparse.root(); + let sparse_updates = updated_sparse.take_updates(); - let (hash_builder_root, hash_builder_proof_nodes) = - hash_builder_root_with_proofs( - state.clone(), - state.keys().cloned().collect::>(), - ); + let mut hash_builder = + run_hash_builder(state.clone(), state.keys().cloned().collect::>()); // Assert that the sparse trie root matches the hash builder root - assert_eq!(sparse_root, hash_builder_root); + assert_eq!(sparse_root, hash_builder.root()); + // Assert that the sparse trie updates match the hash builder updates + pretty_assertions::assert_eq!( + sparse_updates.updated_nodes, + hash_builder.updated_branch_nodes.take().unwrap() + ); // Assert that the sparse trie nodes match the hash builder proof nodes - assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); + assert_eq_sparse_trie_proof_nodes( + &updated_sparse, + hash_builder.take_proof_nodes(), + ); } } } - /// Pad nibbles of length [`KEY_NIBBLES_LEN`] with zeros to the length of a B256 hash. - fn pad_nibbles(nibbles: Nibbles) -> Nibbles { - let mut base = - Nibbles::from_nibbles_unchecked([0; { B256::len_bytes() / 2 - KEY_NIBBLES_LEN }]); - base.extend_from_slice_unchecked(&nibbles); - base - } - fn transform_updates( - updates: Vec>>, + updates: Vec>, mut rng: impl Rng, - ) -> Vec<(HashMap>, HashSet)> { + ) -> Vec<(HashMap, HashSet)> { let mut keys = HashSet::new(); updates .into_iter() @@ -1393,8 +1682,8 @@ mod tests { proptest!(ProptestConfig::with_cases(10), |( updates in proptest::collection::vec( proptest::collection::hash_map( - any_with::(SizeRange::new(KEY_NIBBLES_LEN..=KEY_NIBBLES_LEN)).prop_map(pad_nibbles), - any::>(), + any_with::(SizeRange::new(KEY_NIBBLES_LEN..=KEY_NIBBLES_LEN)).prop_map(pad_nibbles_right), + arb::(), 1..100, ).prop_map(HashMap::from_iter), 1..100, @@ -1417,24 +1706,28 @@ mod tests { /// replacing it. 
#[test] fn sparse_trie_reveal_node_1() { - let key1 = || Nibbles::from_nibbles_unchecked([0x00]); - let key2 = || Nibbles::from_nibbles_unchecked([0x01]); - let key3 = || Nibbles::from_nibbles_unchecked([0x02]); - let value = || alloy_rlp::encode_fixed_size(&B256::repeat_byte(1)); + let key1 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x00])); + let key2 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x01])); + let key3 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x02])); + let value = || Account::default(); + let value_encoded = || { + let mut account_rlp = Vec::new(); + TrieAccount::from((value(), EMPTY_ROOT_HASH)).encode(&mut account_rlp); + account_rlp + }; // Generate the proof for the root node and initialize the sparse trie with it - let (_, proof_nodes) = hash_builder_root_with_proofs( - [(key1(), value()), (key3(), value())], - [Nibbles::default()], - ); + let proof_nodes = + run_hash_builder([(key1(), value()), (key3(), value())], [Nibbles::default()]) + .take_proof_nodes(); let mut sparse = RevealedSparseTrie::from_root( TrieNode::decode(&mut &proof_nodes.nodes_sorted()[0].1[..]).unwrap(), ) .unwrap(); // Generate the proof for the first key and reveal it in the sparse trie - let (_, proof_nodes) = - hash_builder_root_with_proofs([(key1(), value()), (key3(), value())], [key1()]); + let proof_nodes = + run_hash_builder([(key1(), value()), (key3(), value())], [key1()]).take_proof_nodes(); for (path, node) in proof_nodes.nodes_sorted() { sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); } @@ -1446,7 +1739,7 @@ mod tests { ); // Insert the leaf for the second key - sparse.update_leaf(key2(), value().to_vec()).unwrap(); + sparse.update_leaf(key2(), value_encoded()).unwrap(); // Check that the branch node was updated and another nibble was set assert_eq!( @@ -1455,8 +1748,8 @@ mod tests { ); // Generate the proof for the third key and reveal it in the sparse trie - let (_, proof_nodes_3) = - hash_builder_root_with_proofs([(key1(), value()), (key3(), value())], [key3()]); + let proof_nodes_3 = + run_hash_builder([(key1(), value()), (key3(), value())], [key3()]).take_proof_nodes(); for (path, node) in proof_nodes_3.nodes_sorted() { sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); } @@ -1469,10 +1762,11 @@ mod tests { // Generate the nodes for the full trie with all three key using the hash builder, and // compare them to the sparse trie - let (_, proof_nodes) = hash_builder_root_with_proofs( + let proof_nodes = run_hash_builder( [(key1(), value()), (key2(), value()), (key3(), value())], [key1(), key2(), key3()], - ); + ) + .take_proof_nodes(); assert_eq_sparse_trie_proof_nodes(&sparse, proof_nodes); } @@ -1489,16 +1783,17 @@ mod tests { /// into an extension node, so it should ignore this node. 
#[test] fn sparse_trie_reveal_node_2() { - let key1 = || Nibbles::from_nibbles_unchecked([0x00, 0x00]); - let key2 = || Nibbles::from_nibbles_unchecked([0x01, 0x01]); - let key3 = || Nibbles::from_nibbles_unchecked([0x01, 0x02]); - let value = || alloy_rlp::encode_fixed_size(&B256::repeat_byte(1)); + let key1 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x00, 0x00])); + let key2 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x01, 0x01])); + let key3 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x01, 0x02])); + let value = || Account::default(); // Generate the proof for the root node and initialize the sparse trie with it - let (_, proof_nodes) = hash_builder_root_with_proofs( + let proof_nodes = run_hash_builder( [(key1(), value()), (key2(), value()), (key3(), value())], [Nibbles::default()], - ); + ) + .take_proof_nodes(); let mut sparse = RevealedSparseTrie::from_root( TrieNode::decode(&mut &proof_nodes.nodes_sorted()[0].1[..]).unwrap(), ) @@ -1506,10 +1801,11 @@ mod tests { // Generate the proof for the children of the root branch node and reveal it in the sparse // trie - let (_, proof_nodes) = hash_builder_root_with_proofs( + let proof_nodes = run_hash_builder( [(key1(), value()), (key2(), value()), (key3(), value())], [key1(), Nibbles::from_nibbles_unchecked([0x01])], - ); + ) + .take_proof_nodes(); for (path, node) in proof_nodes.nodes_sorted() { sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); } @@ -1530,10 +1826,9 @@ mod tests { ); // Generate the proof for the third key and reveal it in the sparse trie - let (_, proof_nodes) = hash_builder_root_with_proofs( - [(key1(), value()), (key2(), value()), (key3(), value())], - [key2()], - ); + let proof_nodes = + run_hash_builder([(key1(), value()), (key2(), value()), (key3(), value())], [key2()]) + .take_proof_nodes(); for (path, node) in proof_nodes.nodes_sorted() { sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); } @@ -1555,16 +1850,20 @@ mod tests { /// overwritten with the extension node from the proof. 
#[test] fn sparse_trie_reveal_node_3() { - let key1 = || Nibbles::from_nibbles_unchecked([0x00, 0x01]); - let key2 = || Nibbles::from_nibbles_unchecked([0x00, 0x02]); - let key3 = || Nibbles::from_nibbles_unchecked([0x01, 0x00]); - let value = || alloy_rlp::encode_fixed_size(&B256::repeat_byte(1)); + let key1 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x00, 0x01])); + let key2 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x00, 0x02])); + let key3 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x01, 0x00])); + let value = || Account::default(); + let value_encoded = || { + let mut account_rlp = Vec::new(); + TrieAccount::from((value(), EMPTY_ROOT_HASH)).encode(&mut account_rlp); + account_rlp + }; // Generate the proof for the root node and initialize the sparse trie with it - let (_, proof_nodes) = hash_builder_root_with_proofs( - [(key1(), value()), (key2(), value())], - [Nibbles::default()], - ); + let proof_nodes = + run_hash_builder([(key1(), value()), (key2(), value())], [Nibbles::default()]) + .take_proof_nodes(); let mut sparse = RevealedSparseTrie::from_root( TrieNode::decode(&mut &proof_nodes.nodes_sorted()[0].1[..]).unwrap(), ) @@ -1577,17 +1876,17 @@ mod tests { ); // Insert the leaf with a different prefix - sparse.update_leaf(key3(), value().to_vec()).unwrap(); + sparse.update_leaf(key3(), value_encoded()).unwrap(); // Check that the extension node was turned into a branch node assert_matches!( sparse.nodes.get(&Nibbles::default()), - Some(SparseNode::Branch { state_mask, hash: None }) if *state_mask == TrieMask::new(0b11) + Some(SparseNode::Branch { state_mask, hash: None, store_in_db_trie: None }) if *state_mask == TrieMask::new(0b11) ); // Generate the proof for the first key and reveal it in the sparse trie - let (_, proof_nodes) = - hash_builder_root_with_proofs([(key1(), value()), (key2(), value())], [key1()]); + let proof_nodes = + run_hash_builder([(key1(), value()), (key2(), value())], [key1()]).take_proof_nodes(); for (path, node) in proof_nodes.nodes_sorted() { sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); } @@ -1595,7 +1894,7 @@ mod tests { // Check that the branch node wasn't overwritten by the extension node in the proof assert_matches!( sparse.nodes.get(&Nibbles::default()), - Some(SparseNode::Branch { state_mask, hash: None }) if *state_mask == TrieMask::new(0b11) + Some(SparseNode::Branch { state_mask, hash: None, store_in_db_trie: None }) if *state_mask == TrieMask::new(0b11) ); } @@ -1671,4 +1970,27 @@ mod tests { ] ); } + + #[test] + fn hash_builder_branch_hash_mask() { + let key1 = || pad_nibbles_left(Nibbles::from_nibbles_unchecked([0x00])); + let key2 = || pad_nibbles_left(Nibbles::from_nibbles_unchecked([0x01])); + let value = || Account { bytecode_hash: Some(B256::repeat_byte(1)), ..Default::default() }; + let value_encoded = || { + let mut account_rlp = Vec::new(); + TrieAccount::from((value(), EMPTY_ROOT_HASH)).encode(&mut account_rlp); + account_rlp + }; + + let mut hash_builder = + run_hash_builder([(key1(), value()), (key2(), value())], [Nibbles::default()]); + let mut sparse = RevealedSparseTrie::default(); + sparse.update_leaf(key1(), value_encoded()).unwrap(); + sparse.update_leaf(key2(), value_encoded()).unwrap(); + let sparse_root = sparse.root(); + let sparse_updates = sparse.take_updates(); + + assert_eq!(sparse_root, hash_builder.root()); + assert_eq!(sparse_updates.updated_nodes, hash_builder.updated_branch_nodes.take().unwrap()); + } } diff --git 
a/crates/trie/trie/benches/trie_root.rs b/crates/trie/trie/benches/trie_root.rs index ad169936463..893e6e9e999 100644 --- a/crates/trie/trie/benches/trie_root.rs +++ b/crates/trie/trie/benches/trie_root.rs @@ -44,7 +44,8 @@ criterion_main!(benches); mod implementations { use super::*; use alloy_rlp::Encodable; - use reth_trie_common::{root::adjust_index_for_rlp, HashBuilder, Nibbles}; + use alloy_trie::root::adjust_index_for_rlp; + use reth_trie_common::{HashBuilder, Nibbles}; pub fn trie_hash_ordered_trie_root(receipts: &[ReceiptWithBloom]) -> B256 { triehash::ordered_trie_root::(receipts.iter().map(|receipt| { diff --git a/crates/trie/trie/src/constants.rs b/crates/trie/trie/src/constants.rs new file mode 100644 index 00000000000..7354290d959 --- /dev/null +++ b/crates/trie/trie/src/constants.rs @@ -0,0 +1,24 @@ +/// The maximum size of RLP encoded trie account in bytes. +/// 2 (header) + 4 * 1 (field lens) + 8 (nonce) + 32 * 3 (balance, storage root, code hash) +pub const TRIE_ACCOUNT_RLP_MAX_SIZE: usize = 110; + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::{B256, U256}; + use alloy_rlp::Encodable; + use reth_trie_common::TrieAccount; + + #[test] + fn account_rlp_max_size() { + let account = TrieAccount { + nonce: u64::MAX, + balance: U256::MAX, + storage_root: B256::from_slice(&[u8::MAX; 32]), + code_hash: B256::from_slice(&[u8::MAX; 32]), + }; + let mut encoded = Vec::new(); + account.encode(&mut encoded); + assert_eq!(encoded.len(), TRIE_ACCOUNT_RLP_MAX_SIZE); + } +} diff --git a/crates/trie/trie/src/lib.rs b/crates/trie/trie/src/lib.rs index bb568ae8b8c..26bdc751124 100644 --- a/crates/trie/trie/src/lib.rs +++ b/crates/trie/trie/src/lib.rs @@ -13,6 +13,10 @@ )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +/// Constants related to the trie computation. +mod constants; +pub use constants::*; + /// The implementation of a container for storing intermediate changes to a trie. /// The container indicates when the trie has been modified. pub mod prefix_set; diff --git a/crates/trie/trie/src/proof.rs b/crates/trie/trie/src/proof.rs index e99d686aca7..895a3de153d 100644 --- a/crates/trie/trie/src/proof.rs +++ b/crates/trie/trie/src/proof.rs @@ -4,7 +4,7 @@ use crate::{ prefix_set::{PrefixSetMut, TriePrefixSetsMut}, trie_cursor::TrieCursorFactory, walker::TrieWalker, - HashBuilder, Nibbles, + HashBuilder, Nibbles, TRIE_ACCOUNT_RLP_MAX_SIZE, }; use alloy_primitives::{ keccak256, @@ -104,7 +104,7 @@ where let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); let mut storages = HashMap::default(); - let mut account_rlp = Vec::with_capacity(128); + let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); let mut account_node_iter = TrieNodeIter::new(walker, hashed_account_cursor); while let Some(account_node) = account_node_iter.try_next()? 
{ match account_node { diff --git a/crates/trie/trie/src/trie.rs b/crates/trie/trie/src/trie.rs index 1bf8cf1ce79..74faf7bbc60 100644 --- a/crates/trie/trie/src/trie.rs +++ b/crates/trie/trie/src/trie.rs @@ -7,7 +7,7 @@ use crate::{ trie_cursor::TrieCursorFactory, updates::{StorageTrieUpdates, TrieUpdates}, walker::TrieWalker, - HashBuilder, Nibbles, TrieAccount, + HashBuilder, Nibbles, TrieAccount, TRIE_ACCOUNT_RLP_MAX_SIZE, }; use alloy_consensus::EMPTY_ROOT_HASH; use alloy_primitives::{keccak256, Address, B256}; @@ -178,7 +178,7 @@ where } }; - let mut account_rlp = Vec::with_capacity(128); + let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); let mut hashed_entries_walked = 0; let mut updated_storage_nodes = 0; while let Some(node) = account_node_iter.try_next()? { diff --git a/crates/trie/trie/src/witness.rs b/crates/trie/trie/src/witness.rs index 8290f158062..46f85c4d82e 100644 --- a/crates/trie/trie/src/witness.rs +++ b/crates/trie/trie/src/witness.rs @@ -3,7 +3,7 @@ use crate::{ prefix_set::TriePrefixSetsMut, proof::{Proof, StorageProof}, trie_cursor::TrieCursorFactory, - HashedPostState, + HashedPostState, TRIE_ACCOUNT_RLP_MAX_SIZE, }; use alloy_consensus::EMPTY_ROOT_HASH; use alloy_primitives::{ @@ -97,7 +97,7 @@ where // Attempt to compute state root from proofs and gather additional // information for the witness. - let mut account_rlp = Vec::with_capacity(128); + let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); let mut account_trie_nodes = BTreeMap::default(); for (hashed_address, hashed_slots) in proof_targets { let storage_multiproof = account_multiproof @@ -118,16 +118,15 @@ where account_rlp.clone() }); let key = Nibbles::unpack(hashed_address); - account_trie_nodes.extend( - self.target_nodes( - key.clone(), - value, - account_multiproof - .account_subtree - .matching_nodes_iter(&key) - .sorted_by(|a, b| a.0.cmp(b.0)), - )?, - ); + account_trie_nodes.extend(target_nodes( + key.clone(), + value, + Some(&mut self.witness), + account_multiproof + .account_subtree + .matching_nodes_iter(&key) + .sorted_by(|a, b| a.0.cmp(b.0)), + )?); // Gather and record storage trie nodes for this account. let mut storage_trie_nodes = BTreeMap::default(); @@ -138,19 +137,18 @@ where .and_then(|s| s.storage.get(&hashed_slot)) .filter(|v| !v.is_zero()) .map(|v| alloy_rlp::encode_fixed_size(v).to_vec()); - storage_trie_nodes.extend( - self.target_nodes( - slot_nibbles.clone(), - slot_value, - storage_multiproof - .subtree - .matching_nodes_iter(&slot_nibbles) - .sorted_by(|a, b| a.0.cmp(b.0)), - )?, - ); + storage_trie_nodes.extend(target_nodes( + slot_nibbles.clone(), + slot_value, + Some(&mut self.witness), + storage_multiproof + .subtree + .matching_nodes_iter(&slot_nibbles) + .sorted_by(|a, b| a.0.cmp(b.0)), + )?); } - Self::next_root_from_proofs(storage_trie_nodes, |key: Nibbles| { + next_root_from_proofs(storage_trie_nodes, |key: Nibbles| { // Right pad the target with 0s. let mut padded_key = key.pack(); padded_key.resize(32, 0); @@ -177,7 +175,7 @@ where })?; } - Self::next_root_from_proofs(account_trie_nodes, |key: Nibbles| { + next_root_from_proofs(account_trie_nodes, |key: Nibbles| { // Right pad the target with 0s. let mut padded_key = key.pack(); padded_key.resize(32, 0); @@ -197,63 +195,6 @@ where Ok(self.witness) } - /// Decodes and unrolls all nodes from the proof. Returns only sibling nodes - /// in the path of the target and the final leaf node with updated value. 
- fn target_nodes<'b>( - &mut self, - key: Nibbles, - value: Option<Vec<u8>>, - proof: impl IntoIterator<Item = (&'b Nibbles, &'b Bytes)>, - ) -> Result<BTreeMap<Nibbles, Either<B256, Vec<u8>>>, TrieWitnessError> { - let mut trie_nodes = BTreeMap::default(); - let mut proof_iter = proof.into_iter().enumerate().peekable(); - while let Some((idx, (path, encoded))) = proof_iter.next() { - // Record the node in witness. - self.witness.insert(keccak256(encoded.as_ref()), encoded.clone()); - - let mut next_path = path.clone(); - match TrieNode::decode(&mut &encoded[..])? { - TrieNode::Branch(branch) => { - next_path.push(key[path.len()]); - let children = branch_node_children(path.clone(), &branch); - for (child_path, value) in children { - if !key.starts_with(&child_path) { - let value = if value.len() < B256::len_bytes() { - Either::Right(value.to_vec()) - } else { - Either::Left(B256::from_slice(&value[1..])) - }; - trie_nodes.insert(child_path, value); - } - } - } - TrieNode::Extension(extension) => { - next_path.extend_from_slice(&extension.key); - } - TrieNode::Leaf(leaf) => { - next_path.extend_from_slice(&leaf.key); - if next_path != key { - trie_nodes.insert( - next_path.clone(), - Either::Right(leaf.value.as_slice().to_vec()), - ); - } - } - TrieNode::EmptyRoot => { - if idx != 0 || proof_iter.peek().is_some() { - return Err(TrieWitnessError::UnexpectedEmptyRoot(next_path)) - } - } - }; - } - - if let Some(value) = value { - trie_nodes.insert(key, Either::Right(value)); - } - - Ok(trie_nodes) - } - /// Retrieve proof targets for incoming hashed state. /// This method will aggregate all accounts and slots present in the hash state as well as /// select all existing slots from the database for the accounts that have been destroyed. @@ -272,84 +213,140 @@ where let mut storage_cursor = self.hashed_cursor_factory.hashed_storage_cursor(*hashed_address)?; // position cursor at the start - if let Some((hashed_slot, _)) = storage_cursor.seek(B256::ZERO)? { - storage_keys.insert(hashed_slot); - } - while let Some((hashed_slot, _)) = storage_cursor.next()? { + let mut current_entry = storage_cursor.seek(B256::ZERO)?; + while let Some((hashed_slot, _)) = current_entry { storage_keys.insert(hashed_slot); + current_entry = storage_cursor.next()?; } } proof_targets.insert(*hashed_address, storage_keys); } Ok(proof_targets) } +} + +/// Decodes and unrolls all nodes from the proof. Returns only sibling nodes +/// in the path of the target and the final leaf node with updated value. +pub fn target_nodes<'b>( + key: Nibbles, + value: Option<Vec<u8>>, + mut witness: Option<&mut HashMap<B256, Bytes>>, + proof: impl IntoIterator<Item = (&'b Nibbles, &'b Bytes)>, +) -> Result<BTreeMap<Nibbles, Either<B256, Vec<u8>>>, TrieWitnessError> { + let mut trie_nodes = BTreeMap::default(); + let mut proof_iter = proof.into_iter().enumerate().peekable(); + while let Some((idx, (path, encoded))) = proof_iter.next() { + // Record the node in witness. + if let Some(witness) = witness.as_mut() { + witness.insert(keccak256(encoded.as_ref()), encoded.clone()); + } - fn next_root_from_proofs( - trie_nodes: BTreeMap<Nibbles, Either<B256, Vec<u8>>>, - mut trie_node_provider: impl FnMut(Nibbles) -> Result<Bytes, TrieWitnessError>, - ) -> Result<B256, TrieWitnessError> { - // Ignore branch child hashes in the path of leaves or lower child hashes. - let mut keys = trie_nodes.keys().peekable(); - let mut ignored = HashSet::<Nibbles>::default(); - while let Some(key) = keys.next() { - if keys.peek().is_some_and(|next| next.starts_with(key)) { - ignored.insert(key.clone()); + let mut next_path = path.clone(); + match TrieNode::decode(&mut &encoded[..])?
{ + TrieNode::Branch(branch) => { + next_path.push(key[path.len()]); + let children = branch_node_children(path.clone(), &branch); + for (child_path, value) in children { + if !key.starts_with(&child_path) { + let value = if value.len() < B256::len_bytes() { + Either::Right(value.to_vec()) + } else { + Either::Left(B256::from_slice(&value[1..])) + }; + trie_nodes.insert(child_path, value); + } + } + } + TrieNode::Extension(extension) => { + next_path.extend_from_slice(&extension.key); } + TrieNode::Leaf(leaf) => { + next_path.extend_from_slice(&leaf.key); + if next_path != key { + trie_nodes + .insert(next_path.clone(), Either::Right(leaf.value.as_slice().to_vec())); + } + } + TrieNode::EmptyRoot => { + if idx != 0 || proof_iter.peek().is_some() { + return Err(TrieWitnessError::UnexpectedEmptyRoot(next_path)) + } + } + }; + } + + if let Some(value) = value { + trie_nodes.insert(key, Either::Right(value)); + } + + Ok(trie_nodes) +} + +/// Computes the next root hash of a trie by processing a set of trie nodes and +/// their provided values. +pub fn next_root_from_proofs( + trie_nodes: BTreeMap<Nibbles, Either<B256, Vec<u8>>>, + mut trie_node_provider: impl FnMut(Nibbles) -> Result<Bytes, TrieWitnessError>, +) -> Result<B256, TrieWitnessError> { + // Ignore branch child hashes in the path of leaves or lower child hashes. + let mut keys = trie_nodes.keys().peekable(); + let mut ignored = HashSet::<Nibbles>::default(); + while let Some(key) = keys.next() { + if keys.peek().is_some_and(|next| next.starts_with(key)) { + ignored.insert(key.clone()); } + } - let mut hash_builder = HashBuilder::default(); - let mut trie_nodes = trie_nodes.into_iter().filter(|e| !ignored.contains(&e.0)).peekable(); - while let Some((path, value)) = trie_nodes.next() { - match value { - Either::Left(branch_hash) => { - let parent_branch_path = path.slice(..path.len() - 1); - if hash_builder.key.starts_with(&parent_branch_path) || - trie_nodes - .peek() - .is_some_and(|next| next.0.starts_with(&parent_branch_path)) - { - hash_builder.add_branch(path, branch_hash, false); - } else { - // Parent is a branch node that needs to be turned into an extension node. - let mut path = path.clone(); - loop { - let node = trie_node_provider(path.clone())?; - match TrieNode::decode(&mut &node[..])? { - TrieNode::Branch(branch) => { - let children = branch_node_children(path, &branch); - for (child_path, value) in children { - if value.len() < B256::len_bytes() { - hash_builder.add_leaf(child_path, value); - } else { - let hash = B256::from_slice(&value[1..]); - hash_builder.add_branch(child_path, hash, false); - } + let mut hash_builder = HashBuilder::default(); + let mut trie_nodes = trie_nodes.into_iter().filter(|e| !ignored.contains(&e.0)).peekable(); + while let Some((path, value)) = trie_nodes.next() { + match value { + Either::Left(branch_hash) => { + let parent_branch_path = path.slice(..path.len() - 1); + if hash_builder.key.starts_with(&parent_branch_path) || + trie_nodes.peek().is_some_and(|next| next.0.starts_with(&parent_branch_path)) + { + hash_builder.add_branch(path, branch_hash, false); + } else { + // Parent is a branch node that needs to be turned into an extension node. + let mut path = path.clone(); + loop { + let node = trie_node_provider(path.clone())?; + match TrieNode::decode(&mut &node[..])?
{ + TrieNode::Branch(branch) => { + let children = branch_node_children(path, &branch); + for (child_path, value) in children { + if value.len() < B256::len_bytes() { + hash_builder.add_leaf(child_path, value); + } else { + let hash = B256::from_slice(&value[1..]); + hash_builder.add_branch(child_path, hash, false); } - break - } - TrieNode::Leaf(leaf) => { - let mut child_path = path; - child_path.extend_from_slice(&leaf.key); - hash_builder.add_leaf(child_path, &leaf.value); - break - } - TrieNode::Extension(ext) => { - path.extend_from_slice(&ext.key); - } - TrieNode::EmptyRoot => { - return Err(TrieWitnessError::UnexpectedEmptyRoot(path)) - } + break + } + TrieNode::Leaf(leaf) => { + let mut child_path = path; + child_path.extend_from_slice(&leaf.key); + hash_builder.add_leaf(child_path, &leaf.value); + break + } + TrieNode::Extension(ext) => { + path.extend_from_slice(&ext.key); + } + TrieNode::EmptyRoot => { + return Err(TrieWitnessError::UnexpectedEmptyRoot(path)) + } } } } - Either::Right(leaf_value) => { - hash_builder.add_leaf(path, &leaf_value); - } + } + Either::Right(leaf_value) => { + hash_builder.add_leaf(path, &leaf_value); } } - Ok(hash_builder.root()) } + Ok(hash_builder.root()) } /// Returned branch node children with keys in order. diff --git a/docs/crates/network.md b/docs/crates/network.md index a6ac2430565..be2c7cb3b14 100644 --- a/docs/crates/network.md +++ b/docs/crates/network.md @@ -991,9 +991,9 @@ fn import_transactions(&mut self, peer_id: PeerId, transactions: Vec { // transaction was already inserted entry.get_mut().push(peer_id); diff --git a/docs/design/metrics.md b/docs/design/metrics.md index 0ac1f71c90d..cc386a11251 100644 --- a/docs/design/metrics.md +++ b/docs/design/metrics.md @@ -42,7 +42,7 @@ There will only ever exist one description per metric `KeyName`; it is not possi The `metrics` crate provides three macros per metric variant: `register_<metric>!`, `<metric>!`, and `describe_<metric>!`. Prefer to use these where possible, since they generate the code necessary to register and update metrics under various conditions. - The `register_<metric>!` macro simply creates the metric and returns a handle to it (e.g. a `Counter`). These metric structs are thread-safe and cheap to clone. - The `<metric>!` macro registers the metric if it does not exist, and updates it's value. + The `<metric>!` macro registers the metric if it does not exist, and updates its value. - The `describe_<metric>!` macro adds an end-user description for the metric. How the metrics are exposed to the end-user is determined by the CLI.
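A minimal sketch of the macro pattern the metrics.md text describes, not part of this diff: it assumes the newer `metrics` crate API, in which the `<metric>!` macro (e.g. `counter!`) registers the metric on first use and returns a handle, while older releases exposed the separate `register_<metric>!` variants mentioned above. The metric name `example_payloads_built` is hypothetical.

```rust
use metrics::{counter, describe_counter};

fn main() {
    // `describe_<metric>!`: attach the end-user description for this metric key once.
    describe_counter!("example_payloads_built", "Number of payloads built");
    // `<metric>!`: registers the counter if it does not exist and returns a
    // thread-safe, cheap-to-clone handle that can be updated from anywhere.
    let payloads_built = counter!("example_payloads_built");
    payloads_built.increment(1);
}
```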
diff --git a/examples/beacon-api-sidecar-fetcher/Cargo.toml b/examples/beacon-api-sidecar-fetcher/Cargo.toml index 47a2a181f7e..d9590f87e07 100644 --- a/examples/beacon-api-sidecar-fetcher/Cargo.toml +++ b/examples/beacon-api-sidecar-fetcher/Cargo.toml @@ -11,6 +11,7 @@ reth-node-ethereum.workspace = true alloy-rpc-types-beacon.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true clap.workspace = true eyre.workspace = true diff --git a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs index 2436ee0210e..d2077edafff 100644 --- a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs +++ b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs @@ -1,4 +1,5 @@ use crate::BeaconSidecarConfig; +use alloy_consensus::Transaction as _; use alloy_primitives::B256; use alloy_rpc_types_beacon::sidecar::{BeaconBlobBundle, SidecarIterator}; use eyre::Result; diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index 704ecb7e3c4..c21e893e05a 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -42,7 +42,8 @@ use reth::{ PayloadBuilderConfig, }, network::NetworkHandle, - providers::{CanonStateSubscriptions, StateProviderFactory}, + primitives::EthPrimitives, + providers::{CanonStateSubscriptions, EthStorage, StateProviderFactory}, rpc::eth::EthApi, tasks::TaskManager, transaction_pool::TransactionPool, @@ -227,9 +228,10 @@ struct MyCustomNode; /// Configure the node types impl NodeTypes for MyCustomNode { - type Primitives = (); + type Primitives = EthPrimitives; type ChainSpec = ChainSpec; type StateCommitment = MerklePatriciaTrie; + type Storage = EthStorage; } /// Configure the node types with the custom engine types @@ -254,7 +256,14 @@ pub type MyNodeAddOns<N> = RpcAddOns< /// This provides a preset configuration for the node impl<N> Node<N> for MyCustomNode where - N: FullNodeTypes<Types: NodeTypesWithEngine<Engine = CustomEngineTypes, ChainSpec = ChainSpec>>, + N: FullNodeTypes< + Types: NodeTypesWithEngine< + Engine = CustomEngineTypes, + ChainSpec = ChainSpec, + Primitives = EthPrimitives, + Storage = EthStorage, + >, + >, { type ComponentsBuilder = ComponentsBuilder< N, diff --git a/examples/db-access/src/main.rs b/examples/db-access/src/main.rs index 0f7d1a269f3..179d1216053 100644 --- a/examples/db-access/src/main.rs +++ b/examples/db-access/src/main.rs @@ -4,7 +4,7 @@ use reth_chainspec::ChainSpecBuilder; use reth_db::{open_db_read_only, DatabaseEnv}; use reth_node_ethereum::EthereumNode; use reth_node_types::NodeTypesWithDBAdapter; -use reth_primitives::SealedHeader; +use reth_primitives::{SealedHeader, TransactionSigned}; use reth_provider::{ providers::StaticFileProvider, AccountReader, BlockReader, BlockSource, HeaderProvider, ProviderFactory, ReceiptProvider, StateProvider, TransactionsProvider, }; @@ -83,7 +83,9 @@ fn header_provider_example<T: HeaderProvider>(provider: T, number: u64) -> eyre: } /// The `TransactionsProvider` allows querying transaction-related information -fn txs_provider_example<T: TransactionsProvider>(provider: T) -> eyre::Result<()> { +fn txs_provider_example<T: TransactionsProvider<Transaction = TransactionSigned>>( + provider: T, +) -> eyre::Result<()> { // Try the 5th tx let txid = 5; @@ -92,16 +94,17 @@ fn txs_provider_example<T: TransactionsProvider>(provider: T) -> eyre::Result<() // Can query the tx by hash let tx_by_hash = - provider.transaction_by_hash(tx.hash)?.ok_or(eyre::eyre!("txhash not found"))?; + provider.transaction_by_hash(tx.hash())?.ok_or(eyre::eyre!("txhash not found"))?; assert_eq!(tx, tx_by_hash); // Can query the tx by hash with info about the block it was
included in - let (tx, meta) = - provider.transaction_by_hash_with_meta(tx.hash)?.ok_or(eyre::eyre!("txhash not found"))?; - assert_eq!(tx.hash, meta.tx_hash); + let (tx, meta) = provider + .transaction_by_hash_with_meta(tx.hash())? + .ok_or(eyre::eyre!("txhash not found"))?; + assert_eq!(tx.hash(), meta.tx_hash); // Can reverse lookup the key too - let id = provider.transaction_id(tx.hash)?.ok_or(eyre::eyre!("txhash not found"))?; + let id = provider.transaction_id(tx.hash())?.ok_or(eyre::eyre!("txhash not found"))?; assert_eq!(id, txid); // Can find the block of a transaction given its key @@ -159,7 +162,9 @@ } /// The `ReceiptProvider` allows querying the receipts tables. -fn receipts_provider_example<T: ReceiptProvider + TransactionsProvider + HeaderProvider>( +fn receipts_provider_example< + T: ReceiptProvider + TransactionsProvider<Transaction = TransactionSigned> + HeaderProvider, +>( provider: T, ) -> eyre::Result<()> { let txid = 5; @@ -171,7 +176,7 @@ fn receipts_provider_example TransactionValidationOutcome { // Always return valid TransactionValidationOutcome::Valid { - balance: transaction.cost(), + balance: *transaction.cost(), state_nonce: transaction.nonce(), transaction: ValidTransaction::Valid(transaction), propagate: false, diff --git a/fork.yaml b/fork.yaml index ffc49c306e9..37935a42410 100644 --- a/fork.yaml +++ b/fork.yaml @@ -4,7 +4,7 @@ footer: | base: name: reth url: https://github.com/paradigmxyz/reth - hash: 7ae8ce1d0096a32211cda406c1f1176cfc217b43 + hash: 02824da4fcd0794d45442adbdbf6b953be04d4f3 fork: name: scroll-reth url: https://github.com/scroll-tech/reth diff --git a/testing/testing-utils/Cargo.toml b/testing/testing-utils/Cargo.toml index 6d07043adc3..4278da97a95 100644 --- a/testing/testing-utils/Cargo.toml +++ b/testing/testing-utils/Cargo.toml @@ -12,7 +12,7 @@ repository.workspace = true workspace = true [dependencies] -reth-primitives = { workspace = true, features = ["secp256k1"] } +reth-primitives = { workspace = true, features = ["secp256k1", "arbitrary"] } alloy-genesis.workspace = true alloy-primitives.workspace = true diff --git a/testing/testing-utils/src/generators.rs b/testing/testing-utils/src/generators.rs index 32a12b04453..8cf67915335 100644 --- a/testing/testing-utils/src/generators.rs +++ b/testing/testing-utils/src/generators.rs @@ -149,7 +149,7 @@ pub fn sign_tx_with_key_pair(key_pair: Keypair, tx: Transaction) -> TransactionS let signature = sign_message(B256::from_slice(&key_pair.secret_bytes()[..]), tx.signature_hash()).unwrap(); - TransactionSigned::from_transaction_and_signature(tx, signature) + TransactionSigned::new_unhashed(tx, signature) } /// Generates a set of [Keypair]s based on the desired count. @@ -488,7 +488,7 @@ mod tests { sign_message(B256::from_slice(&key_pair.secret_bytes()[..]), signature_hash) .unwrap(); - let signed = TransactionSigned::from_transaction_and_signature(tx.clone(), signature); + let signed = TransactionSigned::new_unhashed(tx.clone(), signature); let recovered = signed.recover_signer().unwrap(); let expected = public_key_to_address(key_pair.public_key());
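For downstream code migrating off the removed constructor, the change above is mechanical. A minimal sketch, assuming `sign_message`, `Transaction`, and `TransactionSigned` are importable as in the `generators.rs` imports, and that `new_unhashed` (as its name suggests) defers the transaction-hash computation until it is first needed:

```rust
use alloy_primitives::B256;
use reth_primitives::{sign_message, Transaction, TransactionSigned};
use secp256k1::Keypair;

/// Signs `tx` with `key_pair`, mirroring the updated `sign_tx_with_key_pair`.
fn sign_tx(key_pair: Keypair, tx: Transaction) -> TransactionSigned {
    let signature =
        sign_message(B256::from_slice(&key_pair.secret_bytes()[..]), tx.signature_hash())
            .expect("signing failed");
    // Previously `TransactionSigned::from_transaction_and_signature(tx, signature)`;
    // the renamed constructor skips eager hashing of the signed transaction.
    TransactionSigned::new_unhashed(tx, signature)
}
```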