diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index b7ad4160b0c..8dbb193977a 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -122,7 +122,7 @@ jobs: - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@master with: - toolchain: "1.86" # MSRV + toolchain: "1.88" # MSRV - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true @@ -274,13 +274,15 @@ jobs: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@master with: - toolchain: nightly-2025-02-14 + toolchain: nightly-2025-07-30 - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true - - run: cargo install --locked --git http://github.com/openvm-org/openvm.git --tag v1.2.0 cargo-openvm + - run: cargo install --locked --git https://github.com/openvm-org/openvm.git --tag v1.4.0-rc.2 cargo-openvm - name: verify openvm compatibility - run: cargo openvm build --manifest-dir crates/scroll/openvm-compat + env: + OPENVM_RUST_TOOLCHAIN: nightly-2025-07-30 + run: cargo openvm build --manifest-path crates/scroll/openvm-compat/Cargo.toml lint-success: name: lint success diff --git a/.gitignore b/.gitignore index 54821e04d07..894ca5f346b 100644 --- a/.gitignore +++ b/.gitignore @@ -69,3 +69,7 @@ links-report.json __pycache__/ *.py[cod] *$py.class + +# direnv +.envrc +.direnv/ diff --git a/Cargo.lock b/Cargo.lock index 97d81f9d9a6..ec3ce362808 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -112,9 +112,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b6093bc69509849435a2d68237a2e9fea79d27390c8e62f1e4012c460aabad8" +checksum = "eda689f7287f15bd3582daba6be8d1545bad3740fd1fb778f629a1fe866bb43b" dependencies = [ "alloy-eips", "alloy-primitives", @@ -138,9 +138,9 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d1cfed4fefd13b5620cb81cdb6ba397866ff0de514c1b24806e6e79cdff5570" +checksum = "2b5659581e41e8fe350ecc3593cb5c9dcffddfd550896390f2b78a07af67b0fa" dependencies = [ "alloy-consensus", "alloy-eips", @@ -153,9 +153,9 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f28074a21cd4f7c3a7ab218c4f38fae6be73944e1feae3b670c68b60bf85ca40" +checksum = "944085cf3ac8f32d96299aa26c03db7c8ca6cdaafdbc467910b889f0328e6b70" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -236,9 +236,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5937e2d544e9b71000942d875cbc57965b32859a666ea543cc57aae5a06d602d" +checksum = "6f35887da30b5fc50267109a3c61cd63e6ca1f45967983641053a40ee83468c1" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -259,9 +259,9 @@ dependencies = [ [[package]] name = "alloy-evm" -version = "0.16.2" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4042e855163839443cba91147fb7737c4aba02df4767cb322b0e8cea5a77642c" +checksum = "2211ccd0f05e2fea4f767242957f5e8cfb08b127ea2e6a3c0d9e5b10e6bf67d9" dependencies = [ "alloy-consensus", "alloy-eips", @@ -279,9 +279,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c51b4c13e02a8104170a4de02ccf006d7c233e6c10ab290ee16e7041e6ac221d" +checksum = "11d4009efea6f403b3a80531f9c6f70fc242399498ff71196a1688cc1c901f44" dependencies = [ "alloy-eips", "alloy-primitives", @@ -319,9 +319,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b590caa6b6d8bc10e6e7a7696c59b1e550e89f27f50d1ee13071150d3a3e3f66" +checksum = "883dee3b4020fcb5667ee627b4f401e899dad82bf37b246620339dd980720ed9" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -334,9 +334,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36fe5af1fca03277daa56ad4ce5f6d623d3f4c2273ea30b9ee8674d18cefc1fa" +checksum = "cd6e5b8ac1654a05c224390008e43634a2bdc74e181e02cf8ed591d8b3d4ad08" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -360,9 +360,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793df1e3457573877fbde8872e4906638fde565ee2d3bd16d04aad17d43dbf0e" +checksum = "80d7980333dd9391719756ac28bc2afa9baa705fc70ffd11dc86ab078dd64477" dependencies = [ "alloy-consensus", "alloy-eips", @@ -373,9 +373,9 @@ dependencies = [ [[package]] name = "alloy-op-evm" -version = "0.16.2" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8c0bc6a883d3198c43c4018aa952448a303dec265439fa1c2e7c4397beeb289" +checksum = "8582f8583eabdb6198cd392ff34fbf98d4aa64f9ef9b7b7838139669bc70a932" dependencies = [ "alloy-consensus", "alloy-eips", @@ -415,7 +415,7 @@ dependencies = [ "derive_more", "foldhash", "getrandom 0.3.3", - "hashbrown 0.15.4", + "hashbrown 0.15.5", "indexmap 2.10.0", "itoa", "k256", @@ -433,9 +433,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d59879a772ebdcde9dc4eb38b2535d32e8503d3175687cc09e763a625c5fcf32" +checksum = "478a42fe167057b7b919cd8b0c2844f0247f667473340dad100eaf969de5754e" dependencies = [ "alloy-chains", "alloy-consensus", @@ -463,7 +463,6 @@ dependencies = [ "either", "futures", "futures-utils-wasm", - "http", "lru 0.13.0", "parking_lot", "pin-project", @@ -479,13 +478,14 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbdfb2899b54b7cb0063fa8e61938320f9be6b81b681be69c203abf130a87baa" +checksum = "b0a99b17987f40a066b29b6b56d75e84cd193b866cac27cae17b59f40338de95" dependencies = [ "alloy-json-rpc", "alloy-primitives", "alloy-transport", + "auto_impl", "bimap", "futures", "parking_lot", @@ -522,9 +522,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f060e3bb9f319eb01867a2d6d1ff9e0114e8877f5ca8f5db447724136106cae" +checksum = "8a0c6d723fbdf4a87454e2e3a275e161be27edcfbf46e2e3255dd66c138634b6" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -548,9 +548,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d47b637369245d2dafef84b223b1ff5ea59e6cd3a98d2d3516e32788a0b216df" +checksum = "c41492dac39365b86a954de86c47ec23dcc7452cdb2fde591caadc194b3e34c6" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -561,9 +561,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db29bf8f7c961533b017f383122cab6517c8da95712cf832e23c60415d520a58" +checksum = "9c0f415ad97cc68d2f49eb08214f45c6827a6932a69773594f4ce178f8a41dc0" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -573,9 +573,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0b1f499acb3fc729615147bc113b8b798b17379f19d43058a687edc5792c102" +checksum = "10493fa300a2757d8134f584800fef545c15905c95122bed1f6dde0b0d9dae27" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -585,9 +585,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-any" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e26b4dd90b33bd158975307fb9cf5fafa737a0e33cbb772a8648bf8be13c104" +checksum = "8f7eb22670a972ad6c222a6c6dac3eef905579acffe9d63ab42be24c7d158535" dependencies = [ "alloy-consensus-any", "alloy-rpc-types-eth", @@ -596,9 +596,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-beacon" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9196cbbf4b82a3cc0c471a8e68ccb30102170d930948ac940d2bceadc1b1346b" +checksum = "53381ffba0110a8aed4c9f108ef34a382ed21aeefb5f50f91c73451ae68b89aa" dependencies = [ "alloy-eips", "alloy-primitives", @@ -614,19 +614,20 @@ dependencies = [ [[package]] name = "alloy-rpc-types-debug" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71841e6fc8e221892035a74f7d5b279c0a2bf27a7e1c93e7476c64ce9056624e" +checksum = "a9b6f0482c82310366ec3dcf4e5212242f256a69fcf1a26e5017e6704091ee95" dependencies = [ "alloy-primitives", + "derive_more", "serde", ] [[package]] name = "alloy-rpc-types-engine" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2f9cbf5f781b9ee39cfdddea078fdef6015424f4c8282ef0e5416d15ca352c4" +checksum = "e24c171377c0684e3860385f6d93fbfcc8ecc74f6cce8304c822bf1a50bacce0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -645,9 +646,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46586ec3c278639fc0e129f0eb73dbfa3d57f683c44b2ff5e066fab7ba63fa1f" +checksum = "b777b98526bbe5b7892ca22a7fd5f18ed624ff664a79f40d0f9f2bf94ba79a84" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -667,9 +668,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-mev" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79b6e80b501842c3f5803dd5752ae41b61f43bf6d2e1b8d29999d3312d67a8a5" +checksum = "c15e8ccb6c16e196fcc968e16a71cd8ce4160f3ec5871d2ea196b75bf569ac02" dependencies = [ "alloy-consensus", "alloy-eips", @@ -682,9 +683,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "bc9a2184493c374ca1dbba9569d37215c23e489970f8c3994f731cb3ed6b0b7d" +checksum = "d6a854af3fe8fce1cfe319fcf84ee8ba8cda352b14d3dd4221405b5fc6cce9e1" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -696,9 +697,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3aaf142f4f6c0bdd06839c422179bae135024407d731e6f365380f88cd4730e" +checksum = "3cc803e9b8d16154c856a738c376e002abe4b388e5fef91c8aebc8373e99fd45" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -708,9 +709,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e1722bc30feef87cc0fa824e43c9013f9639cc6c037be7be28a31361c788be2" +checksum = "ee8d2c52adebf3e6494976c8542fbdf12f10123b26e11ad56f77274c16a2a039" dependencies = [ "alloy-primitives", "arbitrary", @@ -720,9 +721,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3674beb29e68fbbc7be302b611cf35fe07b736e308012a280861df5a2361395" +checksum = "7c0494d1e0f802716480aabbe25549c7f6bc2a25ff33b08fd332bbb4b7d06894" dependencies = [ "alloy-primitives", "async-trait", @@ -735,9 +736,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad7094c39cd41b03ed642145b0bd37251e31a9cf2ed19e1ce761f089867356a6" +checksum = "59c2435eb8979a020763ced3fb478932071c56e5f75ea86db41f320915d325ba" dependencies = [ "alloy-consensus", "alloy-network", @@ -823,12 +824,13 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f89bec2f59a41c0e259b6fe92f78dfc49862c17d10f938db9c33150d5a7f42b6" +checksum = "3c0107675e10c7f248bf7273c1e7fdb02409a717269cc744012e6f3c39959bfb" dependencies = [ "alloy-json-rpc", "alloy-primitives", + "auto_impl", "base64 0.22.1", "derive_more", "futures", @@ -846,9 +848,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d3615ec64d775fec840f4e9d5c8e1f739eb1854d8d28db093fb3d4805e0cb53" +checksum = "78e3736701b5433afd06eecff08f0688a71a10e0e1352e0bbf0bed72f0dd4e35" dependencies = [ "alloy-json-rpc", "alloy-rpc-types-engine", @@ -867,9 +869,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "374db72669d8ee09063b9aa1a316e812d5cdfce7fc9a99a3eceaa0e5512300d2" +checksum = "c79064b5a08259581cb5614580010007c2df6deab1e8f3e8c7af8d7e9227008f" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -887,9 +889,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5dbaa6851875d59c8803088f4b6ec72eaeddf7667547ae8995c1a19fbca6303" +checksum = "77fd607158cb9bc54cbcfcaab4c5f36c5b26994c7dc58b6f095ce27a54f270f3" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -925,9 +927,9 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "1.0.23" +version = "1.0.24" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f916ff6d52f219c44a9684aea764ce2c7e1d53bd4a724c9b127863aeacc30bb" +checksum = "6acb36318dfa50817154064fea7932adf2eec3f51c86680e2b37d7e8906c66bb" dependencies = [ "alloy-primitives", "darling 0.20.11", @@ -1073,7 +1075,7 @@ dependencies = [ "ark-std 0.5.0", "educe", "fnv", - "hashbrown 0.15.4", + "hashbrown 0.15.5", "itertools 0.13.0", "num-bigint", "num-integer", @@ -1219,7 +1221,7 @@ dependencies = [ "ark-std 0.5.0", "educe", "fnv", - "hashbrown 0.15.4", + "hashbrown 0.15.5", ] [[package]] @@ -1743,7 +1745,7 @@ dependencies = [ "cfg-if", "dashmap 6.1.0", "fast-float2", - "hashbrown 0.15.4", + "hashbrown 0.15.5", "icu_normalizer 1.5.0", "indexmap 2.10.0", "intrusive-collections", @@ -1778,7 +1780,7 @@ dependencies = [ "boa_macros", "boa_profiler", "boa_string", - "hashbrown 0.15.4", + "hashbrown 0.15.5", "thin-vec", ] @@ -1790,7 +1792,7 @@ checksum = "42407a3b724cfaecde8f7d4af566df4b56af32a2f11f0956f5570bb974e7f749" dependencies = [ "boa_gc", "boa_macros", - "hashbrown 0.15.4", + "hashbrown 0.15.5", "indexmap 2.10.0", "once_cell", "phf", @@ -1919,18 +1921,18 @@ checksum = "175812e0be2bccb6abe50bb8d566126198344f707e304f45c648fd8f2cc0365e" [[package]] name = "bytemuck" -version = "1.23.1" +version = "1.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c76a5792e44e4abe34d3abf15636779261d45a7450612059293d1d2cfc63422" +checksum = "3995eaeebcdf32f91f980d360f78732ddc061097ab4e39991ae7a6ace9194677" dependencies = [ "bytemuck_derive", ] [[package]] name = "bytemuck_derive" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "441473f2b4b0459a68628c744bc61d23e730fb00128b841d30fa4bb3972257e4" +checksum = "4f154e572231cb6ba2bd1176980827e3d5dc04cc183a75dea38109fbdd672d29" dependencies = [ "proc-macro2", "quote", @@ -1970,9 +1972,9 @@ dependencies = [ [[package]] name = "camino" -version = "1.1.10" +version = "1.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0da45bc31171d8d6960122e222a67740df867c1dd53b4d51caa297084c185cab" +checksum = "5d07aa9a93b00c76f71bc35d598bed923f6d4f3a9ca5c24b7737ae1a292841c0" dependencies = [ "serde", ] @@ -2137,9 +2139,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.42" +version = "4.5.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed87a9d530bb41a67537289bafcac159cb3ee28460e0a4571123d2a778a6a882" +checksum = "50fd97c9dc2399518aa331917ac6f274280ec5eb34e555dd291899745c48ec6f" dependencies = [ "clap_builder", "clap_derive", @@ -2147,9 +2149,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.42" +version = "4.5.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64f4f3f3c77c94aff3c7e9aac9a2ca1974a5adf392a8bb751e827d6d127ab966" +checksum = "c35b5830294e1fa0462034af85cc95225a4cb07092c088c55bda3147cfcd8f65" dependencies = [ "anstream", "anstyle", @@ -2827,9 +2829,9 @@ dependencies = [ [[package]] name = "derive-where" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "510c292c8cf384b1a340b816a9a6cf2599eb8f566a44949024af88418000c50b" +checksum = "ef941ded77d15ca19b40374869ac6000af1c9f2a4c0f3d4c70926287e6364a8f" dependencies = [ "proc-macro2", "quote", @@ -3580,6 +3582,14 @@ dependencies = [ "tokio", ] +[[package]] +name = "example-full-contract-state" +version = "1.6.0" +dependencies = [ + "eyre", + "reth-ethereum", +] + 
[[package]] name = "example-manual-p2p" version = "0.0.0" @@ -3643,6 +3653,14 @@ dependencies = [ "reth-ethereum", ] +[[package]] +name = "example-op-db-access" +version = "0.0.0" +dependencies = [ + "eyre", + "reth-op", +] + [[package]] name = "example-polygon-p2p" version = "0.0.0" @@ -4164,9 +4182,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.11" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17da50a276f1e01e0ba6c029e47b7100754904ee8a278f886546e98575380785" +checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" dependencies = [ "atomic-waker", "bytes", @@ -4220,9 +4238,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.15.4" +version = "0.15.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" dependencies = [ "allocator-api2", "equivalent", @@ -4827,9 +4845,9 @@ dependencies = [ [[package]] name = "indenter" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" +checksum = "964de6e86d545b246d84badc0fef527924ace5134f30641c203ef52ba83f58d5" [[package]] name = "indexmap" @@ -4850,7 +4868,7 @@ checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" dependencies = [ "arbitrary", "equivalent", - "hashbrown 0.15.4", + "hashbrown 0.15.5", "serde", ] @@ -5529,7 +5547,7 @@ version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.15.4", + "hashbrown 0.15.5", ] [[package]] @@ -5538,7 +5556,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "227748d55f2f0ab4735d87fd623798cb6b664512fe979705f829c9f81c934465" dependencies = [ - "hashbrown 0.15.4", + "hashbrown 0.15.5", ] [[package]] @@ -5685,7 +5703,7 @@ checksum = "b8496cc523d1f94c1385dd8f0f0c2c480b2b8aeccb5b7e4485ad6365523ae376" dependencies = [ "crossbeam-epoch", "crossbeam-utils", - "hashbrown 0.15.4", + "hashbrown 0.15.5", "indexmap 2.10.0", "metrics", "ordered-float", @@ -7248,7 +7266,7 @@ version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "145bb27393fe455dd64d6cbc8d059adfa392590a45eadf079c01b11857e7b010" dependencies = [ - "hashbrown 0.15.4", + "hashbrown 0.15.5", "memchr", ] @@ -7770,6 +7788,7 @@ dependencies = [ "reth-db", "reth-db-api", "reth-etl", + "reth-execution-errors", "reth-fs-util", "reth-node-types", "reth-primitives-traits", @@ -7950,6 +7969,7 @@ dependencies = [ "reth-ethereum-primitives", "reth-evm", "reth-network-api", + "reth-network-p2p", "reth-network-peers", "reth-node-api", "reth-node-builder", @@ -8048,11 +8068,11 @@ dependencies = [ "reth-chain-state", "reth-errors", "reth-ethereum-primitives", + "reth-evm", "reth-execution-types", "reth-payload-builder-primitives", "reth-payload-primitives", "reth-primitives-traits", - "reth-trie", "reth-trie-common", "serde", "thiserror 2.0.12", @@ -8105,7 +8125,6 @@ dependencies = [ "derive_more", "eyre", "futures", - "itertools 0.14.0", "metrics", "mini-moka", "parking_lot", @@ -8171,6 +8190,7 @@ dependencies = [ "pin-project", "reth-chainspec", "reth-engine-primitives", + "reth-engine-tree", "reth-errors", "reth-evm", "reth-fs-util", @@ 
-8370,7 +8390,6 @@ dependencies = [ name = "reth-ethereum-cli" version = "1.6.0" dependencies = [ - "alloy-consensus", "clap", "eyre", "reth-chainspec", @@ -8530,6 +8549,7 @@ dependencies = [ "alloy-evm", "alloy-genesis", "alloy-primitives", + "alloy-rpc-types-engine", "derive_more", "parking_lot", "reth-chainspec", @@ -8538,6 +8558,7 @@ dependencies = [ "reth-evm", "reth-execution-types", "reth-primitives-traits", + "reth-storage-errors", "reth-testing-utils", "revm", "secp256k1 0.30.0", @@ -8992,6 +9013,7 @@ dependencies = [ "reth-db-common", "reth-downloaders", "reth-engine-local", + "reth-engine-primitives", "reth-engine-service", "reth-engine-tree", "reth-engine-util", @@ -9011,6 +9033,7 @@ dependencies = [ "reth-node-events", "reth-node-metrics", "reth-payload-builder", + "reth-primitives-traits", "reth-provider", "reth-prune", "reth-rpc", @@ -9378,6 +9401,7 @@ dependencies = [ "alloy-op-evm", "alloy-primitives", "op-alloy-consensus", + "op-alloy-rpc-types-engine", "op-revm", "reth-chainspec", "reth-evm", @@ -9390,6 +9414,7 @@ dependencies = [ "reth-primitives-traits", "reth-revm", "reth-rpc-eth-api", + "reth-storage-errors", "revm", "thiserror 2.0.12", ] @@ -9409,7 +9434,6 @@ name = "reth-optimism-node" version = "1.6.0" dependencies = [ "alloy-consensus", - "alloy-eips", "alloy-genesis", "alloy-network", "alloy-primitives", @@ -9419,6 +9443,7 @@ dependencies = [ "eyre", "futures", "op-alloy-consensus", + "op-alloy-network", "op-alloy-rpc-types-engine", "op-revm", "reth-chainspec", @@ -9426,9 +9451,9 @@ dependencies = [ "reth-db", "reth-e2e-test-utils", "reth-engine-local", - "reth-engine-primitives", "reth-evm", "reth-network", + "reth-network-api", "reth-node-api", "reth-node-builder", "reth-node-core", @@ -9448,8 +9473,10 @@ dependencies = [ "reth-primitives-traits", "reth-provider", "reth-revm", + "reth-rpc", "reth-rpc-api", "reth-rpc-engine-api", + "reth-rpc-eth-types", "reth-rpc-server-types", "reth-tasks", "reth-tracing", @@ -9459,6 +9486,7 @@ dependencies = [ "revm", "serde", "serde_json", + "tempfile", "tokio", ] @@ -10425,6 +10453,7 @@ dependencies = [ "alloy-eips", "alloy-evm", "alloy-primitives", + "alloy-rpc-types-engine", "derive_more", "eyre", "reth-chainspec", @@ -11113,6 +11142,7 @@ dependencies = [ "proptest-arbitrary-interop", "rand 0.8.5", "rand 0.9.2", + "rayon", "reth-execution-errors", "reth-metrics", "reth-primitives-traits", @@ -11137,6 +11167,7 @@ dependencies = [ "arbitrary", "assert_matches", "itertools 0.14.0", + "metrics", "pretty_assertions", "proptest", "proptest-arbitrary-interop", @@ -11144,6 +11175,7 @@ dependencies = [ "rand 0.9.2", "rayon", "reth-execution-errors", + "reth-metrics", "reth-primitives-traits", "reth-provider", "reth-trie", @@ -11649,7 +11681,7 @@ dependencies = [ "openssl-probe", "rustls-pki-types", "schannel", - "security-framework 3.2.0", + "security-framework 3.3.0", ] [[package]] @@ -11677,7 +11709,7 @@ dependencies = [ "rustls-native-certs", "rustls-platform-verifier-android", "rustls-webpki", - "security-framework 3.2.0", + "security-framework 3.3.0", "security-framework-sys", "webpki-root-certs 0.26.11", "windows-sys 0.59.0", @@ -11702,9 +11734,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.21" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "rusty-fork" @@ -12015,9 +12047,9 @@ dependencies 
= [ [[package]] name = "security-framework" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" +checksum = "80fb1d92c5028aa318b4b8bd7302a5bfcf48be96a37fc6fc790f806b0004ee0c" dependencies = [ "bitflags 2.9.1", "core-foundation 0.10.1", @@ -12372,9 +12404,9 @@ checksum = "c1e9a774a6c28142ac54bb25d25562e6bcf957493a184f15ad4eebccb23e410a" [[package]] name = "slab" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d" +checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" [[package]] name = "smallvec" @@ -12643,13 +12675,12 @@ dependencies = [ [[package]] name = "tar-no-std" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15574aa79d3c04a12f3cb53ff976d5571e53b9d8e0bdbe4021df0a06473dd1c9" +checksum = "ac9ee8b664c9f1740cd813fea422116f8ba29997bb7c878d1940424889802897" dependencies = [ "bitflags 2.9.1", "log", - "memchr", "num-traits", ] diff --git a/Cargo.toml b/Cargo.toml index ce64a8304ae..550eea1dda9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,7 @@ [workspace.package] version = "1.6.0" edition = "2021" -rust-version = "1.86" +rust-version = "1.88" license = "MIT OR Apache-2.0" homepage = "https://paradigmxyz.github.io/reth" repository = "https://github.com/paradigmxyz/reth" @@ -174,12 +174,14 @@ members = [ "examples/exex-hello-world", "examples/exex-subscription", "examples/exex-test", + "examples/full-contract-state", "examples/manual-p2p/", "examples/network-txpool/", "examples/network/", "examples/network-proxy/", "examples/node-custom-rpc/", "examples/node-event-hooks/", + "examples/op-db-access/", "examples/polygon-p2p/", "examples/rpc-db/", "examples/precompile-cache/", @@ -491,7 +493,7 @@ revm-inspectors = "0.27.1" alloy-chains = { version = "0.2.5", default-features = false } alloy-dyn-abi = "1.3.0" alloy-eip2124 = { version = "0.2.0", default-features = false } -alloy-evm = { version = "0.16", default-features = false } +alloy-evm = { version = "0.17", default-features = false } alloy-primitives = { version = "1.3.0", default-features = false, features = ["map-foldhash"] } alloy-rlp = { version = "0.3.10", default-features = false, features = ["core-net"] } alloy-sol-macro = "1.3.0" @@ -550,7 +552,7 @@ reth-scroll-trie = { path = "crates/scroll/trie" } reth-scroll-txpool = { path = "crates/scroll/txpool" } # op -alloy-op-evm = { version = "0.16", default-features = false } +alloy-op-evm = { version = "0.17", default-features = false } alloy-op-hardforks = "0.2.2" op-alloy-rpc-types = { version = "0.18.12", default-features = false } op-alloy-rpc-types-engine = { version = "0.18.12", default-features = false } diff --git a/Dockerfile.reproducible b/Dockerfile.reproducible index 4f408ee0605..a0d4a17b5bb 100644 --- a/Dockerfile.reproducible +++ b/Dockerfile.reproducible @@ -1,5 +1,5 @@ -# Use the Rust 1.86 image based on Debian Bookworm -FROM rust:1.86-bookworm AS builder +# Use the Rust 1.88 image based on Debian Bookworm +FROM rust:1.88-bookworm AS builder # Install specific version of libclang-dev RUN apt-get update && apt-get install -y libclang-dev=1:14.0-55.7~deb12u1 diff --git a/Makefile b/Makefile index 010c1897cfd..9d54f4f905c 100644 --- a/Makefile +++ b/Makefile @@ -42,14 +42,14 @@ help: ## Display this help. 
##@ Build .PHONY: install -install: ## Build and install the reth binary under `~/.cargo/bin`. +install: ## Build and install the reth binary under `$(CARGO_HOME)/bin`. cargo install --path bin/reth --bin reth --force --locked \ --features "$(FEATURES)" \ --profile "$(PROFILE)" \ $(CARGO_INSTALL_EXTRA_FLAGS) .PHONY: install-op -install-op: ## Build and install the op-reth binary under `~/.cargo/bin`. +install-op: ## Build and install the op-reth binary under `$(CARGO_HOME)/bin`. cargo install --path crates/optimism/bin --bin op-reth --force --locked \ --features "$(FEATURES)" \ --profile "$(PROFILE)" \ @@ -219,7 +219,7 @@ reth-bench: ## Build the reth-bench binary into the `target` directory. cargo build --manifest-path bin/reth-bench/Cargo.toml --features "$(FEATURES)" --profile "$(PROFILE)" .PHONY: install-reth-bech -install-reth-bench: ## Build and install the reth binary under `~/.cargo/bin`. +install-reth-bench: ## Build and install the reth binary under `$(CARGO_HOME)/bin`. cargo install --path bin/reth-bench --bin reth-bench --force --locked \ --features "$(FEATURES)" \ --profile "$(PROFILE)" diff --git a/README.md b/README.md index 390868e3976..f106f1cecb8 100644 --- a/README.md +++ b/README.md @@ -89,7 +89,7 @@ When updating this, also update: - .github/workflows/lint.yml --> -The Minimum Supported Rust Version (MSRV) of this project is [1.86.0](https://blog.rust-lang.org/2025/04/03/Rust-1.86.0/). +The Minimum Supported Rust Version (MSRV) of this project is [1.88.0](https://blog.rust-lang.org/2025/06/26/Rust-1.88.0/). See the docs for detailed instructions on how to [build from source](https://paradigmxyz.github.io/reth/installation/source). diff --git a/clippy.toml b/clippy.toml index bdc50bb3fda..1e75cb34f32 100644 --- a/clippy.toml +++ b/clippy.toml @@ -1,4 +1,4 @@ -msrv = "1.86" +msrv = "1.88" too-large-for-stack = 128 doc-valid-idents = [ "P2P", diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 22fae8951d3..75238c5f71b 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -5,7 +5,7 @@ use crate::{ ChainInfoTracker, MemoryOverlayStateProvider, }; use alloy_consensus::{transaction::TransactionMeta, BlockHeader}; -use alloy_eips::{eip2718::Encodable2718, BlockHashOrNumber, BlockNumHash}; +use alloy_eips::{BlockHashOrNumber, BlockNumHash}; use alloy_primitives::{map::HashMap, TxHash, B256}; use parking_lot::RwLock; use reth_chainspec::ChainInfo; @@ -13,7 +13,8 @@ use reth_ethereum_primitives::EthPrimitives; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_metrics::{metrics::Gauge, Metrics}; use reth_primitives_traits::{ - BlockBody as _, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader, SignedTransaction, + BlockBody as _, IndexedTx, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader, + SignedTransaction, }; use reth_storage_api::StateProviderBox; use reth_trie::{updates::TrieUpdates, HashedPostState}; @@ -553,24 +554,8 @@ impl CanonicalInMemoryState { tx_hash: TxHash, ) -> Option<(N::SignedTx, TransactionMeta)> { for block_state in self.canonical_chain() { - if let Some((index, tx)) = block_state - .block_ref() - .recovered_block() - .body() - .transactions_iter() - .enumerate() - .find(|(_, tx)| tx.trie_hash() == tx_hash) - { - let meta = TransactionMeta { - tx_hash, - index: index as u64, - block_hash: block_state.hash(), - block_number: block_state.block_ref().recovered_block().number(), - base_fee: block_state.block_ref().recovered_block().base_fee_per_gas(), - 
timestamp: block_state.block_ref().recovered_block().timestamp(), - excess_blob_gas: block_state.block_ref().recovered_block().excess_blob_gas(), - }; - return Some((tx.clone(), meta)) + if let Some(indexed) = block_state.find_indexed(tx_hash) { + return Some((indexed.tx().clone(), indexed.meta())); } } None @@ -725,30 +710,14 @@ impl BlockState { tx_hash: TxHash, ) -> Option<(N::SignedTx, TransactionMeta)> { self.chain().find_map(|block_state| { - block_state - .block_ref() - .recovered_block() - .body() - .transactions_iter() - .enumerate() - .find(|(_, tx)| tx.trie_hash() == tx_hash) - .map(|(index, tx)| { - let meta = TransactionMeta { - tx_hash, - index: index as u64, - block_hash: block_state.hash(), - block_number: block_state.block_ref().recovered_block().number(), - base_fee: block_state.block_ref().recovered_block().base_fee_per_gas(), - timestamp: block_state.block_ref().recovered_block().timestamp(), - excess_blob_gas: block_state - .block_ref() - .recovered_block() - .excess_blob_gas(), - }; - (tx.clone(), meta) - }) + block_state.find_indexed(tx_hash).map(|indexed| (indexed.tx().clone(), indexed.meta())) }) } + + /// Finds a transaction by hash and returns it with its index and block context. + pub fn find_indexed(&self, tx_hash: TxHash) -> Option> { + self.block_ref().recovered_block().find_indexed(tx_hash) + } } /// Represents an executed block stored in-memory. @@ -925,14 +894,14 @@ pub enum NewCanonicalChain { impl> NewCanonicalChain { /// Returns the length of the new chain. - pub fn new_block_count(&self) -> usize { + pub const fn new_block_count(&self) -> usize { match self { Self::Commit { new } | Self::Reorg { new, .. } => new.len(), } } /// Returns the length of the reorged chain. - pub fn reorged_block_count(&self) -> usize { + pub const fn reorged_block_count(&self) -> usize { match self { Self::Commit { .. } => 0, Self::Reorg { old, .. } => old.len(), diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index 3249fc98113..bc5de96ff5f 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -216,6 +216,17 @@ type FullTypesAdapter = FullNodeTypesAdapter< BlockchainProvider>>, >; +/// Trait for block headers that can be modified through CLI operations. +pub trait CliHeader { + fn set_number(&mut self, number: u64); +} + +impl CliHeader for alloy_consensus::Header { + fn set_number(&mut self, number: u64) { + self.number = number; + } +} + /// Helper trait with a common set of requirements for the /// [`NodeTypes`] in CLI. pub trait CliNodeTypes: NodeTypesForProvider { diff --git a/crates/cli/commands/src/init_state/mod.rs b/crates/cli/commands/src/init_state/mod.rs index 7a80997b976..fcf8adf11e2 100644 --- a/crates/cli/commands/src/init_state/mod.rs +++ b/crates/cli/commands/src/init_state/mod.rs @@ -1,7 +1,7 @@ //! Command that initializes the node from a genesis file. 
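The new `CliHeader` trait above abstracts header mutation so the `init-state` command (next hunk) no longer needs the concrete `alloy_consensus::Header` type. A minimal self-contained sketch of how a hypothetical chain-specific header could opt in (`CustomHeader` and `placeholder_header` are illustrative, not part of this PR; the trait is re-declared so the snippet compiles on its own):

```rust
/// Re-declaration of the trait from `crates/cli/commands/src/common.rs`.
pub trait CliHeader {
    fn set_number(&mut self, number: u64);
}

/// Hypothetical chain-specific header type.
#[derive(Debug, Default)]
pub struct CustomHeader {
    pub number: u64,
    pub extra_data: Vec<u8>,
}

impl CliHeader for CustomHeader {
    fn set_number(&mut self, number: u64) {
        self.number = number;
    }
}

/// Mirrors the closure in the `init-state` hunk below: start from a default
/// header and stamp the block number, naming no concrete header type.
pub fn placeholder_header<H: CliHeader + Default>(number: u64) -> H {
    let mut header = H::default();
    header.set_number(number);
    header
}
```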
-use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; -use alloy_consensus::{BlockHeader as AlloyBlockHeader, Header}; +use crate::common::{AccessRights, CliHeader, CliNodeTypes, Environment, EnvironmentArgs}; +use alloy_consensus::BlockHeader as AlloyBlockHeader; use alloy_primitives::{B256, U256}; use clap::Parser; use reth_chainspec::{EthChainSpec, EthereumHardforks}; @@ -72,7 +72,7 @@ impl> InitStateC where N: CliNodeTypes< ChainSpec = C::ChainSpec, - Primitives: NodePrimitives>, + Primitives: NodePrimitives, >, { info!(target: "reth::cli", "Reth init-state starting"); @@ -106,8 +106,10 @@ impl> InitStateC SealedHeader::new(header, header_hash), total_difficulty, |number| { - let header = Header { number, ..Default::default() }; - <::BlockHeader>::from(header) + let mut header = + <::BlockHeader>::default(); + header.set_number(number); + header }, )?; diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index ca10c80e578..c29c94dd6a9 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -15,6 +15,7 @@ reth-chainspec.workspace = true reth-tracing.workspace = true reth-db = { workspace = true, features = ["test-utils"] } reth-network-api.workspace = true +reth-network-p2p.workspace = true reth-rpc-layer.workspace = true reth-rpc-server-types.workspace = true reth-rpc-builder.workspace = true diff --git a/crates/e2e-test-utils/src/testsuite/mod.rs b/crates/e2e-test-utils/src/testsuite/mod.rs index 580dc220665..84c9d126bd5 100644 --- a/crates/e2e-test-utils/src/testsuite/mod.rs +++ b/crates/e2e-test-utils/src/testsuite/mod.rs @@ -196,7 +196,7 @@ where I: EngineTypes, { /// Get the number of nodes in the environment - pub fn node_count(&self) -> usize { + pub const fn node_count(&self) -> usize { self.node_clients.len() } diff --git a/crates/e2e-test-utils/src/testsuite/setup.rs b/crates/e2e-test-utils/src/testsuite/setup.rs index c91a50d3436..18f270aca1c 100644 --- a/crates/e2e-test-utils/src/testsuite/setup.rs +++ b/crates/e2e-test-utils/src/testsuite/setup.rs @@ -11,6 +11,7 @@ use eyre::{eyre, Result}; use reth_chainspec::ChainSpec; use reth_engine_local::LocalPayloadAttributesBuilder; use reth_ethereum_primitives::Block; +use reth_network_p2p::sync::{NetworkSyncUpdater, SyncState}; use reth_node_api::{EngineTypes, NodeTypes, PayloadTypes, TreeConfig}; use reth_node_core::primitives::RecoveredBlock; use reth_payload_builder::EthPayloadBuilderAttributes; @@ -357,6 +358,15 @@ where self.network.node_count, initial_block_info.number, initial_block_info.hash ); + // In test environments, explicitly set sync state to Idle after initialization + // This ensures that eth_syncing returns false as expected by tests + if let Some(import_result) = &self.import_result_holder { + for (idx, node_ctx) in import_result.nodes.iter().enumerate() { + debug!("Setting sync state to Idle for node {}", idx); + node_ctx.inner.network.update_sync_state(SyncState::Idle); + } + } + Ok(()) } diff --git a/crates/e2e-test-utils/src/wallet.rs b/crates/e2e-test-utils/src/wallet.rs index 099918ad637..92af590e705 100644 --- a/crates/e2e-test-utils/src/wallet.rs +++ b/crates/e2e-test-utils/src/wallet.rs @@ -43,7 +43,7 @@ impl Wallet { let builder = builder.clone().derivation_path(format!("{derivation_path}{idx}")).unwrap(); let wallet = builder.build().unwrap().with_chain_id(Some(self.chain_id)); - wallets.push(wallet) + wallets.push(wallet); } wallets } diff --git a/crates/engine/invalid-block-hooks/src/witness.rs 
b/crates/engine/invalid-block-hooks/src/witness.rs index b78cf462f52..7f37fd9c0f9 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -2,7 +2,6 @@ use alloy_consensus::BlockHeader; use alloy_primitives::{keccak256, Address, B256, U256}; use alloy_rpc_types_debug::ExecutionWitness; use pretty_assertions::Comparison; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_engine_primitives::InvalidBlockHook; use reth_evm::{execute::Executor, ConfigureEvm}; use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SealedHeader}; @@ -66,17 +65,15 @@ impl BundleStateSorted { .clone() .into_iter() .map(|(address, account)| { - { - ( - address, - BundleAccountSorted { - info: account.info, - original_info: account.original_info, - status: account.status, - storage: BTreeMap::from_iter(account.storage), - }, - ) - } + ( + address, + BundleAccountSorted { + info: account.info, + original_info: account.original_info, + status: account.status, + storage: BTreeMap::from_iter(account.storage), + }, + ) }) .collect(); @@ -138,11 +135,7 @@ impl InvalidBlockWitnessHook { impl InvalidBlockWitnessHook where - P: StateProviderFactory - + ChainSpecProvider - + Send - + Sync - + 'static, + P: StateProviderFactory + ChainSpecProvider + Send + Sync + 'static, E: ConfigureEvm + 'static, N: NodePrimitives, { @@ -366,11 +359,7 @@ where impl InvalidBlockHook for InvalidBlockWitnessHook where - P: StateProviderFactory - + ChainSpecProvider - + Send - + Sync - + 'static, + P: StateProviderFactory + ChainSpecProvider + Send + Sync + 'static, E: ConfigureEvm + 'static, { fn on_invalid_block( diff --git a/crates/engine/local/src/miner.rs b/crates/engine/local/src/miner.rs index 290790d61f5..71523a460bc 100644 --- a/crates/engine/local/src/miner.rs +++ b/crates/engine/local/src/miner.rs @@ -27,16 +27,26 @@ use tracing::error; pub enum MiningMode { /// In this mode a block is built as soon as /// a valid transaction reaches the pool. - Instant(Fuse>), + /// If `max_transactions` is set, a block is built when that many transactions have + /// accumulated. + Instant { + /// Stream of transaction notifications. + rx: Fuse>, + /// Maximum number of transactions to accumulate before mining a block. + /// If None, mine immediately when any transaction arrives. + max_transactions: Option, + /// Counter for accumulated transactions (only used when `max_transactions` is set). + accumulated: usize, + }, /// In this mode a block is built at a fixed interval. 
Interval(Interval), } impl MiningMode { /// Constructor for a [`MiningMode::Instant`] - pub fn instant(pool: Pool) -> Self { + pub fn instant(pool: Pool, max_transactions: Option) -> Self { let rx = pool.pending_transactions_listener(); - Self::Instant(ReceiverStream::new(rx).fuse()) + Self::Instant { rx: ReceiverStream::new(rx).fuse(), max_transactions, accumulated: 0 } } /// Constructor for a [`MiningMode::Interval`] @@ -52,10 +62,20 @@ impl Future for MiningMode { fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.get_mut(); match this { - Self::Instant(rx) => { - // drain all transactions notifications - if let Poll::Ready(Some(_)) = rx.poll_next_unpin(cx) { - return Poll::Ready(()) + Self::Instant { rx, max_transactions, accumulated } => { + // Poll for new transaction notifications + while let Poll::Ready(Some(_)) = rx.poll_next_unpin(cx) { + if let Some(max_tx) = max_transactions { + *accumulated += 1; + // If we've reached the max transactions threshold, mine a block + if *accumulated >= *max_tx { + *accumulated = 0; // Reset counter for next block + return Poll::Ready(()); + } + } else { + // If no max_transactions is set, mine immediately + return Poll::Ready(()); + } } Poll::Pending } diff --git a/crates/engine/primitives/Cargo.toml b/crates/engine/primitives/Cargo.toml index 3e1f7893093..795118083b5 100644 --- a/crates/engine/primitives/Cargo.toml +++ b/crates/engine/primitives/Cargo.toml @@ -12,13 +12,13 @@ workspace = true [dependencies] # reth +reth-evm.workspace = true reth-execution-types.workspace = true reth-payload-primitives.workspace = true reth-payload-builder-primitives.workspace = true reth-primitives-traits.workspace = true reth-ethereum-primitives.workspace = true reth-chain-state.workspace = true -reth-trie.workspace = true reth-errors.workspace = true reth-trie-common.workspace = true @@ -29,8 +29,8 @@ alloy-rpc-types-engine.workspace = true alloy-eips.workspace = true # async -tokio = { workspace = true, features = ["sync"] } -futures.workspace = true +tokio = { workspace = true, features = ["sync"], optional = true } +futures = { workspace = true, optional = true } # misc auto_impl.workspace = true @@ -49,6 +49,8 @@ std = [ "alloy-rpc-types-engine/std", "alloy-eips/std", "futures/std", + "tokio", "serde/std", "thiserror/std", + "reth-evm/std", ] diff --git a/crates/engine/primitives/src/invalid_block_hook.rs b/crates/engine/primitives/src/invalid_block_hook.rs index 767fc83304f..c981f34ed65 100644 --- a/crates/engine/primitives/src/invalid_block_hook.rs +++ b/crates/engine/primitives/src/invalid_block_hook.rs @@ -1,7 +1,8 @@ +use alloc::{boxed::Box, fmt, vec::Vec}; use alloy_primitives::B256; use reth_execution_types::BlockExecutionOutput; use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SealedHeader}; -use reth_trie::updates::TrieUpdates; +use reth_trie_common::updates::TrieUpdates; /// An invalid block hook. pub trait InvalidBlockHook: Send + Sync { @@ -36,3 +37,42 @@ where self(parent_header, block, output, trie_updates) } } + +/// A no-op [`InvalidBlockHook`] that does nothing. +#[derive(Debug, Default)] +#[non_exhaustive] +pub struct NoopInvalidBlockHook; + +impl InvalidBlockHook for NoopInvalidBlockHook { + fn on_invalid_block( + &self, + _parent_header: &SealedHeader, + _block: &RecoveredBlock, + _output: &BlockExecutionOutput, + _trie_updates: Option<(&TrieUpdates, B256)>, + ) { + } +} + +/// Multiple [`InvalidBlockHook`]s that are executed in order. 
+pub struct InvalidBlockHooks(pub Vec>>); + +impl fmt::Debug for InvalidBlockHooks { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("InvalidBlockHooks").field("len", &self.0.len()).finish() + } +} + +impl InvalidBlockHook for InvalidBlockHooks { + fn on_invalid_block( + &self, + parent_header: &SealedHeader, + block: &RecoveredBlock, + output: &BlockExecutionOutput, + trie_updates: Option<(&TrieUpdates, B256)>, + ) { + for hook in &self.0 { + hook.on_invalid_block(parent_header, block, output, trie_updates); + } + } +} diff --git a/crates/engine/primitives/src/lib.rs b/crates/engine/primitives/src/lib.rs index 75e3bd81ca7..e73368be561 100644 --- a/crates/engine/primitives/src/lib.rs +++ b/crates/engine/primitives/src/lib.rs @@ -22,6 +22,7 @@ use reth_trie_common::HashedPostState; use serde::{de::DeserializeOwned, Serialize}; // Re-export [`ExecutionPayload`] moved to `reth_payload_primitives` +pub use reth_evm::{ConfigureEngineEvm, ExecutableTxIterator}; pub use reth_payload_primitives::ExecutionPayload; mod error; @@ -30,14 +31,16 @@ pub use error::*; mod forkchoice; pub use forkchoice::{ForkchoiceStateHash, ForkchoiceStateTracker, ForkchoiceStatus}; +#[cfg(feature = "std")] mod message; +#[cfg(feature = "std")] pub use message::*; mod event; pub use event::*; mod invalid_block_hook; -pub use invalid_block_hook::InvalidBlockHook; +pub use invalid_block_hook::{InvalidBlockHook, InvalidBlockHooks, NoopInvalidBlockHook}; pub mod config; pub use config::*; @@ -104,8 +107,8 @@ pub trait EngineTypes: + 'static; } -/// Type that validates the payloads processed by the engine. -pub trait EngineValidator: PayloadValidator { +/// Type that validates the payloads processed by the engine API. +pub trait EngineApiValidator: Send + Sync + Unpin + 'static { /// Validates the presence or exclusion of fork-specific fields based on the payload attributes /// and the message version. 
fn validate_version_specific_fields( diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index 367186995f9..320dd461fb1 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -147,11 +147,8 @@ pub struct EngineServiceError {} mod tests { use super::*; use reth_chainspec::{ChainSpecBuilder, MAINNET}; - use reth_engine_primitives::BeaconEngineMessage; - use reth_engine_tree::{ - test_utils::TestPipelineBuilder, - tree::{BasicEngineValidator, NoopInvalidBlockHook}, - }; + use reth_engine_primitives::{BeaconEngineMessage, NoopInvalidBlockHook}; + use reth_engine_tree::{test_utils::TestPipelineBuilder, tree::BasicEngineValidator}; use reth_ethereum_consensus::EthBeaconConsensus; use reth_ethereum_engine_primitives::EthEngineTypes; use reth_evm_ethereum::EthEvmConfig; diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 6ed37c342c5..c6e44e629a2 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -62,7 +62,6 @@ rayon.workspace = true tracing.workspace = true derive_more.workspace = true parking_lot.workspace = true -itertools.workspace = true # optional deps for test-utils reth-prune-types = { workspace = true, optional = true } diff --git a/crates/engine/tree/benches/state_root_task.rs b/crates/engine/tree/benches/state_root_task.rs index f4a497a709a..e00d7ba128f 100644 --- a/crates/engine/tree/benches/state_root_task.rs +++ b/crates/engine/tree/benches/state_root_task.rs @@ -15,9 +15,10 @@ use reth_engine_tree::tree::{ executor::WorkloadExecutor, precompile_cache::PrecompileCacheMap, PayloadProcessor, StateProviderBuilder, TreeConfig, }; +use reth_ethereum_primitives::TransactionSigned; use reth_evm::OnStateHook; use reth_evm_ethereum::EthEvmConfig; -use reth_primitives_traits::{Account as RethAccount, StorageEntry}; +use reth_primitives_traits::{Account as RethAccount, Recovered, StorageEntry}; use reth_provider::{ providers::{BlockchainProvider, ConsistentDbView}, test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, @@ -235,7 +236,9 @@ fn bench_state_root(c: &mut Criterion) { black_box({ let mut handle = payload_processor.spawn( Default::default(), - Default::default(), + core::iter::empty::< + Result, core::convert::Infallible>, + >(), StateProviderBuilder::new(provider.clone(), genesis_hash, None), ConsistentDbView::new_with_latest_tip(provider).unwrap(), TrieInput::default(), diff --git a/crates/engine/tree/src/tree/invalid_block_hook.rs b/crates/engine/tree/src/tree/invalid_block_hook.rs deleted file mode 100644 index 0670e855342..00000000000 --- a/crates/engine/tree/src/tree/invalid_block_hook.rs +++ /dev/null @@ -1,44 +0,0 @@ -use alloy_primitives::B256; -use reth_engine_primitives::InvalidBlockHook; -use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SealedHeader}; -use reth_provider::BlockExecutionOutput; -use reth_trie::updates::TrieUpdates; - -/// A no-op [`InvalidBlockHook`] that does nothing. -#[derive(Debug, Default)] -#[non_exhaustive] -pub struct NoopInvalidBlockHook; - -impl InvalidBlockHook for NoopInvalidBlockHook { - fn on_invalid_block( - &self, - _parent_header: &SealedHeader, - _block: &RecoveredBlock, - _output: &BlockExecutionOutput, - _trie_updates: Option<(&TrieUpdates, B256)>, - ) { - } -} - -/// Multiple [`InvalidBlockHook`]s that are executed in order. 
-pub struct InvalidBlockHooks(pub Vec>>); - -impl std::fmt::Debug for InvalidBlockHooks { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("InvalidBlockHooks").field("len", &self.0.len()).finish() - } -} - -impl InvalidBlockHook for InvalidBlockHooks { - fn on_invalid_block( - &self, - parent_header: &SealedHeader, - block: &RecoveredBlock, - output: &BlockExecutionOutput, - trie_updates: Option<(&TrieUpdates, B256)>, - ) { - for hook in &self.0 { - hook.on_invalid_block(parent_header, block, output, trie_updates); - } - } -} diff --git a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index 96002180049..d2c4a85a76f 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ b/crates/engine/tree/src/tree/metrics.rs @@ -42,6 +42,10 @@ pub(crate) struct EngineMetrics { pub(crate) pipeline_runs: Counter, /// The total count of forkchoice updated messages received. pub(crate) forkchoice_updated_messages: Counter, + /// The total count of forkchoice updated messages with payload received. + pub(crate) forkchoice_with_attributes_updated_messages: Counter, + /// Newly arriving block hash is not present in executed blocks cache storage + pub(crate) executed_new_block_cache_miss: Counter, /// The total count of new payload messages received. pub(crate) new_payload_messages: Counter, /// Histogram of persistence operation durations (in seconds) diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 74d5e90fc42..47e68a25c4f 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -18,7 +18,6 @@ use reth_chain_state::{ MemoryOverlayStateProvider, NewCanonicalChain, }; use reth_consensus::{Consensus, FullConsensus}; -pub use reth_engine_primitives::InvalidBlockHook; use reth_engine_primitives::{ BeaconConsensusEngineEvent, BeaconEngineMessage, BeaconOnNewPayloadError, ExecutionPayload, ForkchoiceStateTracker, OnForkChoiceUpdated, @@ -58,7 +57,6 @@ mod block_buffer; mod cached_state; pub mod error; mod instrumented_state; -mod invalid_block_hook; mod invalid_headers; mod metrics; mod payload_processor; @@ -73,7 +71,6 @@ mod trie_updates; use crate::tree::error::AdvancePersistenceError; pub use block_buffer::BlockBuffer; -pub use invalid_block_hook::{InvalidBlockHooks, NoopInvalidBlockHook}; pub use invalid_headers::InvalidHeaderCache; pub use payload_processor::*; pub use payload_validator::{BasicEngineValidator, EngineValidator}; @@ -617,6 +614,7 @@ where // get the executed new head block let Some(new_head_block) = self.state.tree_state.blocks_by_hash.get(&new_head) else { debug!(target: "engine::tree", new_head=?new_head, "New head block not found in inmemory tree state"); + self.metrics.engine.executed_new_block_cache_miss.increment(1); return Ok(None) }; @@ -746,7 +744,7 @@ where } /// Returns the persisting kind for the input block. - fn persisting_kind_for(&self, block: &N::BlockHeader) -> PersistingKind { + fn persisting_kind_for(&self, block: BlockWithParent) -> PersistingKind { // Check that we're currently persisting. let Some(action) = self.persistence_state.current_action() else { return PersistingKind::NotPersisting @@ -758,7 +756,9 @@ where // The block being validated can only be a descendant if its number is higher than // the highest block persisting. Otherwise, it's likely a fork of a lower block. 
- if block.number() > highest.number && self.state.tree_state.is_descendant(*highest, block) { + if block.block.number > highest.number && + self.state.tree_state.is_descendant(*highest, block) + { return PersistingKind::PersistingDescendant } @@ -783,6 +783,9 @@ where ) -> ProviderResult> { trace!(target: "engine::tree", ?attrs, "invoked forkchoice update"); self.metrics.engine.forkchoice_updated_messages.increment(1); + if attrs.is_some() { + self.metrics.engine.forkchoice_with_attributes_updated_messages.increment(1); + } self.canonical_in_memory_state.on_forkchoice_update_received(); if let Some(on_updated) = self.pre_validate_forkchoice_update(state)? { @@ -1400,9 +1403,10 @@ where .build()?; let mut trie_input = self.compute_trie_input( - self.persisting_kind_for(block.recovered_block().header()), + self.persisting_kind_for(block.recovered_block.block_with_parent()), self.provider.database_provider_ro()?, block.recovered_block().parent_hash(), + None, )?; // Extend with block we are generating trie updates for. trie_input.append_ref(block.hashed_state()); @@ -1679,10 +1683,12 @@ where } } Err(err) => { - debug!(target: "engine::tree", ?err, "failed to connect buffered block to tree"); - if let Err(fatal) = self.on_insert_block_error(err) { - warn!(target: "engine::tree", %fatal, "fatal error occurred while connecting buffered blocks"); - return Err(fatal) + if let InsertPayloadError::Block(err) = err { + debug!(target: "engine::tree", ?err, "failed to connect buffered block to tree"); + if let Err(fatal) = self.on_insert_block_error(err) { + warn!(target: "engine::tree", %fatal, "fatal error occurred while connecting buffered blocks"); + return Err(fatal) + } } } } @@ -2021,10 +2027,12 @@ where trace!(target: "engine::tree", "downloaded block already executed"); } Err(err) => { - debug!(target: "engine::tree", err=%err.kind(), "failed to insert downloaded block"); - if let Err(fatal) = self.on_insert_block_error(err) { - warn!(target: "engine::tree", %fatal, "fatal error occurred while inserting downloaded block"); - return Err(fatal) + if let InsertPayloadError::Block(err) = err { + debug!(target: "engine::tree", err=%err.kind(), "failed to insert downloaded block"); + if let Err(fatal) = self.on_insert_block_error(err) { + warn!(target: "engine::tree", %fatal, "fatal error occurred while inserting downloaded block"); + return Err(fatal) + } } } } @@ -2046,7 +2054,7 @@ where fn insert_block( &mut self, block: RecoveredBlock, - ) -> Result> { + ) -> Result> { self.insert_block_or_payload( block.block_with_parent(), block, @@ -2177,8 +2185,10 @@ where persisting_kind: PersistingKind, provider: TP, parent_hash: B256, + allocated_trie_input: Option, ) -> ProviderResult { - let mut input = TrieInput::default(); + // get allocated trie input or use a default trie input + let mut input = allocated_trie_input.unwrap_or_default(); let best_block_number = provider.best_block_number()?; diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index a6c6969049d..a85c86bdb50 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -9,15 +9,18 @@ use crate::tree::{ sparse_trie::SparseTrieTask, StateProviderBuilder, TreeConfig, }; -use alloy_consensus::{transaction::Recovered, BlockHeader}; -use alloy_evm::block::StateChangeSource; +use alloy_evm::{block::StateChangeSource, ToTxEnv}; use alloy_primitives::B256; use executor::WorkloadExecutor; use 
multiproof::{SparseTrieUpdate, *}; use parking_lot::RwLock; use prewarm::PrewarmMetrics; -use reth_evm::{ConfigureEvm, OnStateHook, SpecFor}; -use reth_primitives_traits::{NodePrimitives, SealedHeaderFor}; +use reth_engine_primitives::ExecutableTxIterator; +use reth_evm::{ + execute::{ExecutableTxFor, WithTxEnv}, + ConfigureEvm, EvmEnvFor, OnStateHook, SpecFor, TxEnvFor, +}; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ providers::ConsistentDbView, BlockReader, DatabaseProviderFactory, StateCommitmentProvider, StateProviderFactory, StateReader, @@ -32,13 +35,10 @@ use reth_trie_sparse::{ provider::{TrieNodeProvider, TrieNodeProviderFactory}, ClearedSparseStateTrie, SerialSparseTrie, SparseStateTrie, SparseTrie, }; -use std::{ - collections::VecDeque, - sync::{ - atomic::AtomicBool, - mpsc::{self, channel, Sender}, - Arc, - }, +use std::sync::{ + atomic::AtomicBool, + mpsc::{self, channel, Sender}, + Arc, }; use super::precompile_cache::PrecompileCacheMap; @@ -80,6 +80,8 @@ where >, /// Whether to use the parallel sparse trie. use_parallel_sparse_trie: bool, + /// A cleared trie input, kept around to be reused so allocations can be minimized. + trie_input: Option<TrieInput>, } impl PayloadProcessor @@ -104,6 +106,7 @@ precompile_cache_disabled: config.precompile_cache_disabled(), precompile_cache_map, sparse_state_trie: Arc::default(), + trie_input: None, use_parallel_sparse_trie: config.enable_parallel_sparse_trie(), } } @@ -146,15 +149,15 @@ /// /// This returns a handle to await the final state root and to interact with the tasks (e.g. /// canceling) - pub fn spawn<P>( + pub fn spawn<P, I: ExecutableTxIterator<Evm>>( &mut self, - header: SealedHeaderFor<N>, - transactions: VecDeque<Recovered<N::SignedTx>>, + env: ExecutionEnv<Evm>, + transactions: I, provider_builder: StateProviderBuilder<N, P>, consistent_view: ConsistentDbView<P>
, trie_input: TrieInput, config: &TreeConfig, - ) -> PayloadHandle + ) -> PayloadHandle<WithTxEnv<TxEnvFor<Evm>, I::Tx>, I::Error> where P: DatabaseProviderFactory<Provider: BlockReader> + BlockReader + StateProviderFactory + StateReader + StateCommitmentProvider + Clone + 'static, { let (to_sparse_trie, sparse_trie_rx) = channel(); - // spawn multiproof task - let state_root_config = MultiProofConfig::new_from_input(consistent_view, trie_input); + // spawn multiproof task, save the trie input + let (trie_input, state_root_config) = + MultiProofConfig::new_from_input(consistent_view, trie_input); + self.trie_input = Some(trie_input); // Create and spawn the storage proof task let task_ctx = ProofTaskCtx::new( @@ -196,8 +201,10 @@ // wire the multiproof task to the prewarm task let to_multi_proof = Some(multi_proof_task.state_root_message_sender()); + let (prewarm_rx, execution_rx) = self.spawn_tx_iterator(transactions); + let prewarm_handle = - self.spawn_caching_with(header, transactions, provider_builder, to_multi_proof.clone()); + self.spawn_caching_with(env, prewarm_rx, provider_builder, to_multi_proof.clone()); // spawn multi-proof task self.executor.spawn_blocking(move || { @@ -222,18 +229,23 @@ } }); - PayloadHandle { to_multi_proof, prewarm_handle, state_root: Some(state_root_rx) } + PayloadHandle { + to_multi_proof, + prewarm_handle, + state_root: Some(state_root_rx), + transactions: execution_rx, + } } /// Spawn cache prewarming exclusively. /// /// Returns a [`PayloadHandle`] to communicate with the task. - pub(super) fn spawn_cache_exclusive<P>
( + pub(super) fn spawn_cache_exclusive<P, I: ExecutableTxIterator<Evm>>( &self, - header: SealedHeaderFor<N>, - transactions: VecDeque<Recovered<N::SignedTx>>, + env: ExecutionEnv<Evm>, + transactions: I, provider_builder: StateProviderBuilder<N, P>, - ) -> PayloadHandle + ) -> PayloadHandle<WithTxEnv<TxEnvFor<Evm>, I::Tx>, I::Error> where P: BlockReader + StateProviderFactory + StateReader + StateCommitmentProvider + Clone + 'static, { - let prewarm_handle = self.spawn_caching_with(header, transactions, provider_builder, None); - PayloadHandle { to_multi_proof: None, prewarm_handle, state_root: None } + let (prewarm_rx, execution_rx) = self.spawn_tx_iterator(transactions); + let prewarm_handle = self.spawn_caching_with(env, prewarm_rx, provider_builder, None); + PayloadHandle { + to_multi_proof: None, + prewarm_handle, + state_root: None, + transactions: execution_rx, + } + } + + /// Spawns a task advancing the transaction env iterator and streaming updates through a channel. + #[expect(clippy::type_complexity)] + fn spawn_tx_iterator<I: ExecutableTxIterator<Evm>>( + &self, + transactions: I, + ) -> ( + mpsc::Receiver<WithTxEnv<TxEnvFor<Evm>, I::Tx>>, + mpsc::Receiver<Result<WithTxEnv<TxEnvFor<Evm>, I::Tx>, I::Error>>, + ) { + let (prewarm_tx, prewarm_rx) = mpsc::channel(); + let (execute_tx, execute_rx) = mpsc::channel(); + self.executor.spawn_blocking(move || { + for tx in transactions { + let tx = tx.map(|tx| WithTxEnv { tx_env: tx.to_tx_env(), tx }); + // only send Ok(_) variants to prewarming task + if let Ok(tx) = &tx { + let _ = prewarm_tx.send(tx.clone()); + } + let _ = execute_tx.send(tx); + } + }); + + (prewarm_rx, execute_rx) } /// Spawn prewarming optionally wired to the multiproof task for target updates. fn spawn_caching_with<P>
( &self, - header: SealedHeaderFor<N>, - mut transactions: VecDeque<Recovered<N::SignedTx>>, + env: ExecutionEnv<Evm>, + mut transactions: mpsc::Receiver<impl ExecutableTxFor<Evm> + Send + 'static>, provider_builder: StateProviderBuilder<N, P>, to_multi_proof: Option<Sender<MultiProofMessage>>, ) -> CacheTaskHandle @@ -265,13 +308,13 @@ if self.disable_transaction_prewarming { // if no transactions should be executed we clear them but still spawn the task for // caching updates - transactions.clear(); + transactions = mpsc::channel().1; } - let (cache, cache_metrics) = self.cache_for(header.parent_hash()).split(); + let (cache, cache_metrics) = self.cache_for(env.parent_hash).split(); // configure prewarming let prewarm_ctx = PrewarmContext { - header, + env, evm_config: self.evm_config.clone(), cache: cache.clone(), cache_metrics: cache_metrics.clone(), @@ -282,22 +325,29 @@ precompile_cache_map: self.precompile_cache_map.clone(), }; - let prewarm_task = PrewarmCacheTask::new( + let (prewarm_task, to_prewarm_task) = PrewarmCacheTask::new( self.executor.clone(), self.execution_cache.clone(), prewarm_ctx, to_multi_proof, - transactions, ); - let to_prewarm_task = prewarm_task.actions_tx(); // spawn pre-warm task - self.executor.spawn_blocking(move || { - prewarm_task.run(); - }); + { + let to_prewarm_task = to_prewarm_task.clone(); + self.executor.spawn_blocking(move || { + prewarm_task.run(transactions, to_prewarm_task); + }); + } + CacheTaskHandle { cache, to_prewarm_task: Some(to_prewarm_task), cache_metrics } } + /// Takes the trie input from the inner payload processor, if it exists. + pub const fn take_trie_input(&mut self) -> Option<TrieInput> { + self.trie_input.take() + } + /// Returns the cache for the given parent hash. /// /// If the given hash is different than what is recently cached, then this will create a new @@ -359,16 +409,18 @@ /// Handle to all the spawned tasks. #[derive(Debug)] -pub struct PayloadHandle { +pub struct PayloadHandle<Tx, Err> { /// Channel for evm state updates to_multi_proof: Option<Sender<MultiProofMessage>>, // must include the receiver of the state root wired to the sparse trie prewarm_handle: CacheTaskHandle, /// Receiver for the state root state_root: Option<mpsc::Receiver<Result<StateRootComputeOutcome, ParallelStateRootError>>>, + /// Stream of block transactions + transactions: mpsc::Receiver<Result<Tx, Err>>, } -impl PayloadHandle { +impl<Tx, Err> PayloadHandle<Tx, Err> { /// Awaits the state root /// /// # Panics @@ -418,6 +470,13 @@ pub(super) fn terminate_caching(&mut self, block_output: Option<BundleState>) { self.prewarm_handle.terminate_caching(block_output) } + + /// Returns an iterator yielding transactions from the stream. + pub fn iter_transactions(&mut self) -> impl Iterator<Item = Result<Tx, Err>> + '_ { + core::iter::repeat_with(|| self.transactions.recv()) + .take_while(|res| res.is_ok()) + .map(|res| res.unwrap()) + } } /// Access to the spawned [`PrewarmCacheTask`]. @@ -492,6 +551,30 @@ impl ExecutionCache { } } +/// EVM context required to execute a block. +#[derive(Debug, Clone)] +pub struct ExecutionEnv<Evm: ConfigureEvm> { + /// Evm environment. + pub evm_env: EvmEnvFor<Evm>, + /// Hash of the block being executed. + pub hash: B256, + /// Hash of the parent block.
+ pub parent_hash: B256, +} + +impl<Evm: ConfigureEvm> Default for ExecutionEnv<Evm> +where + EvmEnvFor<Evm>: Default, +{ + fn default() -> Self { + Self { + evm_env: Default::default(), + hash: Default::default(), + parent_hash: Default::default(), + } + } +} + #[cfg(test)] mod tests { use crate::tree::{ @@ -505,9 +588,10 @@ use rand::Rng; use reth_chainspec::ChainSpec; use reth_db_common::init::init_genesis; + use reth_ethereum_primitives::TransactionSigned; use reth_evm::OnStateHook; use reth_evm_ethereum::EthEvmConfig; - use reth_primitives_traits::{Account, StorageEntry}; + use reth_primitives_traits::{Account, Recovered, StorageEntry}; use reth_provider::{ providers::{BlockchainProvider, ConsistentDbView}, test_utils::create_test_provider_factory_with_chain_spec, @@ -628,7 +712,7 @@ let provider = BlockchainProvider::new(factory).unwrap(); let mut handle = payload_processor.spawn( Default::default(), - Default::default(), + core::iter::empty::<Result<Recovered<TransactionSigned>, core::convert::Infallible>>(), StateProviderBuilder::new(provider.clone(), genesis_hash, None), ConsistentDbView::new_with_latest_tip(provider).unwrap(), TrieInput::from_state(hashed_state), diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index 0b62bc73eae..f09720eb31a 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -82,16 +82,20 @@ pub(super) struct MultiProofConfig<Factory> { impl<Factory> MultiProofConfig<Factory> { /// Creates a new state root config from the consistent view and the trie input. + /// + /// This returns a cleared [`TrieInput`] so that we can reuse any allocated space in the + /// [`TrieInput`]. pub(super) fn new_from_input( consistent_view: ConsistentDbView<Factory>, - input: TrieInput, - ) -> Self { - Self { + mut input: TrieInput, + ) -> (TrieInput, Self) { + let config = Self { consistent_view, - nodes_sorted: Arc::new(input.nodes.into_sorted()), - state_sorted: Arc::new(input.state.into_sorted()), - prefix_sets: Arc::new(input.prefix_sets), - } + nodes_sorted: Arc::new(input.nodes.drain_into_sorted()), + state_sorted: Arc::new(input.state.drain_into_sorted()), + prefix_sets: Arc::new(input.prefix_sets.clone()), + }; + (input.cleared(), config) } } diff --git a/crates/engine/tree/src/tree/payload_processor/prewarm.rs b/crates/engine/tree/src/tree/payload_processor/prewarm.rs index 85e9e803305..fb9c97117f2 100644 --- a/crates/engine/tree/src/tree/payload_processor/prewarm.rs +++ b/crates/engine/tree/src/tree/payload_processor/prewarm.rs @@ -6,24 +6,21 @@ use crate::tree::{ payload_processor::{ executor::WorkloadExecutor, multiproof::MultiProofMessage, ExecutionCache, }, precompile_cache::{CachedPrecompile, PrecompileCacheMap}, - StateProviderBuilder, + ExecutionEnv, StateProviderBuilder, }; -use alloy_consensus::transaction::Recovered; use alloy_evm::Database; use alloy_primitives::{keccak256, map::B256Set, B256}; -use itertools::Itertools; use metrics::{Gauge, Histogram}; -use reth_evm::{ConfigureEvm, Evm, EvmFor, SpecFor}; +use reth_evm::{execute::ExecutableTxFor, ConfigureEvm, Evm, EvmFor, SpecFor}; use reth_metrics::Metrics; -use reth_primitives_traits::{header::SealedHeaderFor, NodePrimitives, SignedTransaction}; +use reth_primitives_traits::{NodePrimitives, SignedTransaction}; use reth_provider::{BlockReader, StateCommitmentProvider, StateProviderFactory, StateReader}; use reth_revm::{database::StateProviderDatabase, db::BundleState, state::EvmState}; use reth_trie::MultiProofTargets; use std::{ -
collections::VecDeque, sync::{ atomic::{AtomicBool, Ordering}, - mpsc::{channel, Receiver, Sender}, + mpsc::{self, channel, Receiver, Sender}, Arc, }, time::Instant, @@ -43,8 +40,6 @@ where executor: WorkloadExecutor, /// Shared execution cache. execution_cache: ExecutionCache, - /// Transactions pending execution. - pending: VecDeque>, /// Context provided to execution tasks ctx: PrewarmContext, /// How many transactions should be executed in parallel @@ -53,10 +48,6 @@ where to_multi_proof: Option>, /// Receiver for events produced by tx execution actions_rx: Receiver, - /// Sender the transactions use to send their result back - actions_tx: Sender, - /// Total prewarming tasks spawned - prewarm_outcomes_left: usize, } impl PrewarmCacheTask @@ -71,41 +62,64 @@ where execution_cache: ExecutionCache, ctx: PrewarmContext, to_multi_proof: Option>, - pending: VecDeque>, - ) -> Self { + ) -> (Self, Sender) { let (actions_tx, actions_rx) = channel(); - Self { - executor, - execution_cache, - pending, - ctx, - max_concurrency: 64, - to_multi_proof, - actions_rx, + ( + Self { + executor, + execution_cache, + ctx, + max_concurrency: 64, + to_multi_proof, + actions_rx, + }, actions_tx, - prewarm_outcomes_left: 0, - } - } - - /// Returns the sender that can communicate with this task. - pub(super) fn actions_tx(&self) -> Sender { - self.actions_tx.clone() + ) } /// Spawns all pending transactions as blocking tasks by first chunking them. - fn spawn_all(&mut self) { - let chunk_size = (self.pending.len() / self.max_concurrency).max(1); + fn spawn_all( + &self, + pending: mpsc::Receiver + Send + 'static>, + actions_tx: Sender, + ) { + let executor = self.executor.clone(); + let ctx = self.ctx.clone(); + let max_concurrency = self.max_concurrency; + + self.executor.spawn_blocking(move || { + let mut handles = Vec::new(); + let (done_tx, done_rx) = mpsc::channel(); + let mut executing = 0; + while let Ok(executable) = pending.recv() { + let task_idx = executing % max_concurrency; + + if handles.len() <= task_idx { + let (tx, rx) = mpsc::channel(); + let sender = actions_tx.clone(); + let ctx = ctx.clone(); + let done_tx = done_tx.clone(); + + executor.spawn_blocking(move || { + ctx.transact_batch(rx, sender, done_tx); + }); + + handles.push(tx); + } - for chunk in &self.pending.drain(..).chunks(chunk_size) { - let sender = self.actions_tx.clone(); - let ctx = self.ctx.clone(); - let pending_chunk = chunk.collect::>(); + let _ = handles[task_idx].send(executable); - self.prewarm_outcomes_left += pending_chunk.len(); - self.executor.spawn_blocking(move || { - ctx.transact_batch(&pending_chunk, sender); - }); - } + executing += 1; + } + + // drop handle and wait for all tasks to finish and drop theirs + drop(done_tx); + drop(handles); + while done_rx.recv().is_ok() {} + + let _ = actions_tx + .send(PrewarmTaskEvent::FinishedTxExecution { executed_transactions: executing }); + }); } /// If configured and the tx returned proof targets, emit the targets the transaction produced @@ -119,7 +133,7 @@ where fn save_cache(self, state: BundleState) { let start = Instant::now(); let cache = SavedCache::new( - self.ctx.header.hash(), + self.ctx.env.hash, self.ctx.cache.clone(), self.ctx.cache_metrics.clone(), ); @@ -136,32 +150,20 @@ where self.ctx.metrics.cache_saving_duration.set(start.elapsed().as_secs_f64()); } - /// Removes the `actions_tx` currently stored in the struct, replacing it with a new one that - /// does not point to any active receiver. 
- /// - /// This is used to drop the `actions_tx` after all tasks have been spawned, and should not be - /// used in any context other than the `run` method. - fn drop_actions_tx(&mut self) { - self.actions_tx = channel().0; - } - /// Executes the task. /// /// This will execute the transactions until all transactions have been processed or the task /// was cancelled. - pub(super) fn run(mut self) { - self.ctx.metrics.transactions.set(self.pending.len() as f64); - self.ctx.metrics.transactions_histogram.record(self.pending.len() as f64); - + pub(super) fn run( + self, + pending: mpsc::Receiver<impl ExecutableTxFor<Evm> + Send + 'static>, + actions_tx: Sender<PrewarmTaskEvent>, + ) { // spawn execution tasks. - self.spawn_all(); - - // drop the actions sender after we've spawned all execution tasks. This is so that the - // following loop can terminate even if one of the prewarm tasks ends in an error (i.e., - // does not return an Outcome) or panics. - self.drop_actions_tx(); + self.spawn_all(pending, actions_tx); let mut final_block_output = None; + let mut finished_execution = false; while let Ok(event) = self.actions_rx.recv() { match event { PrewarmTaskEvent::TerminateTransactionExecution => { @@ -171,19 +173,22 @@ PrewarmTaskEvent::Outcome { proof_targets } => { // completed executing a set of transactions self.send_multi_proof_targets(proof_targets); + } + PrewarmTaskEvent::Terminate { block_output } => { + final_block_output = Some(block_output); - // decrement the number of tasks left - self.prewarm_outcomes_left -= 1; - - if self.prewarm_outcomes_left == 0 && final_block_output.is_some() { - // all tasks are done, and we have the block output, we can exit + if finished_execution { + // all tasks are done, we can exit, which will save caches and exit break } } - PrewarmTaskEvent::Terminate { block_output } => { - final_block_output = Some(block_output); + PrewarmTaskEvent::FinishedTxExecution { executed_transactions } => { + self.ctx.metrics.transactions.set(executed_transactions as f64); + self.ctx.metrics.transactions_histogram.record(executed_transactions as f64); - if self.prewarm_outcomes_left == 0 { + finished_execution = true; + + if final_block_output.is_some() { // all tasks are done, we can exit, which will save caches and exit break } @@ -205,7 +210,7 @@ N: NodePrimitives, Evm: ConfigureEvm<Primitives = N>, { - pub(super) header: SealedHeaderFor<N>, + pub(super) env: ExecutionEnv<Evm>, pub(super) evm_config: Evm, pub(super) cache: ProviderCaches, pub(super) cache_metrics: CachedStateMetrics, @@ -226,11 +231,9 @@ { /// Splits this context into an evm, an evm config, metrics, and the atomic bool for terminating /// execution. - fn evm_for_ctx( - self, - ) -> Option<(EvmFor<Evm, impl Database>, Evm, PrewarmMetrics, Arc<AtomicBool>)> { + fn evm_for_ctx(self) -> Option<(EvmFor<Evm, impl Database>, PrewarmMetrics, Arc<AtomicBool>)> { let Self { - header, + env, evm_config, cache: caches, cache_metrics, @@ -259,7 +262,7 @@ let state_provider = StateProviderDatabase::new(state_provider); - let mut evm_env = evm_config.evm_env(&header); + let mut evm_env = env.evm_env; // we must disable the nonce check so that we can execute the transaction even if the nonce // doesn't match what's on chain. @@ -280,39 +283,42 @@ }); } - Some((evm, evm_config, metrics, terminate_execution)) + Some((evm, metrics, terminate_execution)) } - /// Transacts the vec of transactions and returns the state outcome. + /// Accepts an [`mpsc::Receiver`] of transactions and a handle to the prewarm task. Executes + /// transactions and streams [`PrewarmTaskEvent::Outcome`] messages for each transaction.
/// - /// Returns `None` if executing the transactions failed with a non-revert error. /// Returns the touched+modified state of the transaction. /// /// Note: Since there are no ordering guarantees, this won't be the state the txs produce when /// executed sequentially. - fn transact_batch(self, txs: &[Recovered<N::SignedTx>], sender: Sender<PrewarmTaskEvent>) { - let Some((mut evm, evm_config, metrics, terminate_execution)) = self.evm_for_ctx() else { - return - }; + fn transact_batch( + self, + txs: mpsc::Receiver<impl ExecutableTxFor<Evm>>, + sender: Sender<PrewarmTaskEvent>, + done_tx: Sender<()>, + ) { + let Some((mut evm, metrics, terminate_execution)) = self.evm_for_ctx() else { return }; - for tx in txs { + while let Ok(tx) = txs.recv() { // If the task was cancelled, stop execution, send an empty result to notify the task, // and exit. if terminate_execution.load(Ordering::Relaxed) { let _ = sender.send(PrewarmTaskEvent::Outcome { proof_targets: None }); - return + break } // create the tx env - let tx_env = evm_config.tx_env(tx); let start = Instant::now(); - let res = match evm.transact(tx_env) { + let res = match evm.transact(&tx) { Ok(res) => res, Err(err) => { trace!( target: "engine::tree", %err, - tx_hash=%tx.tx_hash(), + tx_hash=%tx.tx().tx_hash(), sender=%tx.signer(), "Error when executing prewarm transaction", ); @@ -327,6 +333,9 @@ let _ = sender.send(PrewarmTaskEvent::Outcome { proof_targets: Some(targets) }); } + + // send a message to the main task to flag that we're done + let _ = done_tx.send(()); } } @@ -380,6 +389,11 @@ pub(super) enum PrewarmTaskEvent { /// The prepared proof targets based on the evm state outcome proof_targets: Option<MultiProofTargets>, }, + /// Finished executing all transactions + FinishedTxExecution { + /// Number of transactions executed + executed_transactions: usize, + }, } /// Metrics for transactions prewarming.
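The streaming prewarm above replaces the old chunked `VecDeque` batching: a fan-out task forwards each recovered transaction to a prewarm channel (Ok values only) and an execution channel, and `spawn_all` round-robins the prewarm stream over at most `max_concurrency` lazily spawned workers, each of which signals completion by dropping a clone of a `done` sender. Below is a minimal, self-contained sketch of that channel topology with plain integers standing in for transactions; all names are illustrative stand-ins, not reth APIs.

```rust
use std::sync::mpsc;
use std::thread;

fn main() {
    let max_concurrency = 4;

    // Fan the source iterator out into a prewarm stream and an execution stream;
    // only Ok(_) values reach the prewarm side, errors still reach the executor.
    let (prewarm_tx, prewarm_rx) = mpsc::channel::<u64>();
    let (execute_tx, execute_rx) = mpsc::channel::<Result<u64, String>>();
    thread::spawn(move || {
        for tx in (0u64..32).map(Ok::<_, String>) {
            if let Ok(tx) = &tx {
                let _ = prewarm_tx.send(*tx);
            }
            let _ = execute_tx.send(tx);
        }
    });

    // Round-robin the prewarm stream over a bounded pool of lazily created
    // workers; each worker drops its clone of `done_tx` as its completion signal.
    let (done_tx, done_rx) = mpsc::channel::<()>();
    let mut workers: Vec<mpsc::Sender<u64>> = Vec::new();
    let mut dispatched = 0usize;
    while let Ok(tx) = prewarm_rx.recv() {
        let idx = dispatched % max_concurrency;
        if workers.len() <= idx {
            let (work_tx, work_rx) = mpsc::channel::<u64>();
            let done_tx = done_tx.clone();
            thread::spawn(move || {
                while let Ok(_tx) = work_rx.recv() {
                    // Stand-in for executing one transaction against warm caches.
                }
                drop(done_tx); // completion signal
            });
            workers.push(work_tx);
        }
        let _ = workers[idx].send(tx);
        dispatched += 1;
    }

    // Close the work queues, then wait until every worker has hung up.
    drop(done_tx);
    drop(workers);
    while done_rx.recv().is_ok() {}

    // The execution stream is consumed independently and stays ordered.
    assert_eq!(execute_rx.iter().filter(|r| r.is_ok()).count(), 32);
    println!("prewarmed {dispatched} transactions");
}
```

This mirrors why the `prewarm_outcomes_left` counter could be dropped: the pool itself knows when all workers are done and emits a single `FinishedTxExecution`-style event instead.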
diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index 4677845fc0b..ee0bffe02dd 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -9,18 +9,25 @@ use crate::tree::{ persistence_state::CurrentPersistenceAction, precompile_cache::{CachedPrecompile, CachedPrecompileMetrics, PrecompileCacheMap}, sparse_trie::StateRootComputeOutcome, - ConsistentDbView, EngineApiMetrics, EngineApiTreeState, PayloadHandle, PersistenceState, - PersistingKind, StateProviderBuilder, StateProviderDatabase, TreeConfig, + ConsistentDbView, EngineApiMetrics, EngineApiTreeState, ExecutionEnv, PayloadHandle, + PersistenceState, PersistingKind, StateProviderBuilder, StateProviderDatabase, TreeConfig, }; -use alloy_evm::{block::BlockExecutor, Evm}; +use alloy_consensus::transaction::Either; +use alloy_eips::{eip1898::BlockWithParent, NumHash}; +use alloy_evm::Evm; use alloy_primitives::B256; use reth_chain_state::{ CanonicalInMemoryState, ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates, }; use reth_consensus::{ConsensusError, FullConsensus, HeaderValidator}; -use reth_engine_primitives::{InvalidBlockHook, PayloadValidator}; -use reth_errors::ProviderResult; -use reth_evm::{ConfigureEvm, SpecFor}; +use reth_engine_primitives::{ + ConfigureEngineEvm, ExecutableTxIterator, ExecutionPayload, InvalidBlockHook, PayloadValidator, +}; +use reth_errors::{BlockExecutionError, ProviderResult}; +use reth_evm::{ + block::BlockExecutor, execute::ExecutableTxFor, ConfigureEvm, EvmEnvFor, ExecutionCtxFor, + SpecFor, +}; use reth_payload_primitives::{ BuiltPayload, InvalidPayloadAttributesError, NewPayloadError, PayloadTypes, }; @@ -106,7 +113,7 @@ impl<'a, N: NodePrimitives> TreeCtx<'a, N> { /// currently in progress. /// /// This is adapted from the `persisting_kind_for` method in `EngineApiTreeHandler`. - pub fn persisting_kind_for(&self, block: &N::BlockHeader) -> PersistingKind { + pub fn persisting_kind_for(&self, block: BlockWithParent) -> PersistingKind { // Check that we're currently persisting. let Some(action) = self.persistence().current_action() else { return PersistingKind::NotPersisting @@ -118,7 +125,8 @@ impl<'a, N: NodePrimitives> TreeCtx<'a, N> { // The block being validated can only be a descendant if its number is higher than // the highest block persisting. Otherwise, it's likely a fork of a lower block. - if block.number() > highest.number && self.state().tree_state.is_descendant(*highest, block) + if block.block.number > highest.number && + self.state().tree_state.is_descendant(*highest, block) { return PersistingKind::PersistingDescendant } @@ -207,6 +215,70 @@ where } } + /// Converts a [`BlockOrPayload`] to a recovered block. + pub fn convert_to_block>>( + &self, + input: BlockOrPayload, + ) -> Result, NewPayloadError> + where + V: PayloadValidator, + { + match input { + BlockOrPayload::Payload(payload) => self.validator.ensure_well_formed_payload(payload), + BlockOrPayload::Block(block) => Ok(block), + } + } + + /// Returns EVM environment for the given payload or block. + pub fn evm_env_for>>( + &self, + input: &BlockOrPayload, + ) -> EvmEnvFor + where + V: PayloadValidator, + Evm: ConfigureEngineEvm, + { + match input { + BlockOrPayload::Payload(payload) => self.evm_config.evm_env_for_payload(payload), + BlockOrPayload::Block(block) => self.evm_config.evm_env(block.header()), + } + } + + /// Returns [`ExecutableTxIterator`] for the given payload or block. 
+ pub fn tx_iterator_for<'a, T: PayloadTypes>>( + &'a self, + input: &'a BlockOrPayload, + ) -> Result + 'a, NewPayloadError> + where + V: PayloadValidator, + Evm: ConfigureEngineEvm, + { + match input { + BlockOrPayload::Payload(payload) => Ok(Either::Left( + self.evm_config.tx_iterator_for_payload(payload).map(|res| res.map(Either::Left)), + )), + BlockOrPayload::Block(block) => { + let transactions = block.clone_transactions_recovered().collect::>(); + Ok(Either::Right(transactions.into_iter().map(|tx| Ok(Either::Right(tx))))) + } + } + } + + /// Returns a [`ExecutionCtxFor`] for the given payload or block. + pub fn execution_ctx_for<'a, T: PayloadTypes>>( + &self, + input: &'a BlockOrPayload, + ) -> ExecutionCtxFor<'a, Evm> + where + V: PayloadValidator, + Evm: ConfigureEngineEvm, + { + match input { + BlockOrPayload::Payload(payload) => self.evm_config.context_for_payload(payload), + BlockOrPayload::Block(block) => self.evm_config.context_for_block(block), + } + } + /// Validates a block that has already been converted from a payload. /// /// This method performs: @@ -216,61 +288,56 @@ where /// - Fork detection pub fn validate_block_with_state>>( &mut self, - block: RecoveredBlock, + input: BlockOrPayload, mut ctx: TreeCtx<'_, N>, - ) -> ValidationOutcome)> + ) -> ValidationOutcome> where V: PayloadValidator, + Evm: ConfigureEngineEvm, { /// A helper macro that returns the block in case there was an error macro_rules! ensure_ok { ($expr:expr) => { match $expr { Ok(val) => val, - Err(e) => return Err((e.into(), block)), + Err(e) => { + let block = self.convert_to_block(input)?; + return Err(InsertBlockError::new(block.into_sealed_block(), e.into()).into()) + } } }; } - let block_num_hash = block.num_hash(); - - trace!(target: "engine::tree", block=?block_num_hash, "Validating block consensus"); - // validate block consensus rules - ensure_ok!(self.validate_block_inner(&block)); + let parent_hash = input.parent_hash(); + let block_num_hash = input.num_hash(); - trace!(target: "engine::tree", block=?block_num_hash, parent=?block.parent_hash(), "Fetching block state provider"); + trace!(target: "engine::tree", block=?block_num_hash, parent=?parent_hash, "Fetching block state provider"); let Some(provider_builder) = - ensure_ok!(self.state_provider_builder(block.parent_hash(), ctx.state())) + ensure_ok!(self.state_provider_builder(parent_hash, ctx.state())) else { // this is pre-validated in the tree - return Err(( - InsertBlockErrorKind::Provider(ProviderError::HeaderNotFound( - block.parent_hash().into(), - )), - block, - )) + return Err(InsertBlockError::new( + self.convert_to_block(input)?.into_sealed_block(), + ProviderError::HeaderNotFound(parent_hash.into()).into(), + ) + .into()) }; - // now validate against the parent - let Some(parent_block) = - ensure_ok!(self.sealed_header_by_hash(block.parent_hash(), ctx.state())) + let state_provider = ensure_ok!(provider_builder.build()); + + // fetch parent block + let Some(parent_block) = ensure_ok!(self.sealed_header_by_hash(parent_hash, ctx.state())) else { - return Err(( - InsertBlockErrorKind::Provider(ProviderError::HeaderNotFound( - block.parent_hash().into(), - )), - block, - )) + return Err(InsertBlockError::new( + self.convert_to_block(input)?.into_sealed_block(), + ProviderError::HeaderNotFound(parent_hash.into()).into(), + ) + .into()) }; - if let Err(e) = - self.consensus.validate_header_against_parent(block.sealed_header(), &parent_block) - { - warn!(target: "engine::tree", ?block, "Failed to validate header {} against 
parent: {e}", block.hash()); - return Err((e.into(), block)) - } + let evm_env = self.evm_env_for(&input); - let state_provider = ensure_ok!(provider_builder.build()); + let env = ExecutionEnv { evm_env, hash: input.hash(), parent_hash: input.parent_hash() }; // We only run the parallel state root if we are not currently persisting any blocks or // persisting blocks that are all ancestors of the one we are executing. @@ -281,7 +348,7 @@ where // collect in `compute_state_root_parallel`. // // See https://github.com/paradigmxyz/reth/issues/12688 for more details - let persisting_kind = ctx.persisting_kind_for(block.header()); + let persisting_kind = ctx.persisting_kind_for(input.block_with_parent()); // don't run parallel if state root fallback is set let run_parallel_state_root = persisting_kind.can_run_parallel_state_root() && !self.config.state_root_fallback(); @@ -294,7 +361,7 @@ where // It's cheaper to run a parallel state root that does one walk over trie tables while // accounting for the prefix sets. let has_ancestors_with_missing_trie_updates = - self.has_ancestors_with_missing_trie_updates(block.sealed_header(), ctx.state()); + self.has_ancestors_with_missing_trie_updates(input.block_with_parent(), ctx.state()); let mut use_state_root_task = run_parallel_state_root && self.config.use_state_root_task() && !has_ancestors_with_missing_trie_updates; @@ -310,25 +377,24 @@ where ); // use prewarming background task - let header = block.clone_sealed_header(); - let txs = block.clone_transactions_recovered().collect(); + let txs = self.tx_iterator_for(&input)?; let mut handle = if use_state_root_task { // use background tasks for state root calc let consistent_view = ensure_ok!(ConsistentDbView::new_with_latest_tip(self.provider.clone())); + // get allocated trie input if it exists + let allocated_trie_input = self.payload_processor.take_trie_input(); + // Compute trie input let trie_input_start = Instant::now(); - let res = self.compute_trie_input( + let trie_input = ensure_ok!(self.compute_trie_input( persisting_kind, ensure_ok!(consistent_view.provider_ro()), - block.header().parent_hash(), + parent_hash, ctx.state(), - ); - let trie_input = match res { - Ok(val) => val, - Err(e) => return Err((InsertBlockErrorKind::Other(Box::new(e)), block)), - }; + allocated_trie_input, + )); self.metrics .block_validation @@ -340,7 +406,7 @@ where // proof. 
if trie_input.prefix_sets.is_empty() { self.payload_processor.spawn( - header, + env.clone(), txs, provider_builder, consistent_view, @@ -350,10 +416,10 @@ where } else { debug!(target: "engine::tree", block=?block_num_hash, "Disabling state root task due to non-empty prefix sets"); use_state_root_task = false; - self.payload_processor.spawn_cache_exclusive(header, txs, provider_builder) + self.payload_processor.spawn_cache_exclusive(env.clone(), txs, provider_builder) } } else { - self.payload_processor.spawn_cache_exclusive(header, txs, provider_builder) + self.payload_processor.spawn_cache_exclusive(env.clone(), txs, provider_builder) }; // Use cached state provider before executing, used in execution after prewarming threads @@ -367,22 +433,44 @@ where let (output, execution_finish) = if self.config.state_provider_metrics() { let state_provider = InstrumentedStateProvider::from_state_provider(&state_provider); let (output, execution_finish) = - ensure_ok!(self.execute_block(&state_provider, &block, &handle)); + ensure_ok!(self.execute_block(&state_provider, env, &input, &mut handle)); state_provider.record_total_latency(); (output, execution_finish) } else { - let (output, execution_finish) = - ensure_ok!(self.execute_block(&state_provider, &block, &handle)); - (output, execution_finish) + ensure_ok!(self.execute_block(&state_provider, env, &input, &mut handle)) }; // after executing the block we can stop executing transactions handle.stop_prewarming_execution(); + let block = self.convert_to_block(input)?; + + // A helper macro that returns the block in case there was an error + macro_rules! ensure_ok { + ($expr:expr) => { + match $expr { + Ok(val) => val, + Err(e) => return Err(InsertBlockError::new(block.into_sealed_block(), e.into()).into()), + } + }; + } + + trace!(target: "engine::tree", block=?block_num_hash, "Validating block consensus"); + // validate block consensus rules + ensure_ok!(self.validate_block_inner(&block)); + + // now validate against the parent + if let Err(e) = + self.consensus.validate_header_against_parent(block.sealed_header(), &parent_block) + { + warn!(target: "engine::tree", ?block, "Failed to validate header {} against parent: {e}", block.hash()); + return Err(InsertBlockError::new(block.into_sealed_block(), e.into()).into()) + } + if let Err(err) = self.consensus.validate_block_post_execution(&block, &output) { // call post-block hook self.on_invalid_block(&parent_block, &block, &output, None, ctx.state_mut()); - return Err((err.into(), block)) + return Err(InsertBlockError::new(block.into_sealed_block(), err.into()).into()) } let hashed_state = self.provider.hashed_post_state(&output.state); @@ -392,7 +480,7 @@ where { // call post-block hook self.on_invalid_block(&parent_block, &block, &output, None, ctx.state_mut()); - return Err((err.into(), block)) + return Err(InsertBlockError::new(block.into_sealed_block(), err.into()).into()) } debug!(target: "engine::tree", block=?block_num_hash, "Calculating block state root"); @@ -430,7 +518,7 @@ where debug!(target: "engine::tree", block=?block_num_hash, "Using parallel state root algorithm"); match self.compute_state_root_parallel( persisting_kind, - block.header().parent_hash(), + block.parent_hash(), &hashed_state, ctx.state(), ) { @@ -446,7 +534,13 @@ where Err(ParallelStateRootError::Provider(ProviderError::ConsistentView(error))) => { debug!(target: "engine::tree", %error, "Parallel state root computation failed consistency check, falling back"); } - Err(error) => return 
Err((InsertBlockErrorKind::Other(Box::new(error)), block)), + Err(error) => { + return Err(InsertBlockError::new( + block.into_sealed_block(), + InsertBlockErrorKind::Other(Box::new(error)), + ) + .into()) + } } } } @@ -482,13 +576,15 @@ where Some((&trie_output, state_root)), ctx.state_mut(), ); - return Err(( + let block_state_root = block.header().state_root(); + return Err(InsertBlockError::new( + block.into_sealed_block(), ConsensusError::BodyStateRootDiff( - GotExpected { got: state_root, expected: block.header().state_root() }.into(), + GotExpected { got: state_root, expected: block_state_root }.into(), ) .into(), - block, - )) + ) + .into()) } // terminate prewarming task with good state output @@ -546,19 +642,31 @@ where } /// Executes a block with the given state provider - fn execute_block( + fn execute_block( &mut self, state_provider: S, - block: &RecoveredBlock, - handle: &PayloadHandle, - ) -> Result<(BlockExecutionOutput, Instant), InsertBlockErrorKind> { - debug!(target: "engine::tree", block=?block.num_hash(), "Executing block"); + env: ExecutionEnv, + input: &BlockOrPayload, + handle: &mut PayloadHandle, Err>, + ) -> Result<(BlockExecutionOutput, Instant), InsertBlockErrorKind> + where + S: StateProvider, + Err: core::error::Error + Send + Sync + 'static, + V: PayloadValidator, + T: PayloadTypes>, + Evm: ConfigureEngineEvm, + { + let num_hash = NumHash::new(env.evm_env.block_env.number.to(), env.hash); + debug!(target: "engine::tree", block=?num_hash, "Executing block"); let mut db = State::builder() .with_database(StateProviderDatabase::new(&state_provider)) .with_bundle_update() .without_state_clear() .build(); - let mut executor = self.evm_config.executor_for_block(&mut db, block); + + let evm = self.evm_config.evm_with_env(&mut db, env.evm_env.clone()); + let ctx = self.execution_ctx_for(input); + let mut executor = self.evm_config.create_executor(evm, ctx); if !self.config.precompile_cache_disabled() { executor.evm_mut().precompiles_mut().map_precompiles(|address, precompile| { @@ -570,21 +678,22 @@ where CachedPrecompile::wrap( precompile, self.precompile_cache_map.cache_for_address(*address), - *self.evm_config.evm_env(block.header()).spec_id(), + *env.evm_env.spec_id(), Some(metrics), ) }); } let execution_start = Instant::now(); + let state_hook = Box::new(handle.state_hook()); let output = self.metrics.executor.execute_metered( executor, - block, - Box::new(handle.state_hook()), + handle.iter_transactions().map(|res| res.map_err(BlockExecutionError::other)), + state_hook, )?; let execution_finish = Instant::now(); let execution_time = execution_finish.duration_since(execution_start); - debug!(target: "engine::tree", elapsed = ?execution_time, number=?block.number(), "Executed block"); + debug!(target: "engine::tree", elapsed = ?execution_time, number=?num_hash.number, "Executed block"); Ok((output, execution_finish)) } @@ -610,6 +719,7 @@ where consistent_view.provider_ro()?, parent_hash, state, + None, )?; // Extend with block we are validating root for. input.append_ref(hashed_state); @@ -620,11 +730,11 @@ where /// Check if the given block has any ancestors with missing trie updates. 
fn has_ancestors_with_missing_trie_updates( &self, - target_header: &SealedHeader, + target_header: BlockWithParent, state: &EngineApiTreeState, ) -> bool { // Walk back through the chain starting from the parent of the target block - let mut current_hash = target_header.parent_hash(); + let mut current_hash = target_header.parent; while let Some(block) = state.tree_state.blocks_by_hash.get(¤t_hash) { // Check if this block is missing trie updates if block.trie.is_missing() { @@ -706,8 +816,10 @@ where provider: TP, parent_hash: B256, state: &EngineApiTreeState, + allocated_trie_input: Option, ) -> ProviderResult { - let mut input = TrieInput::default(); + // get allocated trie input or use a default trie input + let mut input = allocated_trie_input.unwrap_or_default(); let best_block_number = provider.best_block_number()?; @@ -787,7 +899,7 @@ where } /// Output of block or payload validation. -pub type ValidationOutcome>> = +pub type ValidationOutcome>> = Result, E>; /// Type that validates the payloads processed by the engine. @@ -831,7 +943,7 @@ pub trait EngineValidator< &mut self, payload: Types::ExecutionData, ctx: TreeCtx<'_, N>, - ) -> ValidationOutcome>; + ) -> ValidationOutcome; /// Validates a block downloaded from the network. fn validate_block( @@ -852,9 +964,9 @@ where + Clone + 'static, N: NodePrimitives, - Evm: ConfigureEvm + 'static, - Types: PayloadTypes>, V: PayloadValidator, + Evm: ConfigureEngineEvm + 'static, + Types: PayloadTypes>, { fn validate_payload_attributes_against_header( &self, @@ -876,9 +988,8 @@ where &mut self, payload: Types::ExecutionData, ctx: TreeCtx<'_, N>, - ) -> ValidationOutcome> { - let block = self.validator.ensure_well_formed_payload(payload)?; - Ok(EngineValidator::::validate_block(self, block, ctx)?) + ) -> ValidationOutcome { + self.validate_block_with_state(BlockOrPayload::Payload(payload), ctx) } fn validate_block( @@ -886,7 +997,49 @@ where block: RecoveredBlock, ctx: TreeCtx<'_, N>, ) -> ValidationOutcome { - self.validate_block_with_state(block, ctx) - .map_err(|(kind, block)| InsertBlockError::new(block.into_sealed_block(), kind)) + self.validate_block_with_state(BlockOrPayload::Block(block), ctx) + } +} + +/// Enum representing either block or payload being validated. +#[derive(Debug)] +pub enum BlockOrPayload { + /// Payload. + Payload(T::ExecutionData), + /// Block. + Block(RecoveredBlock::Primitives>>), +} + +impl BlockOrPayload { + /// Returns the hash of the block. + pub fn hash(&self) -> B256 { + match self { + Self::Payload(payload) => payload.block_hash(), + Self::Block(block) => block.hash(), + } + } + + /// Returns the number and hash of the block. + pub fn num_hash(&self) -> NumHash { + match self { + Self::Payload(payload) => payload.num_hash(), + Self::Block(block) => block.num_hash(), + } + } + + /// Returns the parent hash of the block. + pub fn parent_hash(&self) -> B256 { + match self { + Self::Payload(payload) => payload.parent_hash(), + Self::Block(block) => block.parent_hash(), + } + } + + /// Returns [`BlockWithParent`] for the block. + pub fn block_with_parent(&self) -> BlockWithParent { + match self { + Self::Payload(payload) => payload.block_with_parent(), + Self::Block(block) => block.block_with_parent(), + } } } diff --git a/crates/engine/tree/src/tree/state.rs b/crates/engine/tree/src/tree/state.rs index 380e100b475..0fcc51d59e6 100644 --- a/crates/engine/tree/src/tree/state.rs +++ b/crates/engine/tree/src/tree/state.rs @@ -1,7 +1,7 @@ //! Functionality related to tree state. 
use crate::engine::EngineApiKind; -use alloy_eips::{merge::EPOCH_SLOTS, BlockNumHash}; +use alloy_eips::{eip1898::BlockWithParent, merge::EPOCH_SLOTS, BlockNumHash}; use alloy_primitives::{ map::{HashMap, HashSet}, BlockNumber, B256, @@ -348,21 +348,21 @@ impl TreeState { /// Determines if the second block is a direct descendant of the first block. /// /// If the two blocks are the same, this returns `false`. - pub(crate) fn is_descendant(&self, first: BlockNumHash, second: &N::BlockHeader) -> bool { + pub(crate) fn is_descendant(&self, first: BlockNumHash, second: BlockWithParent) -> bool { // If the second block's parent is the first block's hash, then it is a direct descendant // and we can return early. - if second.parent_hash() == first.hash { + if second.parent == first.hash { return true } // If the second block is lower than, or has the same block number, they are not // descendants. - if second.number() <= first.number { + if second.block.number <= first.number { return false } // iterate through parents of the second until we reach the number - let Some(mut current_block) = self.blocks_by_hash.get(&second.parent_hash()) else { + let Some(mut current_block) = self.blocks_by_hash.get(&second.parent) else { // If we can't find its parent in the tree, we can't continue, so return false return false }; @@ -416,18 +416,18 @@ mod tests { tree_state.insert_executed(blocks[0].clone()); assert!(tree_state.is_descendant( blocks[0].recovered_block().num_hash(), - blocks[1].recovered_block().header() + blocks[1].recovered_block().block_with_parent() )); tree_state.insert_executed(blocks[1].clone()); assert!(tree_state.is_descendant( blocks[0].recovered_block().num_hash(), - blocks[2].recovered_block().header() + blocks[2].recovered_block().block_with_parent() )); assert!(tree_state.is_descendant( blocks[1].recovered_block().num_hash(), - blocks[2].recovered_block().header() + blocks[2].recovered_block().block_with_parent() )); } diff --git a/crates/engine/tree/src/tree/tests.rs b/crates/engine/tree/src/tree/tests.rs index fde19023ece..ffb327e63d5 100644 --- a/crates/engine/tree/src/tree/tests.rs +++ b/crates/engine/tree/src/tree/tests.rs @@ -10,7 +10,7 @@ use alloy_rpc_types_engine::{ExecutionData, ExecutionPayloadSidecar, ExecutionPa use assert_matches::assert_matches; use reth_chain_state::{test_utils::TestBlockBuilder, BlockState}; use reth_chainspec::{ChainSpec, HOLESKY, MAINNET}; -use reth_engine_primitives::{EngineValidator, ForkchoiceStatus}; +use reth_engine_primitives::{EngineApiValidator, ForkchoiceStatus, NoopInvalidBlockHook}; use reth_ethereum_consensus::EthBeaconConsensus; use reth_ethereum_engine_primitives::EthEngineTypes; use reth_ethereum_primitives::{Block, EthPrimitives}; @@ -47,7 +47,7 @@ impl reth_engine_primitives::PayloadValidator for MockEngineVali } } -impl EngineValidator for MockEngineValidator { +impl EngineApiValidator for MockEngineValidator { fn validate_version_specific_fields( &self, _version: reth_payload_primitives::EngineApiMessageVersion, diff --git a/crates/engine/util/Cargo.toml b/crates/engine/util/Cargo.toml index c6a34bcc164..58ee6ac255c 100644 --- a/crates/engine/util/Cargo.toml +++ b/crates/engine/util/Cargo.toml @@ -17,6 +17,7 @@ reth-errors.workspace = true reth-chainspec.workspace = true reth-fs-util.workspace = true reth-engine-primitives.workspace = true +reth-engine-tree.workspace = true reth-evm.workspace = true reth-revm.workspace = true reth-storage-api.workspace = true diff --git a/crates/engine/util/src/lib.rs 
b/crates/engine/util/src/lib.rs index 9c2e9449bb3..0bf9ee89c18 100644 --- a/crates/engine/util/src/lib.rs +++ b/crates/engine/util/src/lib.rs @@ -8,7 +8,7 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(test), warn(unused_crate_dependencies))] -use futures::Stream; +use futures::{Future, Stream}; use reth_engine_primitives::BeaconEngineMessage; use reth_payload_primitives::PayloadTypes; use std::path::PathBuf; @@ -26,6 +26,10 @@ use skip_new_payload::EngineSkipNewPayload; pub mod reorg; use reorg::EngineReorg; +/// The result type for `maybe_reorg` method. +type MaybeReorgResult = + Result, S>, E>; + /// The collection of stream extensions for engine API message stream. pub trait EngineMessageStreamExt: Stream> { /// Skips the specified number of [`BeaconEngineMessage::ForkchoiceUpdated`] messages from the @@ -123,28 +127,38 @@ pub trait EngineMessageStreamExt: Stream( + /// + /// The `payload_validator_fn` closure is only called if `frequency` is `Some`, + /// allowing for lazy initialization of the validator. + fn maybe_reorg( self, provider: Provider, evm_config: Evm, - payload_validator: Validator, + payload_validator_fn: F, frequency: Option, depth: Option, - ) -> Either, Self> + ) -> impl Future> + Send where - Self: Sized, + Self: Sized + Send, + Provider: Send, + Evm: Send, + F: FnOnce() -> Fut + Send, + Fut: Future> + Send, { - if let Some(frequency) = frequency { - Either::Left(reorg::EngineReorg::new( - self, - provider, - evm_config, - payload_validator, - frequency, - depth.unwrap_or_default(), - )) - } else { - Either::Right(self) + async move { + if let Some(frequency) = frequency { + let validator = payload_validator_fn().await?; + Ok(Either::Left(reorg::EngineReorg::new( + self, + provider, + evm_config, + validator, + frequency, + depth.unwrap_or_default(), + ))) + } else { + Ok(Either::Right(self)) + } } } } diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index 269c9eb1500..2b76a438589 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -7,8 +7,8 @@ use itertools::Either; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_engine_primitives::{ BeaconEngineMessage, BeaconOnNewPayloadError, ExecutionPayload as _, OnForkChoiceUpdated, - PayloadValidator, }; +use reth_engine_tree::tree::EngineValidator; use reth_errors::{BlockExecutionError, BlockValidationError, RethError, RethResult}; use reth_evm::{ execute::{BlockBuilder, BlockBuilderOutcome}, @@ -103,7 +103,7 @@ where + StateProviderFactory + ChainSpecProvider, Evm: ConfigureEvm, - Validator: PayloadValidator>, + Validator: EngineValidator, { type Item = S::Item; @@ -248,8 +248,8 @@ where + StateProviderFactory + ChainSpecProvider, Evm: ConfigureEvm, - T: PayloadTypes, - Validator: PayloadValidator>, + T: PayloadTypes>, + Validator: EngineValidator, { // Ensure next payload is valid. 
let next_block = diff --git a/crates/era-downloader/src/client.rs b/crates/era-downloader/src/client.rs index ea4894cadbd..bce670271a1 100644 --- a/crates/era-downloader/src/client.rs +++ b/crates/era-downloader/src/client.rs @@ -128,7 +128,7 @@ impl EraClient { if let Some(name) = entry.file_name().to_str() { if let Some(number) = self.file_name_to_number(name) { if number < index || number >= last { - eprintln!("Deleting kokot {}", entry.path().display()); + eprintln!("Deleting file {}", entry.path().display()); eprintln!("{number} < {index} || {number} > {last}"); reth_fs_util::remove_file(entry.path())?; } diff --git a/crates/era/src/era_types.rs b/crates/era/src/era_types.rs index 65b80f5b384..7a3ed404839 100644 --- a/crates/era/src/era_types.rs +++ b/crates/era/src/era_types.rs @@ -56,7 +56,7 @@ impl EraGroup { } /// Check if this is a genesis era - no blocks yet - pub fn is_genesis(&self) -> bool { + pub const fn is_genesis(&self) -> bool { self.blocks.is_empty() && self.slot_index.is_none() } @@ -89,7 +89,7 @@ impl SlotIndex { } /// Get the number of slots covered by this index - pub fn slot_count(&self) -> usize { + pub const fn slot_count(&self) -> usize { self.offsets.len() } diff --git a/crates/era/tests/it/main.rs b/crates/era/tests/it/main.rs index 86bfb3b3ac5..17af9dc0015 100644 --- a/crates/era/tests/it/main.rs +++ b/crates/era/tests/it/main.rs @@ -32,18 +32,20 @@ const fn main() {} const MAINNET: &str = "mainnet"; /// Default mainnet url /// for downloading mainnet `.era1` files -const MAINNET_URL: &str = "https://era.ithaca.xyz/era1/index.html"; +const MAINNET_URL: &str = "https://era.ithaca.xyz/era1/"; /// Succinct list of mainnet files we want to download /// from /// for testing purposes -const ERA1_MAINNET_FILES_NAMES: [&str; 6] = [ +const ERA1_MAINNET_FILES_NAMES: [&str; 8] = [ "mainnet-00000-5ec1ffb8.era1", "mainnet-00003-d8b8a40b.era1", "mainnet-00151-e322efe1.era1", "mainnet-00293-0d6c5812.era1", "mainnet-00443-ea71b6f9.era1", "mainnet-01367-d7efc68f.era1", + "mainnet-01610-99fdde4b.era1", + "mainnet-01895-3f81607c.era1", ]; /// Sepolia network name const SEPOLIA: &str = "sepolia"; /// Default sepolia url /// for downloading sepolia `.era1` files const SEPOLIA_URL: &str = "https://era.ithaca.xyz/sepolia-era1/"; /// Succinct list of sepolia files we want to download /// from /// for testing purposes @@ -56,8 +58,12 @@ -const ERA1_SEPOLIA_FILES_NAMES: [&str; 3] = - ["sepolia-00000-643a00f7.era1", "sepolia-00074-0e81003c.era1", "sepolia-00173-b6924da5.era1"]; +const ERA1_SEPOLIA_FILES_NAMES: [&str; 4] = [ + "sepolia-00000-643a00f7.era1", + "sepolia-00074-0e81003c.era1", + "sepolia-00173-b6924da5.era1", + "sepolia-00182-a4f0a8a1.era1", +]; /// Utility for downloading `.era1` files for tests /// in a temporary directory diff --git a/crates/ethereum/cli/Cargo.toml b/crates/ethereum/cli/Cargo.toml index a32ead66fba..491d818eb92 100644 --- a/crates/ethereum/cli/Cargo.toml +++ b/crates/ethereum/cli/Cargo.toml @@ -24,9 +24,6 @@ reth-node-metrics.workspace = true reth-tracing.workspace = true reth-node-api.workspace = true -# alloy -alloy-consensus.workspace = true # misc clap.workspace = true eyre.workspace = true diff --git a/crates/ethereum/cli/src/interface.rs b/crates/ethereum/cli/src/interface.rs index ee6fe5698ea..b41c61de7cb 100644 --- a/crates/ethereum/cli/src/interface.rs +++ b/crates/ethereum/cli/src/interface.rs @@ -1,12 +1,11 @@ //!
CLI definition and entrypoint to executable use crate::chainspec::EthereumChainSpecParser; -use alloy_consensus::Header; use clap::{Parser, Subcommand}; use reth_chainspec::{ChainSpec, EthChainSpec, Hardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::{ - common::{CliComponentsBuilder, CliNodeTypes}, + common::{CliComponentsBuilder, CliHeader, CliNodeTypes}, config_cmd, db, download, dump_genesis, export_era, import, import_era, init_cmd, init_state, launcher::FnLauncher, node::{self, NoArgs}, @@ -125,10 +124,7 @@ impl Cli { ) -> eyre::Result<()>, ) -> eyre::Result<()> where - N: CliNodeTypes< - Primitives: NodePrimitives>, - ChainSpec: Hardforks, - >, + N: CliNodeTypes, ChainSpec: Hardforks>, C: ChainSpecParser, { self.with_runner_and_components(CliRunner::try_default_runtime()?, components, launcher) @@ -182,10 +178,7 @@ impl Cli { ) -> eyre::Result<()>, ) -> eyre::Result<()> where - N: CliNodeTypes< - Primitives: NodePrimitives>, - ChainSpec: Hardforks, - >, + N: CliNodeTypes, ChainSpec: Hardforks>, C: ChainSpecParser, { // Add network name if available to the logs dir @@ -248,7 +241,6 @@ impl Cli { /// Commands to be executed #[derive(Debug, Subcommand)] -#[expect(clippy::large_enum_variant)] pub enum Commands { /// Start the node #[command(name = "node")] diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index 82a37b2386e..85d049e65b9 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -220,8 +220,6 @@ where validate_against_parent_timestamp(header.header(), parent.header())?; - // TODO Check difficulty increment between parent and self - // Ace age did increment it by some formula that we need to follow. self.validate_against_parent_gas_limit(header, parent)?; validate_against_parent_eip1559_base_fee( diff --git a/crates/ethereum/evm/Cargo.toml b/crates/ethereum/evm/Cargo.toml index 744bcdc5368..fbbbeeed836 100644 --- a/crates/ethereum/evm/Cargo.toml +++ b/crates/ethereum/evm/Cargo.toml @@ -19,12 +19,14 @@ reth-primitives-traits.workspace = true reth-ethereum-primitives.workspace = true revm.workspace = true reth-evm.workspace = true +reth-storage-errors.workspace = true # Alloy alloy-primitives.workspace = true alloy-eips.workspace = true alloy-evm.workspace = true alloy-consensus.workspace = true +alloy-rpc-types-engine.workspace = true # Misc parking_lot = { workspace = true, optional = true } @@ -53,6 +55,8 @@ std = [ "revm/std", "reth-ethereum-primitives/std", "derive_more?/std", + "alloy-rpc-types-engine/std", + "reth-storage-errors/std", ] test-utils = [ "dep:parking_lot", diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index c91fe4cee79..ae7defc328a 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -19,20 +19,23 @@ extern crate alloc; use alloc::{borrow::Cow, sync::Arc}; use alloy_consensus::{BlockHeader, Header}; +use alloy_eips::Decodable2718; pub use alloy_evm::EthEvm; use alloy_evm::{ eth::{EthBlockExecutionCtx, EthBlockExecutorFactory}, EthEvmFactory, FromRecoveredTx, FromTxWithEncoded, }; use alloy_primitives::{Bytes, U256}; +use alloy_rpc_types_engine::ExecutionData; use core::{convert::Infallible, fmt::Debug}; use reth_chainspec::{ChainSpec, EthChainSpec, MAINNET}; use reth_ethereum_primitives::{Block, EthPrimitives, TransactionSigned}; use reth_evm::{ - precompiles::PrecompilesMap, ConfigureEvm, EvmEnv, EvmFactory, NextBlockEnvAttributes, - TransactionEnv, + precompiles::PrecompilesMap, 
ConfigureEngineEvm, ConfigureEvm, EvmEnv, EvmEnvFor, EvmFactory, + ExecutableTxIterator, ExecutionCtxFor, NextBlockEnvAttributes, TransactionEnv, }; -use reth_primitives_traits::{SealedBlock, SealedHeader}; +use reth_primitives_traits::{SealedBlock, SealedHeader, SignedTransaction, TxTy}; +use reth_storage_errors::any::AnyError; use revm::{ context::{BlockEnv, CfgEnv}, context_interface::block::BlobExcessGasAndPrice, @@ -275,6 +278,88 @@ where } } +impl ConfigureEngineEvm for EthEvmConfig +where + ChainSpec: EthExecutorSpec + EthChainSpec
<Header = Header>
+ Hardforks + 'static, + EvmF: EvmFactory< + Tx: TransactionEnv + + FromRecoveredTx<TransactionSigned> + + FromTxWithEncoded<TransactionSigned>, + Spec = SpecId, + Precompiles = PrecompilesMap, + > + Clone + + Debug + + Send + + Sync + + Unpin + + 'static, +{ + fn evm_env_for_payload(&self, payload: &ExecutionData) -> EvmEnvFor<Self> { + let timestamp = payload.payload.timestamp(); + let block_number = payload.payload.block_number(); + + let blob_params = self.chain_spec().blob_params_at_timestamp(timestamp); + let spec = + revm_spec_by_timestamp_and_block_number(self.chain_spec(), timestamp, block_number); + + // configure evm env based on parent block + let mut cfg_env = + CfgEnv::new().with_chain_id(self.chain_spec().chain().id()).with_spec(spec); + + if let Some(blob_params) = &blob_params { + cfg_env.set_max_blobs_per_tx(blob_params.max_blobs_per_tx); + } + + // derive the EIP-4844 blob fees from the header's `excess_blob_gas` and the current + // blob params + let blob_excess_gas_and_price = + payload.payload.as_v3().map(|v3| v3.excess_blob_gas).zip(blob_params).map( + |(excess_blob_gas, params)| { + let blob_gasprice = params.calc_blob_fee(excess_blob_gas); + BlobExcessGasAndPrice { excess_blob_gas, blob_gasprice } + }, + ); + + let block_env = BlockEnv { + number: U256::from(block_number), + beneficiary: payload.payload.as_v1().fee_recipient, + timestamp: U256::from(timestamp), + difficulty: if spec >= SpecId::MERGE { + U256::ZERO + } else { + payload.payload.as_v1().prev_randao.into() + }, + prevrandao: (spec >= SpecId::MERGE).then(|| payload.payload.as_v1().prev_randao), + gas_limit: payload.payload.as_v1().gas_limit, + basefee: payload.payload.as_v1().base_fee_per_gas.to(), + blob_excess_gas_and_price, + }; + + EvmEnv { cfg_env, block_env } + } + + fn context_for_payload<'a>(&self, payload: &'a ExecutionData) -> ExecutionCtxFor<'a, Self> { + EthBlockExecutionCtx { + parent_hash: payload.parent_hash(), + parent_beacon_block_root: payload.sidecar.parent_beacon_block_root(), + ommers: &[], + withdrawals: payload + .payload + .as_v2() + .map(|v2| Cow::Owned(v2.withdrawals.clone().into())), + } + } + + fn tx_iterator_for_payload(&self, payload: &ExecutionData) -> impl ExecutableTxIterator<Self> { + payload.payload.transactions().clone().into_iter().map(|tx| { + let tx = + TxTy::<Self::Primitives>::decode_2718_exact(tx.as_ref()).map_err(AnyError::new)?; + let signer = tx.try_recover().map_err(AnyError::new)?; + Ok::<_, AnyError>(tx.with_signer(signer)) + }) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/ethereum/evm/src/test_utils.rs b/crates/ethereum/evm/src/test_utils.rs index 827aa8f43be..a4b3090aa8b 100644 --- a/crates/ethereum/evm/src/test_utils.rs +++ b/crates/ethereum/evm/src/test_utils.rs @@ -3,6 +3,7 @@ use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_consensus::Header; use alloy_eips::eip7685::Requests; use alloy_evm::precompiles::PrecompilesMap; +use alloy_rpc_types_engine::ExecutionData; use parking_lot::Mutex; use reth_ethereum_primitives::{Receipt, TransactionSigned}; use reth_evm::{ @@ -10,7 +11,8 @@ BlockExecutionError, BlockExecutor, BlockExecutorFactory, BlockExecutorFor, CommitChanges, }, eth::{EthBlockExecutionCtx, EthEvmContext}, - ConfigureEvm, Database, EthEvm, EthEvmFactory, Evm, EvmEnvFor, EvmFactory, + ConfigureEngineEvm, ConfigureEvm, Database, EthEvm, EthEvmFactory, Evm, EvmEnvFor, EvmFactory, + ExecutableTxIterator, ExecutionCtxFor, }; use reth_execution_types::{BlockExecutionResult, ExecutionOutcome}; use reth_primitives_traits::{BlockTy, SealedBlock, SealedHeader}; @@ -168,3
+170,17 @@ impl ConfigureEvm for MockEvmConfig { self.inner.context_for_next_block(parent, attributes) } } + +impl ConfigureEngineEvm for MockEvmConfig { + fn evm_env_for_payload(&self, payload: &ExecutionData) -> EvmEnvFor { + self.inner.evm_env_for_payload(payload) + } + + fn context_for_payload<'a>(&self, payload: &'a ExecutionData) -> ExecutionCtxFor<'a, Self> { + self.inner.context_for_payload(payload) + } + + fn tx_iterator_for_payload(&self, payload: &ExecutionData) -> impl ExecutableTxIterator { + self.inner.tx_iterator_for_payload(payload) + } +} diff --git a/crates/ethereum/node/src/engine.rs b/crates/ethereum/node/src/engine.rs index 34cda0e9d60..441e05d1cc7 100644 --- a/crates/ethereum/node/src/engine.rs +++ b/crates/ethereum/node/src/engine.rs @@ -6,7 +6,7 @@ pub use alloy_rpc_types_engine::{ ExecutionPayloadV1, PayloadAttributes as EthPayloadAttributes, }; use reth_chainspec::{EthChainSpec, EthereumHardforks}; -use reth_engine_primitives::{EngineValidator, PayloadValidator}; +use reth_engine_primitives::{EngineApiValidator, PayloadValidator}; use reth_ethereum_payload_builder::EthereumExecutionPayloadValidator; use reth_ethereum_primitives::Block; use reth_node_api::PayloadTypes; @@ -52,7 +52,7 @@ where } } -impl EngineValidator for EthereumEngineValidator +impl EngineApiValidator for EthereumEngineValidator where ChainSpec: EthChainSpec + EthereumHardforks + 'static, Types: PayloadTypes, diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 36dec1a2192..511394e8407 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -29,11 +29,13 @@ use reth_node_builder::{ }, node::{FullNodeTypes, NodeTypes}, rpc::{ - BasicEngineApiBuilder, EngineApiBuilder, EngineValidatorAddOn, EngineValidatorBuilder, - EthApiBuilder, EthApiCtx, Identity, RethRpcAddOns, RpcAddOns, RpcHandle, + BasicEngineApiBuilder, BasicEngineValidatorBuilder, EngineApiBuilder, EngineValidatorAddOn, + EngineValidatorBuilder, EthApiBuilder, EthApiCtx, Identity, PayloadValidatorBuilder, + RethRpcAddOns, RpcAddOns, RpcHandle, }, - BuilderContext, DebugNode, Node, NodeAdapter, PayloadBuilderConfig, PayloadTypes, + BuilderContext, DebugNode, Node, NodeAdapter, PayloadBuilderConfig, }; +use reth_payload_primitives::PayloadTypes; use reth_provider::{providers::ProviderFactoryBuilder, EthStorage}; use reth_rpc::{ eth::core::{EthApiFor, EthRpcConverterFor}, @@ -173,31 +175,49 @@ where pub struct EthereumAddOns< N: FullNodeComponents, EthB: EthApiBuilder, - EV, - EB = BasicEngineApiBuilder, + PVB, + EB = BasicEngineApiBuilder, + EVB = BasicEngineValidatorBuilder, RpcMiddleware = Identity, > { - inner: RpcAddOns, + inner: RpcAddOns, } -impl Default for EthereumAddOns +impl EthereumAddOns where N: FullNodeComponents, + EthB: EthApiBuilder, +{ + /// Creates a new instance from the inner `RpcAddOns`. 
+ pub const fn new(inner: RpcAddOns) -> Self { + Self { inner } + } +} + +impl Default for EthereumAddOns +where + N: FullNodeComponents< + Types: NodeTypes< + ChainSpec: EthereumHardforks + Clone + 'static, + Payload: EngineTypes + + PayloadTypes, + Primitives = EthPrimitives, + >, + >, EthereumEthApiBuilder: EthApiBuilder, { fn default() -> Self { - Self { - inner: RpcAddOns::new( - EthereumEthApiBuilder::default(), - EthereumEngineValidatorBuilder::default(), - BasicEngineApiBuilder::default(), - Default::default(), - ), - } + Self::new(RpcAddOns::new( + EthereumEthApiBuilder::default(), + EthereumEngineValidatorBuilder::default(), + BasicEngineApiBuilder::default(), + BasicEngineValidatorBuilder::default(), + Default::default(), + )) } } -impl EthereumAddOns +impl EthereumAddOns where N: FullNodeComponents, EthB: EthApiBuilder, @@ -206,38 +226,38 @@ where pub fn with_engine_api( self, engine_api_builder: T, - ) -> EthereumAddOns + ) -> EthereumAddOns where T: Send, { let Self { inner } = self; - EthereumAddOns { inner: inner.with_engine_api(engine_api_builder) } + EthereumAddOns::new(inner.with_engine_api(engine_api_builder)) } - /// Replace the engine validator builder. - pub fn with_engine_validator( + /// Replace the payload validator builder. + pub fn with_payload_validator( self, - engine_validator_builder: T, - ) -> EthereumAddOns - where - T: Send, - { + payload_validator_builder: T, + ) -> EthereumAddOns { let Self { inner } = self; - EthereumAddOns { inner: inner.with_engine_validator(engine_validator_builder) } + EthereumAddOns::new(inner.with_payload_validator(payload_validator_builder)) } /// Sets rpc middleware - pub fn with_rpc_middleware(self, rpc_middleware: T) -> EthereumAddOns + pub fn with_rpc_middleware( + self, + rpc_middleware: T, + ) -> EthereumAddOns where T: Send, { let Self { inner } = self; - EthereumAddOns { inner: inner.with_rpc_middleware(rpc_middleware) } + EthereumAddOns::new(inner.with_rpc_middleware(rpc_middleware)) } } -impl NodeAddOns - for EthereumAddOns +impl NodeAddOns + for EthereumAddOns where N: FullNodeComponents< Types: NodeTypes< @@ -248,8 +268,9 @@ where Evm: ConfigureEvm, >, EthB: EthApiBuilder, - EV: EngineValidatorBuilder, + PVB: Send, EB: EngineApiBuilder, + EVB: EngineValidatorBuilder, EthApiError: FromEvmError, EvmFactoryFor: EvmFactory, RpcMiddleware: RethRpcMiddleware, @@ -282,7 +303,7 @@ where } } -impl RethRpcAddOns for EthereumAddOns +impl RethRpcAddOns for EthereumAddOns where N: FullNodeComponents< Types: NodeTypes< @@ -293,8 +314,9 @@ where Evm: ConfigureEvm, >, EthB: EthApiBuilder, - EV: EngineValidatorBuilder, + PVB: PayloadValidatorBuilder, EB: EngineApiBuilder, + EVB: EngineValidatorBuilder, EthApiError: FromEvmError, EvmFactoryFor: EvmFactory, { @@ -305,7 +327,7 @@ where } } -impl EngineValidatorAddOn for EthereumAddOns +impl EngineValidatorAddOn for EthereumAddOns where N: FullNodeComponents< Types: NodeTypes< @@ -316,15 +338,16 @@ where Evm: ConfigureEvm, >, EthB: EthApiBuilder, - EV: EngineValidatorBuilder, + PVB: Send, EB: EngineApiBuilder, + EVB: EngineValidatorBuilder, EthApiError: FromEvmError, EvmFactoryFor: EvmFactory, { - type Validator = EV::Validator; + type ValidatorBuilder = EVB; - async fn engine_validator(&self, ctx: &AddOnsContext<'_, N>) -> eyre::Result { - self.inner.engine_validator(ctx).await + fn engine_validator_builder(&self) -> Self::ValidatorBuilder { + self.inner.engine_validator_builder() } } @@ -517,10 +540,10 @@ where #[non_exhaustive] pub struct EthereumEngineValidatorBuilder; -impl 
EngineValidatorBuilder for EthereumEngineValidatorBuilder +impl PayloadValidatorBuilder for EthereumEngineValidatorBuilder where Types: NodeTypes< - ChainSpec: EthereumHardforks + Clone + 'static, + ChainSpec: Hardforks + EthereumHardforks + Clone + 'static, Payload: EngineTypes + PayloadTypes, Primitives = EthPrimitives, diff --git a/crates/evm/evm/src/engine.rs b/crates/evm/evm/src/engine.rs new file mode 100644 index 00000000000..a1cf824d7c9 --- /dev/null +++ b/crates/evm/evm/src/engine.rs @@ -0,0 +1,33 @@ +use crate::{execute::ExecutableTxFor, ConfigureEvm, EvmEnvFor, ExecutionCtxFor}; + +/// [`ConfigureEvm`] extension providing methods for executing payloads. +pub trait ConfigureEngineEvm: ConfigureEvm { + /// Returns an [`EvmEnvFor`] for the given payload. + fn evm_env_for_payload(&self, payload: &ExecutionData) -> EvmEnvFor; + + /// Returns an [`ExecutionCtxFor`] for the given payload. + fn context_for_payload<'a>(&self, payload: &'a ExecutionData) -> ExecutionCtxFor<'a, Self>; + + /// Returns an [`ExecutableTxIterator`] for the given payload. + fn tx_iterator_for_payload(&self, payload: &ExecutionData) -> impl ExecutableTxIterator; +} + +/// Iterator over executable transactions. +pub trait ExecutableTxIterator: + Iterator> + Send + 'static +{ + /// The executable transaction type iterator yields. + type Tx: ExecutableTxFor + Clone + Send + 'static; + /// Errors that may occur while recovering or decoding transactions. + type Error: core::error::Error + Send + Sync + 'static; +} + +impl ExecutableTxIterator for T +where + Tx: ExecutableTxFor + Clone + Send + 'static, + Err: core::error::Error + Send + Sync + 'static, + T: Iterator> + Send + 'static, +{ + type Tx = Tx; + type Error = Err; +} diff --git a/crates/evm/evm/src/execute.rs b/crates/evm/evm/src/execute.rs index 45e929d23dd..82e9ae2d562 100644 --- a/crates/evm/evm/src/execute.rs +++ b/crates/evm/evm/src/execute.rs @@ -1,15 +1,15 @@ //! Traits for execution. -use crate::{ConfigureEvm, Database, OnStateHook}; +use crate::{ConfigureEvm, Database, OnStateHook, TxEnvFor}; use alloc::{boxed::Box, vec::Vec}; use alloy_consensus::{BlockHeader, Header}; use alloy_eips::eip2718::WithEncoded; pub use alloy_evm::block::{BlockExecutor, BlockExecutorFactory}; use alloy_evm::{ block::{CommitChanges, ExecutableTx}, - Evm, EvmEnv, EvmFactory, + Evm, EvmEnv, EvmFactory, RecoveredTx, ToTxEnv, }; -use alloy_primitives::B256; +use alloy_primitives::{Address, B256}; use core::fmt::Debug; pub use reth_execution_errors::{ BlockExecutionError, BlockValidationError, InternalBlockExecutionError, @@ -552,6 +552,43 @@ where } } +/// A helper trait marking a 'static type that can be converted into an [`ExecutableTx`] for block +/// executor. +pub trait ExecutableTxFor: + ToTxEnv> + RecoveredTx> +{ +} + +impl ExecutableTxFor for T where + T: ToTxEnv> + RecoveredTx> +{ +} + +/// A container for a transaction and a transaction environment. +#[derive(Debug, Clone)] +pub struct WithTxEnv { + /// The transaction environment for EVM. + pub tx_env: TxEnv, + /// The recovered transaction. 
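+    ///
+    /// Stored alongside the pre-computed `tx_env` so the [`ToTxEnv`] impl
+    /// below can return the environment as a cheap clone instead of
+    /// re-deriving it from the transaction.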
+ pub tx: T, +} + +impl> RecoveredTx for WithTxEnv { + fn tx(&self) -> &Tx { + self.tx.tx() + } + + fn signer(&self) -> &Address { + self.tx.signer() + } +} + +impl ToTxEnv for WithTxEnv { + fn to_tx_env(&self) -> TxEnv { + self.tx_env.clone() + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/evm/evm/src/lib.rs b/crates/evm/evm/src/lib.rs index ed8d6d1cde7..412abb5db1d 100644 --- a/crates/evm/evm/src/lib.rs +++ b/crates/evm/evm/src/lib.rs @@ -44,6 +44,9 @@ pub mod execute; mod aliases; pub use aliases::*; +mod engine; +pub use engine::{ConfigureEngineEvm, ExecutableTxIterator}; + #[cfg(feature = "metrics")] pub mod metrics; pub mod noop; @@ -191,9 +194,10 @@ pub trait ConfigureEvm: Clone + Debug + Send + Sync + Unpin { type NextBlockEnvCtx: Debug + Clone; /// Configured [`BlockExecutorFactory`], contains [`EvmFactory`] internally. - type BlockExecutorFactory: BlockExecutorFactory< + type BlockExecutorFactory: for<'a> BlockExecutorFactory< Transaction = TxTy, Receipt = ReceiptTy, + ExecutionCtx<'a>: Debug + Send, EvmFactory: EvmFactory< Tx: TransactionEnv + FromRecoveredTx> diff --git a/crates/evm/evm/src/metrics.rs b/crates/evm/evm/src/metrics.rs index 586c1c154d6..eaa376ec311 100644 --- a/crates/evm/evm/src/metrics.rs +++ b/crates/evm/evm/src/metrics.rs @@ -5,7 +5,7 @@ use crate::{Database, OnStateHook}; use alloy_consensus::BlockHeader; use alloy_evm::{ - block::{BlockExecutor, StateChangeSource}, + block::{BlockExecutor, ExecutableTx, StateChangeSource}, Evm, }; use core::borrow::BorrowMut; @@ -13,7 +13,7 @@ use metrics::{Counter, Gauge, Histogram}; use reth_execution_errors::BlockExecutionError; use reth_execution_types::BlockExecutionOutput; use reth_metrics::Metrics; -use reth_primitives_traits::{Block, BlockBody, RecoveredBlock}; +use reth_primitives_traits::RecoveredBlock; use revm::{ database::{states::bundle_state::BundleRetention, State}, state::EvmState, @@ -75,20 +75,19 @@ pub struct ExecutorMetrics { } impl ExecutorMetrics { - fn metered(&self, block: &RecoveredBlock, f: F) -> R + fn metered(&self, f: F) -> R where - F: FnOnce() -> R, - B: reth_primitives_traits::Block, + F: FnOnce() -> (u64, R), { // Execute the block and record the elapsed time. let execute_start = Instant::now(); - let output = f(); + let (gas_used, output) = f(); let execution_duration = execute_start.elapsed().as_secs_f64(); // Update gas metrics. 
- self.gas_processed_total.increment(block.header().gas_used()); - self.gas_per_second.set(block.header().gas_used() as f64 / execution_duration); - self.gas_used_histogram.record(block.header().gas_used() as f64); + self.gas_processed_total.increment(gas_used); + self.gas_per_second.set(gas_used as f64 / execution_duration); + self.gas_used_histogram.record(gas_used as f64); self.execution_histogram.record(execution_duration); self.execution_duration.set(execution_duration); @@ -105,7 +104,7 @@ impl ExecutorMetrics { pub fn execute_metered( &self, executor: E, - input: &RecoveredBlock>>, + transactions: impl Iterator, BlockExecutionError>>, state_hook: Box, ) -> Result, BlockExecutionError> where @@ -119,13 +118,19 @@ impl ExecutorMetrics { let mut executor = executor.with_state_hook(Some(Box::new(wrapper))); - // Use metered to execute and track timing/gas metrics - let (mut db, result) = self.metered(input, || { + let f = || { executor.apply_pre_execution_changes()?; - for tx in input.transactions_recovered() { - executor.execute_transaction(tx)?; + for tx in transactions { + executor.execute_transaction(tx?)?; } executor.finish().map(|(evm, result)| (evm.into_db(), result)) + }; + + // Use metered to execute and track timing/gas metrics + let (mut db, result) = self.metered(|| { + let res = f(); + let gas_used = res.as_ref().map(|r| r.1.gas_used).unwrap_or(0); + (gas_used, res) })?; // merge transactions into bundle state @@ -151,7 +156,7 @@ impl ExecutorMetrics { F: FnOnce(&RecoveredBlock) -> R, B: reth_primitives_traits::Block, { - self.metered(input, || f(input)) + self.metered(|| (input.header().gas_used(), f(input))) } } @@ -297,7 +302,13 @@ mod tests { state }; let executor = MockExecutor::new(state); - let _result = metrics.execute_metered::<_, EmptyDB>(executor, &input, state_hook).unwrap(); + let _result = metrics + .execute_metered::<_, EmptyDB>( + executor, + input.clone_transactions_recovered().map(Ok::<_, BlockExecutionError>), + state_hook, + ) + .unwrap(); let snapshot = snapshotter.snapshot().into_vec(); @@ -329,7 +340,13 @@ mod tests { let state = EvmState::default(); let executor = MockExecutor::new(state); - let _result = metrics.execute_metered::<_, EmptyDB>(executor, &input, state_hook).unwrap(); + let _result = metrics + .execute_metered::<_, EmptyDB>( + executor, + input.clone_transactions_recovered().map(Ok::<_, BlockExecutionError>), + state_hook, + ) + .unwrap(); let actual_output = rx.try_recv().unwrap(); assert_eq!(actual_output, expected_output); diff --git a/crates/evm/execution-types/src/execution_outcome.rs b/crates/evm/execution-types/src/execution_outcome.rs index b198713a2e0..098b3c8aeed 100644 --- a/crates/evm/execution-types/src/execution_outcome.rs +++ b/crates/evm/execution-types/src/execution_outcome.rs @@ -201,7 +201,7 @@ impl ExecutionOutcome { } /// Transform block number to the index of block. - pub fn block_number_to_index(&self, block_number: BlockNumber) -> Option { + pub const fn block_number_to_index(&self, block_number: BlockNumber) -> Option { if self.first_block > block_number { return None } @@ -240,12 +240,12 @@ impl ExecutionOutcome { } /// Is execution outcome empty. - pub fn is_empty(&self) -> bool { + pub const fn is_empty(&self) -> bool { self.len() == 0 } /// Number of blocks in the execution outcome. 
- pub fn len(&self) -> usize { + pub const fn len(&self) -> usize { self.receipts.len() } @@ -255,7 +255,7 @@ impl ExecutionOutcome { } /// Return last block of the execution outcome - pub fn last_block(&self) -> BlockNumber { + pub const fn last_block(&self) -> BlockNumber { (self.first_block + self.len() as u64).saturating_sub(1) } diff --git a/crates/exex/exex/src/backfill/stream.rs b/crates/exex/exex/src/backfill/stream.rs index 2525f804224..aa7cacdba4a 100644 --- a/crates/exex/exex/src/backfill/stream.rs +++ b/crates/exex/exex/src/backfill/stream.rs @@ -239,21 +239,34 @@ where #[cfg(test)] mod tests { + use super::*; use crate::{ backfill::test_utils::{ blocks_and_execution_outcome, blocks_and_execution_outputs, chain_spec, + execute_block_and_commit_to_database, }, BackfillJobFactory, }; + use alloy_consensus::{constants::ETH_TO_WEI, Header, TxEip2930}; + use alloy_primitives::{b256, Address, TxKind, U256}; + use eyre::Result; use futures::StreamExt; + use reth_chainspec::{ChainSpec, EthereumHardfork, MIN_TRANSACTION_GAS}; use reth_db_common::init::init_genesis; + use reth_ethereum_primitives::{Block, BlockBody, Transaction}; use reth_evm_ethereum::EthEvmConfig; - use reth_primitives_traits::crypto::secp256k1::public_key_to_address; + use reth_primitives_traits::{ + crypto::secp256k1::public_key_to_address, Block as _, FullNodePrimitives, + }; use reth_provider::{ - providers::BlockchainProvider, test_utils::create_test_provider_factory_with_chain_spec, + providers::{BlockchainProvider, ProviderNodeTypes}, + test_utils::create_test_provider_factory_with_chain_spec, + ProviderFactory, }; use reth_stages_api::ExecutionStageThresholds; - use reth_testing_utils::generators; + use reth_testing_utils::{generators, generators::sign_tx_with_key_pair}; + use secp256k1::Keypair; + use std::sync::Arc; #[tokio::test] async fn test_single_blocks() -> eyre::Result<()> { @@ -327,4 +340,131 @@ mod tests { Ok(()) } + + fn create_blocks( + chain_spec: &Arc, + key_pair: Keypair, + n: u64, + ) -> Result>> { + let mut blocks = Vec::with_capacity(n as usize); + let mut parent_hash = chain_spec.genesis_hash(); + + for (i, nonce) in (1..=n).zip(0..n) { + let block = Block { + header: Header { + parent_hash, + // Hardcoded receipts_root matching the original test (same tx in each block) + receipts_root: b256!( + "0xd3a6acf9a244d78b33831df95d472c4128ea85bf079a1d41e32ed0b7d2244c9e" + ), + difficulty: chain_spec.fork(EthereumHardfork::Paris).ttd().expect("Paris TTD"), + number: i, + gas_limit: MIN_TRANSACTION_GAS, + gas_used: MIN_TRANSACTION_GAS, + ..Default::default() + }, + body: BlockBody { + transactions: vec![sign_tx_with_key_pair( + key_pair, + Transaction::Eip2930(TxEip2930 { + chain_id: chain_spec.chain.id(), + nonce, + gas_limit: MIN_TRANSACTION_GAS, + gas_price: 1_500_000_000, + to: TxKind::Call(Address::ZERO), + value: U256::from(0.1 * ETH_TO_WEI as f64), + ..Default::default() + }), + )], + ..Default::default() + }, + } + .try_into_recovered()?; + + parent_hash = block.hash(); + blocks.push(block); + } + + Ok(blocks) + } + + fn execute_and_commit_blocks( + provider_factory: &ProviderFactory, + chain_spec: &Arc, + blocks: &[RecoveredBlock], + ) -> Result<()> + where + N: ProviderNodeTypes< + Primitives: FullNodePrimitives< + Block = reth_ethereum_primitives::Block, + BlockBody = reth_ethereum_primitives::BlockBody, + Receipt = reth_ethereum_primitives::Receipt, + >, + >, + { + for block in blocks { + execute_block_and_commit_to_database(provider_factory, chain_spec.clone(), block)?; + } + Ok(()) + } 
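+    // End-to-end check: streaming backfill with batching and parallelism
+    // should produce exactly the chains the sequential job produces.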
+ + #[tokio::test] + async fn test_batch_parallel_range_advance() -> Result<()> { + reth_tracing::init_test_tracing(); + + // Create a key pair for the sender + let key_pair = generators::generate_key(&mut generators::rng()); + let address = public_key_to_address(key_pair.public_key()); + + let chain_spec = chain_spec(address); + + let executor = EthEvmConfig::ethereum(chain_spec.clone()); + let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); + init_genesis(&provider_factory)?; + let blockchain_db = BlockchainProvider::new(provider_factory.clone())?; + + // Create and commit 4 blocks + let blocks = create_blocks(&chain_spec, key_pair, 4)?; + execute_and_commit_blocks(&provider_factory, &chain_spec, &blocks)?; + + // Create factory with batch size 2 (via thresholds max_blocks=2) and parallelism=2 + let factory = BackfillJobFactory::new(executor.clone(), blockchain_db.clone()) + .with_thresholds(ExecutionStageThresholds { max_blocks: Some(2), ..Default::default() }) + .with_stream_parallelism(2); + + // Stream backfill for range 1..=4 + let mut backfill_stream = factory.backfill(1..=4).into_stream(); + + // Collect the two expected chains from the stream + let mut chain1 = backfill_stream.next().await.unwrap()?; + let mut chain2 = backfill_stream.next().await.unwrap()?; + assert!(backfill_stream.next().await.is_none()); + + // Sort reverts for comparison + chain1.execution_outcome_mut().state_mut().reverts.sort(); + chain2.execution_outcome_mut().state_mut().reverts.sort(); + + // Compute expected chains using non-stream BackfillJob (sequential) + let factory_seq = + BackfillJobFactory::new(executor.clone(), blockchain_db.clone()).with_thresholds( + ExecutionStageThresholds { max_blocks: Some(2), ..Default::default() }, + ); + + let mut expected_chain1 = + factory_seq.backfill(1..=2).collect::, _>>()?.into_iter().next().unwrap(); + let mut expected_chain2 = + factory_seq.backfill(3..=4).collect::, _>>()?.into_iter().next().unwrap(); + + // Sort reverts for expected + expected_chain1.execution_outcome_mut().state_mut().reverts.sort(); + expected_chain2.execution_outcome_mut().state_mut().reverts.sort(); + + // Assert the streamed chains match the expected sequential ones + assert_eq!(chain1.blocks(), expected_chain1.blocks()); + assert_eq!(chain1.execution_outcome(), expected_chain1.execution_outcome()); + assert_eq!(chain2.blocks(), expected_chain2.blocks()); + assert_eq!(chain2.execution_outcome(), expected_chain2.execution_outcome()); + + Ok(()) + } } diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index 17fa68754e5..e4e93bce787 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -547,58 +547,54 @@ pub fn spawn_populate_kbuckets_bg( metrics: Discv5Metrics, discv5: Arc, ) { - task::spawn({ - let local_node_id = discv5.local_enr().node_id(); - let lookup_interval = Duration::from_secs(lookup_interval); - let metrics = metrics.discovered_peers; - let mut kbucket_index = MAX_KBUCKET_INDEX; - let pulse_lookup_interval = Duration::from_secs(bootstrap_lookup_interval); - // todo: graceful shutdown - - async move { - // make many fast lookup queries at bootstrap, trying to fill kbuckets at furthest - // log2distance from local node - for i in (0..bootstrap_lookup_countdown).rev() { - let target = discv5::enr::NodeId::random(); + let local_node_id = discv5.local_enr().node_id(); + let lookup_interval = Duration::from_secs(lookup_interval); + let metrics = metrics.discovered_peers; + let mut kbucket_index = 
MAX_KBUCKET_INDEX; + let pulse_lookup_interval = Duration::from_secs(bootstrap_lookup_interval); + task::spawn(Box::pin(async move { + // make many fast lookup queries at bootstrap, trying to fill kbuckets at furthest + // log2distance from local node + for i in (0..bootstrap_lookup_countdown).rev() { + let target = discv5::enr::NodeId::random(); - trace!(target: "net::discv5", - %target, - bootstrap_boost_runs_countdown=i, - lookup_interval=format!("{:#?}", pulse_lookup_interval), - "starting bootstrap boost lookup query" - ); - - lookup(target, &discv5, &metrics).await; + trace!(target: "net::discv5", + %target, + bootstrap_boost_runs_countdown=i, + lookup_interval=format!("{:#?}", pulse_lookup_interval), + "starting bootstrap boost lookup query" + ); - tokio::time::sleep(pulse_lookup_interval).await; - } + lookup(target, &discv5, &metrics).await; - // initiate regular lookups to populate kbuckets - loop { - // make sure node is connected to each subtree in the network by target - // selection (ref kademlia) - let target = get_lookup_target(kbucket_index, local_node_id); + tokio::time::sleep(pulse_lookup_interval).await; + } - trace!(target: "net::discv5", - %target, - lookup_interval=format!("{:#?}", lookup_interval), - "starting periodic lookup query" - ); + // initiate regular lookups to populate kbuckets + loop { + // make sure node is connected to each subtree in the network by target + // selection (ref kademlia) + let target = get_lookup_target(kbucket_index, local_node_id); - lookup(target, &discv5, &metrics).await; + trace!(target: "net::discv5", + %target, + lookup_interval=format!("{:#?}", lookup_interval), + "starting periodic lookup query" + ); - if kbucket_index > DEFAULT_MIN_TARGET_KBUCKET_INDEX { - // try to populate bucket one step closer - kbucket_index -= 1 - } else { - // start over with bucket furthest away - kbucket_index = MAX_KBUCKET_INDEX - } + lookup(target, &discv5, &metrics).await; - tokio::time::sleep(lookup_interval).await; + if kbucket_index > DEFAULT_MIN_TARGET_KBUCKET_INDEX { + // try to populate bucket one step closer + kbucket_index -= 1 + } else { + // start over with bucket furthest away + kbucket_index = MAX_KBUCKET_INDEX } + + tokio::time::sleep(lookup_interval).await; } - }); + })); } /// Gets the next lookup target, based on which bucket is currently being targeted. diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index bd7fc602340..0c7b1e62012 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -449,7 +449,7 @@ struct OrderedBodiesResponse { impl OrderedBodiesResponse { #[inline] - fn len(&self) -> usize { + const fn len(&self) -> usize { self.resp.len() } diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index a0b83d72d44..53d8c7faa12 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -437,7 +437,7 @@ impl ChunkedFileReader { /// Calculates the number of bytes to read from the chain file. Returns a tuple of the chunk /// length and the remaining file length. - fn chunk_len(&self) -> u64 { + const fn chunk_len(&self) -> u64 { let Self { chunk_byte_len, file_byte_len, .. 
} = *self; let file_byte_len = file_byte_len + self.chunk.len() as u64; diff --git a/crates/net/downloaders/src/test_utils/bodies_client.rs b/crates/net/downloaders/src/test_utils/bodies_client.rs index 6b0c65a38a9..103557a6162 100644 --- a/crates/net/downloaders/src/test_utils/bodies_client.rs +++ b/crates/net/downloaders/src/test_utils/bodies_client.rs @@ -61,7 +61,7 @@ impl TestBodiesClient { /// empty_response_mod == 0`. pub(crate) fn should_respond_empty(&self) -> bool { if let Some(empty_response_mod) = self.empty_response_mod { - self.times_requested.load(Ordering::Relaxed) % empty_response_mod == 0 + self.times_requested.load(Ordering::Relaxed).is_multiple_of(empty_response_mod) } else { false } diff --git a/crates/net/eth-wire-types/src/broadcast.rs b/crates/net/eth-wire-types/src/broadcast.rs index c877b673c78..15e7bb70eba 100644 --- a/crates/net/eth-wire-types/src/broadcast.rs +++ b/crates/net/eth-wire-types/src/broadcast.rs @@ -228,7 +228,7 @@ impl NewPooledTransactionHashes { } /// Returns true if the message is empty - pub fn is_empty(&self) -> bool { + pub const fn is_empty(&self) -> bool { match self { Self::Eth66(msg) => msg.0.is_empty(), Self::Eth68(msg) => msg.hashes.is_empty(), @@ -236,7 +236,7 @@ impl NewPooledTransactionHashes { } /// Returns the number of hashes in the message - pub fn len(&self) -> usize { + pub const fn len(&self) -> usize { match self { Self::Eth66(msg) => msg.0.len(), Self::Eth68(msg) => msg.hashes.len(), diff --git a/crates/net/eth-wire/src/capability.rs b/crates/net/eth-wire/src/capability.rs index 613ec87a4be..a716fcea6e2 100644 --- a/crates/net/eth-wire/src/capability.rs +++ b/crates/net/eth-wire/src/capability.rs @@ -238,13 +238,13 @@ impl SharedCapabilities { /// Returns the number of shared capabilities. #[inline] - pub fn len(&self) -> usize { + pub const fn len(&self) -> usize { self.0.len() } /// Returns true if there are no shared capabilities. #[inline] - pub fn is_empty(&self) -> bool { + pub const fn is_empty(&self) -> bool { self.0.is_empty() } } diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 05ab9ecbf71..48f9e81295d 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -1107,6 +1107,10 @@ where /// This fetches all transaction from the pool, including the 4844 blob transactions but /// __without__ their sidecar, because 4844 transactions are only ever announced as hashes. 
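+    ///
+    /// Short-circuits when no peers are connected, since collecting the
+    /// transactions from the pool would be wasted work with an empty peer set.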
fn propagate_all(&mut self, hashes: Vec) { + if self.peers.is_empty() { + // nothing to propagate + return + } let propagated = self.propagate_transactions( self.pool.get_all(hashes).into_iter().map(PropagateTransaction::pool_tx).collect(), PropagationMode::Basic, diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index ba02aba2649..f0c62f3252a 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -24,6 +24,7 @@ reth-db-api.workspace = true reth-db-common.workspace = true reth-downloaders.workspace = true reth-engine-local.workspace = true +reth-engine-primitives.workspace = true reth-engine-service.workspace = true reth-engine-tree.workspace = true reth-engine-util.workspace = true @@ -39,6 +40,7 @@ reth-node-core.workspace = true reth-node-events.workspace = true reth-node-metrics.workspace = true reth-payload-builder.workspace = true +reth-primitives-traits.workspace = true reth-provider.workspace = true reth-prune.workspace = true reth-rpc.workspace = true @@ -75,8 +77,8 @@ secp256k1 = { workspace = true, features = ["global-context", "std", "recovery"] ## misc aquamarine.workspace = true eyre.workspace = true -fdlimit.workspace = true jsonrpsee.workspace = true +fdlimit.workspace = true rayon.workspace = true serde_json.workspace = true @@ -112,10 +114,12 @@ test-utils = [ "reth-transaction-pool/test-utils", "reth-evm-ethereum/test-utils", "reth-node-ethereum/test-utils", + "reth-primitives-traits/test-utils", ] op = [ "reth-db?/op", "reth-db-api/op", "reth-engine-local/op", "reth-evm/op", + "reth-primitives-traits/op", ] diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 97033320e3c..5014576761a 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -839,15 +839,15 @@ impl BuilderContext { .request_handler(self.provider().clone()) .split_with_handle(); - self.executor.spawn_critical("p2p txpool", txpool); - self.executor.spawn_critical("p2p eth request handler", eth); + self.executor.spawn_critical("p2p txpool", Box::pin(txpool)); + self.executor.spawn_critical("p2p eth request handler", Box::pin(eth)); let default_peers_path = self.config().datadir().known_peers(); let known_peers_file = self.config().network.persistent_peers_file(default_peers_path); self.executor.spawn_critical_with_graceful_shutdown_signal( "p2p network task", |shutdown| { - network.run_until_graceful_shutdown(shutdown, |network| { + Box::pin(network.run_until_graceful_shutdown(shutdown, |network| { if let Some(peers_file) = known_peers_file { let num_known_peers = network.num_known_peers(); trace!(target: "reth::cli", peers_file=?peers_file, num_peers=%num_known_peers, "Saving current peers"); @@ -860,7 +860,7 @@ impl BuilderContext { } } } - }) + })) }, ); diff --git a/crates/node/builder/src/components/builder.rs b/crates/node/builder/src/components/builder.rs index 57a200617da..2fcafeb4e91 100644 --- a/crates/node/builder/src/components/builder.rs +++ b/crates/node/builder/src/components/builder.rs @@ -7,11 +7,15 @@ use crate::{ }, BuilderContext, ConfigureEvm, FullNodeTypes, }; -use reth_consensus::{ConsensusError, FullConsensus}; -use reth_network::types::NetPrimitivesFor; -use reth_network_api::FullNetwork; -use reth_node_api::{PrimitivesTy, TxTy}; -use reth_transaction_pool::{PoolPooledTx, PoolTransaction, TransactionPool}; +use reth_consensus::{noop::NoopConsensus, ConsensusError, FullConsensus}; +use reth_network::{types::NetPrimitivesFor, 
EthNetworkPrimitives, NetworkPrimitives}; +use reth_network_api::{noop::NoopNetwork, FullNetwork}; +use reth_node_api::{BlockTy, BodyTy, HeaderTy, NodeTypes, PrimitivesTy, ReceiptTy, TxTy}; +use reth_payload_builder::PayloadBuilderHandle; +use reth_transaction_pool::{ + noop::NoopTransactionPool, EthPoolTransaction, EthPooledTransaction, PoolPooledTx, + PoolTransaction, TransactionPool, +}; use std::{future::Future, marker::PhantomData}; /// A generic, general purpose and customizable [`NodeComponentsBuilder`] implementation. @@ -165,6 +169,21 @@ where _marker, } } + + /// Sets [`NoopTransactionPoolBuilder`]. + pub fn noop_pool( + self, + ) -> ComponentsBuilder, PayloadB, NetworkB, ExecB, ConsB> + { + ComponentsBuilder { + pool_builder: NoopTransactionPoolBuilder::::default(), + payload_builder: self.payload_builder, + network_builder: self.network_builder, + executor_builder: self.executor_builder, + consensus_builder: self.consensus_builder, + _marker: self._marker, + } + } } impl @@ -290,6 +309,48 @@ where _marker, } } + + /// Sets [`NoopNetworkBuilder`]. + pub fn noop_network( + self, + ) -> ComponentsBuilder, ExecB, ConsB> { + ComponentsBuilder { + pool_builder: self.pool_builder, + payload_builder: self.payload_builder, + network_builder: NoopNetworkBuilder::::default(), + executor_builder: self.executor_builder, + consensus_builder: self.consensus_builder, + _marker: self._marker, + } + } + + /// Sets [`NoopPayloadBuilder`]. + pub fn noop_payload( + self, + ) -> ComponentsBuilder { + ComponentsBuilder { + pool_builder: self.pool_builder, + payload_builder: NoopPayloadBuilder, + network_builder: self.network_builder, + executor_builder: self.executor_builder, + consensus_builder: self.consensus_builder, + _marker: self._marker, + } + } + + /// Sets [`NoopConsensusBuilder`]. + pub fn noop_consensus( + self, + ) -> ComponentsBuilder { + ComponentsBuilder { + pool_builder: self.pool_builder, + payload_builder: self.payload_builder, + network_builder: self.network_builder, + executor_builder: self.executor_builder, + consensus_builder: NoopConsensusBuilder, + _marker: self._marker, + } + } } impl NodeComponentsBuilder @@ -405,3 +466,92 @@ where self(ctx) } } + +/// Builds [`NoopTransactionPool`]. +#[derive(Debug, Clone)] +pub struct NoopTransactionPoolBuilder(PhantomData); + +impl PoolBuilder for NoopTransactionPoolBuilder +where + N: FullNodeTypes, + Tx: EthPoolTransaction> + Unpin, +{ + type Pool = NoopTransactionPool; + + async fn build_pool(self, _ctx: &BuilderContext) -> eyre::Result { + Ok(NoopTransactionPool::::new()) + } +} + +impl Default for NoopTransactionPoolBuilder { + fn default() -> Self { + Self(PhantomData) + } +} + +/// Builds [`NoopNetwork`]. +#[derive(Debug, Clone)] +pub struct NoopNetworkBuilder(PhantomData); + +impl NetworkBuilder for NoopNetworkBuilder +where + N: FullNodeTypes, + Pool: TransactionPool, + Net: NetworkPrimitives< + BlockHeader = HeaderTy, + BlockBody = BodyTy, + Block = BlockTy, + Receipt = ReceiptTy, + >, +{ + type Network = NoopNetwork; + + async fn build_network( + self, + _ctx: &BuilderContext, + _pool: Pool, + ) -> eyre::Result { + Ok(NoopNetwork::new()) + } +} + +impl Default for NoopNetworkBuilder { + fn default() -> Self { + Self(PhantomData) + } +} + +/// Builds [`NoopConsensus`]. 
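+///
+/// Like the noop pool, network and payload builders above, this is typically
+/// reached through the matching `ComponentsBuilder` helper; e.g. a test setup
+/// might chain `.noop_pool().noop_network().noop_payload().noop_consensus()`
+/// to swap every component for its noop counterpart.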
+#[derive(Debug, Clone, Default)] +pub struct NoopConsensusBuilder; + +impl ConsensusBuilder for NoopConsensusBuilder +where + N: FullNodeTypes, +{ + type Consensus = NoopConsensus; + + async fn build_consensus(self, _ctx: &BuilderContext) -> eyre::Result { + Ok(NoopConsensus::default()) + } +} + +/// Builds [`PayloadBuilderHandle::noop`]. +#[derive(Debug, Clone, Default)] +pub struct NoopPayloadBuilder; + +impl PayloadServiceBuilder for NoopPayloadBuilder +where + N: FullNodeTypes, + Pool: TransactionPool, + EVM: ConfigureEvm> + 'static, +{ + async fn spawn_payload_builder_service( + self, + _ctx: &BuilderContext, + _pool: Pool, + _evm_config: EVM, + ) -> eyre::Result::Payload>> { + Ok(PayloadBuilderHandle::<::Payload>::noop()) + } +} diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 49381462fa9..2dea663b4ab 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -37,7 +37,7 @@ use crate::{ use alloy_consensus::BlockHeader as _; use alloy_eips::eip2124::Head; use alloy_primitives::{BlockNumber, B256}; -use eyre::{Context, OptionExt}; +use eyre::Context; use rayon::ThreadPoolBuilder; use reth_chainspec::{Chain, EthChainSpec, EthereumHardfork, EthereumHardforks}; use reth_config::{config::EtlConfig, PruneConfig}; @@ -46,15 +46,13 @@ use reth_db_api::{database::Database, database_metrics::DatabaseMetrics}; use reth_db_common::init::{init_genesis, InitStorageError}; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; use reth_engine_local::MiningMode; -use reth_engine_tree::tree::{InvalidBlockHook, InvalidBlockHooks, NoopInvalidBlockHook}; use reth_evm::{noop::NoopEvmConfig, ConfigureEvm}; use reth_exex::ExExManagerHandle; use reth_fs_util as fs; -use reth_invalid_block_hooks::InvalidBlockWitnessHook; use reth_network_p2p::headers::client::HeadersClient; use reth_node_api::{FullNodeTypes, NodeTypes, NodeTypesWithDB, NodeTypesWithDBAdapter}; use reth_node_core::{ - args::{DefaultEraHost, InvalidBlockHookType}, + args::DefaultEraHost, dirs::{ChainPath, DataDirPath}, node_config::NodeConfig, primitives::BlockHeader, @@ -77,7 +75,6 @@ use reth_provider::{ StaticFileProviderFactory, }; use reth_prune::{PruneModes, PrunerBuilder}; -use reth_rpc_api::clients::EthApiClient; use reth_rpc_builder::config::RethRpcServerConfig; use reth_rpc_layer::JwtSecret; use reth_stages::{ @@ -452,7 +449,7 @@ impl LaunchContextWith, CB: NodeComponentsBuilder, { - /// Returns the [`InvalidBlockHook`] to use for the node. 
- pub async fn invalid_block_hook( - &self, - ) -> eyre::Result::Primitives>>> { - let Some(ref hook) = self.node_config().debug.invalid_block_hook else { - return Ok(Box::new(NoopInvalidBlockHook::default())) - }; - let healthy_node_rpc_client = self.get_healthy_node_client().await?; - - let output_directory = self.data_dir().invalid_block_hooks(); - let hooks = hook - .iter() - .copied() - .map(|hook| { - let output_directory = output_directory.join(hook.to_string()); - fs::create_dir_all(&output_directory)?; - - Ok(match hook { - InvalidBlockHookType::Witness => Box::new(InvalidBlockWitnessHook::new( - self.blockchain_db().clone(), - self.components().evm_config().clone(), - output_directory, - healthy_node_rpc_client.clone(), - )), - InvalidBlockHookType::PreState | InvalidBlockHookType::Opcode => { - eyre::bail!("invalid block hook {hook:?} is not implemented yet") - } - } as Box>) - }) - .collect::>()?; - - Ok(Box::new(InvalidBlockHooks(hooks))) - } - - /// Returns an RPC client for the healthy node, if configured in the node config. - async fn get_healthy_node_client( - &self, - ) -> eyre::Result> { - let Some(url) = self.node_config().debug.healthy_node_rpc_url.as_ref() else { - return Ok(None); - }; - - let client = jsonrpsee::http_client::HttpClientBuilder::default().build(url)?; - - // Verify that the healthy node is running the same chain as the current node. - let chain_id = EthApiClient::< - alloy_rpc_types::TransactionRequest, - alloy_rpc_types::Transaction, - alloy_rpc_types::Block, - alloy_rpc_types::Receipt, - alloy_rpc_types::Header, - >::chain_id(&client) - .await? - .ok_or_eyre("healthy node rpc client didn't return a chain id")?; - - if chain_id.to::() != self.chain_id().id() { - eyre::bail!("invalid chain id for healthy node: {chain_id}") - } - - Ok(Some(client)) - } } /// Joins two attachments together, preserving access to both values. 
diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index f0cb7a4c085..80850730497 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -3,7 +3,7 @@ use crate::{ common::{Attached, LaunchContextWith, WithConfigs}, hooks::NodeHooks, - rpc::{EngineValidatorAddOn, RethRpcAddOns, RpcHandleProvider}, + rpc::{EngineValidatorAddOn, EngineValidatorBuilder, RethRpcAddOns, RpcHandleProvider}, setup::build_networked_pipeline, AddOns, AddOnsContext, FullNode, LaunchContext, LaunchNode, NodeAdapter, NodeBuilderWithComponents, NodeComponents, NodeComponentsBuilder, NodeHandle, NodeTypesAdapter, @@ -15,7 +15,7 @@ use reth_db_api::{database_metrics::DatabaseMetrics, Database}; use reth_engine_service::service::{ChainEvent, EngineService}; use reth_engine_tree::{ engine::{EngineApiRequest, EngineRequestHandler}, - tree::{BasicEngineValidator, TreeConfig}, + tree::TreeConfig, }; use reth_engine_util::EngineMessageStreamExt; use reth_exex::ExExManagerHandle; @@ -196,32 +196,31 @@ where jwt_secret, engine_events: event_sender.clone(), }; - let engine_payload_validator = add_ons.engine_validator(&add_ons_ctx).await?; + let validator_builder = add_ons.engine_validator_builder(); + // Build the engine validator with all required components + let engine_validator = validator_builder + .clone() + .build_tree_validator(&add_ons_ctx, engine_tree_config.clone()) + .await?; + + // Create the consensus engine stream with optional reorg let consensus_engine_stream = UnboundedReceiverStream::from(consensus_engine_rx) .maybe_skip_fcu(node_config.debug.skip_fcu) .maybe_skip_new_payload(node_config.debug.skip_new_payload) .maybe_reorg( ctx.blockchain_db().clone(), ctx.components().evm_config().clone(), - engine_payload_validator.clone(), + || validator_builder.build_tree_validator(&add_ons_ctx, engine_tree_config.clone()), node_config.debug.reorg_frequency, node_config.debug.reorg_depth, ) + .await? // Store messages _after_ skipping so that `replay-engine` command // would replay only the messages that were observed by the engine // during this run. 
.maybe_store_messages(node_config.debug.engine_api_store.clone()); - let engine_validator = BasicEngineValidator::new( - ctx.blockchain_db().clone(), - consensus.clone(), - ctx.components().evm_config().clone(), - engine_payload_validator, - engine_tree_config.clone(), - ctx.invalid_block_hook().await?, - ); - let mut engine_service = EngineService::new( consensus.clone(), ctx.chain_spec(), @@ -251,11 +250,11 @@ where ctx.task_executor().spawn_critical( "events task", - node::handle_events( + Box::pin(node::handle_events( Some(Box::new(ctx.components().network().clone())), Some(ctx.head().number), events, - ), + )), ); let add_ons_handle = add_ons.launch_add_ons(add_ons_ctx).await?; @@ -277,7 +276,7 @@ where let terminate_after_backfill = ctx.terminate_after_initial_backfill(); info!(target: "reth::cli", "Starting consensus engine"); - ctx.task_executor().spawn_critical("consensus engine", async move { + ctx.task_executor().spawn_critical("consensus engine", Box::pin(async move { if let Some(initial_target) = initial_target { debug!(target: "reth::cli", %initial_target, "start backfill sync"); engine_service.orchestrator_mut().start_backfill_sync(initial_target); @@ -340,7 +339,7 @@ where } let _ = exit.send(res); - }); + })); let full_node = FullNode { evm_config: ctx.components().evm_config().clone(), diff --git a/crates/node/builder/src/launch/invalid_block_hook.rs b/crates/node/builder/src/launch/invalid_block_hook.rs new file mode 100644 index 00000000000..7221077847a --- /dev/null +++ b/crates/node/builder/src/launch/invalid_block_hook.rs @@ -0,0 +1,141 @@ +//! Invalid block hook helpers for the node builder. + +use crate::AddOnsContext; +use alloy_rpc_types::{Block, Header, Receipt, Transaction, TransactionRequest}; +use eyre::OptionExt; +use reth_chainspec::EthChainSpec; +use reth_engine_primitives::InvalidBlockHook; +use reth_node_api::{FullNodeComponents, NodeTypes}; +use reth_node_core::{ + args::InvalidBlockHookType, + dirs::{ChainPath, DataDirPath}, + node_config::NodeConfig, +}; +use reth_primitives_traits::NodePrimitives; +use reth_provider::ChainSpecProvider; +use reth_rpc_api::EthApiClient; + +/// Extension trait for [`AddOnsContext`] to create invalid block hooks. +pub trait InvalidBlockHookExt { + /// Node primitives type. + type Primitives: NodePrimitives; + + /// Creates an invalid block hook based on the node configuration. + fn create_invalid_block_hook( + &self, + data_dir: &ChainPath, + ) -> impl std::future::Future>>> + + Send; +} + +impl InvalidBlockHookExt for AddOnsContext<'_, N> +where + N: FullNodeComponents, +{ + type Primitives = ::Primitives; + + async fn create_invalid_block_hook( + &self, + data_dir: &ChainPath, + ) -> eyre::Result>> { + create_invalid_block_hook( + self.config, + data_dir, + self.node.provider().clone(), + self.node.evm_config().clone(), + self.node.provider().chain_spec().chain().id(), + ) + .await + } +} + +/// Creates an invalid block hook based on the node configuration. +/// +/// This function constructs the appropriate [`InvalidBlockHook`] based on the debug +/// configuration in the node config. 
It supports: +/// - Witness hooks for capturing block witness data +/// - Healthy node verification via RPC +/// +/// # Arguments +/// * `config` - The node configuration containing debug settings +/// * `data_dir` - The data directory for storing hook outputs +/// * `provider` - The blockchain database provider +/// * `evm_config` - The EVM configuration +/// * `chain_id` - The chain ID for verification +pub async fn create_invalid_block_hook( + config: &NodeConfig, + data_dir: &ChainPath, + provider: P, + evm_config: E, + chain_id: u64, +) -> eyre::Result>> +where + N: NodePrimitives, + P: reth_provider::StateProviderFactory + + reth_provider::ChainSpecProvider + + Clone + + Send + + Sync + + 'static, + E: reth_evm::ConfigureEvm + Clone + 'static, +{ + use reth_engine_primitives::{InvalidBlockHooks, NoopInvalidBlockHook}; + use reth_invalid_block_hooks::InvalidBlockWitnessHook; + + let Some(ref hook) = config.debug.invalid_block_hook else { + return Ok(Box::new(NoopInvalidBlockHook::default())) + }; + + let healthy_node_rpc_client = get_healthy_node_client(config, chain_id).await?; + + let output_directory = data_dir.invalid_block_hooks(); + let hooks = hook + .iter() + .copied() + .map(|hook| { + let output_directory = output_directory.join(hook.to_string()); + std::fs::create_dir_all(&output_directory)?; + + Ok(match hook { + InvalidBlockHookType::Witness => Box::new(InvalidBlockWitnessHook::new( + provider.clone(), + evm_config.clone(), + output_directory, + healthy_node_rpc_client.clone(), + )), + InvalidBlockHookType::PreState | InvalidBlockHookType::Opcode => { + eyre::bail!("invalid block hook {hook:?} is not implemented yet") + } + } as Box>) + }) + .collect::>()?; + + Ok(Box::new(InvalidBlockHooks(hooks))) +} + +/// Returns an RPC client for the healthy node, if configured in the node config. +async fn get_healthy_node_client( + config: &NodeConfig, + chain_id: u64, +) -> eyre::Result> +where + C: EthChainSpec, +{ + let Some(url) = config.debug.healthy_node_rpc_url.as_ref() else { + return Ok(None); + }; + + let client = jsonrpsee::http_client::HttpClientBuilder::default().build(url)?; + + // Verify that the healthy node is running the same chain as the current node. + let healthy_chain_id = + EthApiClient::::chain_id(&client) + .await? + .ok_or_eyre("healthy node rpc client didn't return a chain id")?; + + if healthy_chain_id.to::() != chain_id { + eyre::bail!("Invalid chain ID. Expected {}, got {}", chain_id, healthy_chain_id); + } + + Ok(Some(client)) +} diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index 2f770e69564..30ae2cd49ea 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -2,6 +2,7 @@ pub mod common; mod exex; +pub mod invalid_block_hook; pub(crate) mod debug; pub(crate) mod engine; diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index c124129eece..ec9832d7e97 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -1,17 +1,21 @@ //! Builder support for rpc components. 
pub use jsonrpsee::server::middleware::rpc::{RpcService, RpcServiceBuilder}; -pub use reth_rpc_builder::{middleware::RethRpcMiddleware, Identity}; +pub use reth_engine_tree::tree::{BasicEngineValidator, EngineValidator}; +pub use reth_rpc_builder::{middleware::RethRpcMiddleware, Identity, Stack}; -use crate::{BeaconConsensusEngineEvent, BeaconConsensusEngineHandle}; +use crate::{ + invalid_block_hook::InvalidBlockHookExt, BeaconConsensusEngineEvent, + BeaconConsensusEngineHandle, ConfigureEngineEvm, +}; use alloy_rpc_types::engine::ClientVersionV1; use alloy_rpc_types_engine::ExecutionData; use jsonrpsee::{core::middleware::layer::Either, RpcModule}; use reth_chain_state::CanonStateSubscriptions; -use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; +use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; use reth_node_api::{ - AddOnsContext, BlockTy, EngineTypes, EngineValidator, FullNodeComponents, FullNodeTypes, - NodeAddOns, NodeTypes, PayloadTypes, PrimitivesTy, + AddOnsContext, BlockTy, EngineApiValidator, EngineTypes, FullNodeComponents, FullNodeTypes, + NodeAddOns, NodeTypes, PayloadTypes, PayloadValidator, PrimitivesTy, TreeConfig, }; use reth_node_core::{ node_config::NodeConfig, @@ -23,8 +27,7 @@ use reth_rpc_api::{eth::helpers::AddDevSigners, IntoEngineApiRpcModule}; use reth_rpc_builder::{ auth::{AuthRpcModule, AuthServerHandle}, config::RethRpcServerConfig, - RpcModuleBuilder, RpcRegistryInner, RpcServerConfig, RpcServerHandle, Stack, - TransportRpcModules, + RpcModuleBuilder, RpcRegistryInner, RpcServerConfig, RpcServerHandle, TransportRpcModules, }; use reth_rpc_engine_api::{capabilities::EngineCapabilities, EngineApi}; use reth_rpc_eth_types::{cache::cache_new_blocks_task, EthConfig, EthStateCache}; @@ -435,18 +438,21 @@ struct RpcSetupContext<'a, Node: FullNodeComponents, EthApi: EthApiTypes> { pub struct RpcAddOns< Node: FullNodeComponents, EthB: EthApiBuilder, - EV, - EB = BasicEngineApiBuilder, + PVB, + EB = BasicEngineApiBuilder, + EVB = BasicEngineValidatorBuilder, RpcMiddleware = Identity, > { /// Additional RPC add-ons. pub hooks: RpcHooks, /// Builder for `EthApi` pub eth_api_builder: EthB, - /// Engine validator - engine_validator_builder: EV, + /// Payload validator builder + payload_validator_builder: PVB, /// Builder for `EngineApi` engine_api_builder: EB, + /// Builder for tree validator + engine_validator_builder: EVB, /// Configurable RPC middleware stack. /// /// This middleware is applied to all RPC requests across all transports (HTTP, WS, IPC). @@ -454,25 +460,28 @@ pub struct RpcAddOns< rpc_middleware: RpcMiddleware, } -impl Debug for RpcAddOns +impl Debug + for RpcAddOns where Node: FullNodeComponents, EthB: EthApiBuilder, - EV: Debug, + PVB: Debug, EB: Debug, + EVB: Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RpcAddOns") .field("hooks", &self.hooks) .field("eth_api_builder", &"...") - .field("engine_validator_builder", &self.engine_validator_builder) + .field("payload_validator_builder", &self.payload_validator_builder) .field("engine_api_builder", &self.engine_api_builder) + .field("engine_validator_builder", &self.engine_validator_builder) .field("rpc_middleware", &"...") .finish() } } -impl RpcAddOns +impl RpcAddOns where Node: FullNodeComponents, EthB: EthApiBuilder, @@ -480,15 +489,17 @@ where /// Creates a new instance of the RPC add-ons. 
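+    ///
+    /// A construction sketch with placeholder builder values (any types
+    /// implementing the corresponding builder traits work here):
+    ///
+    /// ```ignore
+    /// let add_ons = RpcAddOns::new(
+    ///     eth_api_builder,
+    ///     payload_validator_builder,
+    ///     engine_api_builder,
+    ///     engine_validator_builder,
+    ///     rpc_middleware,
+    /// );
+    /// ```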
pub fn new( eth_api_builder: EthB, - engine_validator_builder: EV, + payload_validator_builder: PVB, engine_api_builder: EB, + engine_validator_builder: EVB, rpc_middleware: RpcMiddleware, ) -> Self { Self { hooks: RpcHooks::default(), eth_api_builder, - engine_validator_builder, + payload_validator_builder, engine_api_builder, + engine_validator_builder, rpc_middleware, } } @@ -497,13 +508,44 @@ where pub fn with_engine_api( self, engine_api_builder: T, - ) -> RpcAddOns { - let Self { hooks, eth_api_builder, engine_validator_builder, rpc_middleware, .. } = self; + ) -> RpcAddOns { + let Self { + hooks, + eth_api_builder, + payload_validator_builder, + engine_validator_builder, + rpc_middleware, + .. + } = self; RpcAddOns { hooks, eth_api_builder, + payload_validator_builder, + engine_api_builder, engine_validator_builder, + rpc_middleware, + } + } + + /// Maps the [`PayloadValidatorBuilder`] builder type. + pub fn with_payload_validator( + self, + payload_validator_builder: T, + ) -> RpcAddOns { + let Self { + hooks, + eth_api_builder, + engine_api_builder, + engine_validator_builder, + rpc_middleware, + .. + } = self; + RpcAddOns { + hooks, + eth_api_builder, + payload_validator_builder, engine_api_builder, + engine_validator_builder, rpc_middleware, } } @@ -512,13 +554,21 @@ where pub fn with_engine_validator( self, engine_validator_builder: T, - ) -> RpcAddOns { - let Self { hooks, eth_api_builder, engine_api_builder, rpc_middleware, .. } = self; + ) -> RpcAddOns { + let Self { + hooks, + eth_api_builder, + payload_validator_builder, + engine_api_builder, + rpc_middleware, + .. + } = self; RpcAddOns { hooks, eth_api_builder, - engine_validator_builder, + payload_validator_builder, engine_api_builder, + engine_validator_builder, rpc_middleware, } } @@ -561,14 +611,24 @@ where /// - Middleware is applied to the RPC service layer, not the HTTP transport layer /// - The default middleware is `Identity` (no-op), which passes through requests unchanged /// - Middleware layers are applied in the order they are added via `.layer()` - pub fn with_rpc_middleware(self, rpc_middleware: T) -> RpcAddOns { - let Self { hooks, eth_api_builder, engine_validator_builder, engine_api_builder, .. } = - self; - RpcAddOns { + pub fn with_rpc_middleware( + self, + rpc_middleware: T, + ) -> RpcAddOns { + let Self { hooks, eth_api_builder, + payload_validator_builder, + engine_api_builder, engine_validator_builder, + .. + } = self; + RpcAddOns { + hooks, + eth_api_builder, + payload_validator_builder, engine_api_builder, + engine_validator_builder, rpc_middleware, } } @@ -577,29 +637,32 @@ where pub fn layer_rpc_middleware( self, layer: T, - ) -> RpcAddOns> { + ) -> RpcAddOns> { let Self { hooks, eth_api_builder, - engine_validator_builder, + payload_validator_builder, engine_api_builder, + engine_validator_builder, rpc_middleware, } = self; let rpc_middleware = Stack::new(rpc_middleware, layer); RpcAddOns { hooks, eth_api_builder, - engine_validator_builder, + payload_validator_builder, engine_api_builder, + engine_validator_builder, rpc_middleware, } } /// Optionally adds a new layer `T` to the configured [`RpcServiceBuilder`]. 
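+    ///
+    /// `Some(layer)` is wrapped as `Either::Left(layer)`, while `None` falls
+    /// back to the no-op `Either::Right(Identity::new())`, so both branches
+    /// yield the same middleware type.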
+ #[expect(clippy::type_complexity)] pub fn option_layer_rpc_middleware( self, layer: Option, - ) -> RpcAddOns>> { + ) -> RpcAddOns>> { let layer = layer.map(Either::Left).unwrap_or(Either::Right(Identity::new())); self.layer_rpc_middleware(layer) } @@ -625,25 +688,32 @@ where } } -impl Default for RpcAddOns +impl Default for RpcAddOns where Node: FullNodeComponents, EthB: EthApiBuilder, EV: Default, EB: Default, + Engine: Default, { fn default() -> Self { - Self::new(EthB::default(), EV::default(), EB::default(), Default::default()) + Self::new( + EthB::default(), + EV::default(), + EB::default(), + Engine::default(), + Default::default(), + ) } } -impl RpcAddOns +impl RpcAddOns where N: FullNodeComponents, N::Provider: ChainSpecProvider, EthB: EthApiBuilder, - EV: EngineValidatorBuilder, EB: EngineApiBuilder, + EVB: EngineValidatorBuilder, RpcMiddleware: RethRpcMiddleware, { /// Launches only the regular RPC server (HTTP/WS/IPC), without the authenticated Engine API @@ -917,13 +987,15 @@ where } } -impl NodeAddOns for RpcAddOns +impl NodeAddOns + for RpcAddOns where N: FullNodeComponents, ::Provider: ChainSpecProvider, EthB: EthApiBuilder, - EV: EngineValidatorBuilder, + PVB: PayloadValidatorBuilder, EB: EngineApiBuilder, + EVB: EngineValidatorBuilder, RpcMiddleware: RethRpcMiddleware, { type Handle = RpcHandle; @@ -946,8 +1018,8 @@ where fn hooks_mut(&mut self) -> &mut RpcHooks; } -impl RethRpcAddOns - for RpcAddOns +impl RethRpcAddOns + for RpcAddOns where Self: NodeAddOns>, EthB: EthApiBuilder, @@ -1002,63 +1074,27 @@ pub trait EthApiBuilder: Default + Send + 'static { ) -> impl Future> + Send; } -/// Helper trait that provides the validator for the engine API +/// Helper trait that provides the validator builder for the engine API pub trait EngineValidatorAddOn: Send { - /// The Validator type to use for the engine API. - type Validator: EngineValidator<::Payload, Block = BlockTy> - + Clone; + /// The validator builder type to use. + type ValidatorBuilder: EngineValidatorBuilder; - /// Creates the engine validator for an engine API based node. - fn engine_validator( - &self, - ctx: &AddOnsContext<'_, Node>, - ) -> impl Future>; + /// Returns the validator builder. + fn engine_validator_builder(&self) -> Self::ValidatorBuilder; } -impl EngineValidatorAddOn for RpcAddOns +impl EngineValidatorAddOn for RpcAddOns where N: FullNodeComponents, EthB: EthApiBuilder, - EV: EngineValidatorBuilder, + PVB: Send, EB: EngineApiBuilder, + EVB: EngineValidatorBuilder, { - type Validator = EV::Validator; + type ValidatorBuilder = EVB; - async fn engine_validator(&self, ctx: &AddOnsContext<'_, N>) -> eyre::Result { - self.engine_validator_builder.clone().build(ctx).await - } -} - -/// A type that knows how to build the engine validator. -pub trait EngineValidatorBuilder: Send + Sync + Clone { - /// The consensus implementation to build. - type Validator: EngineValidator<::Payload, Block = BlockTy> - + Clone; - - /// Creates the engine validator. 
- fn build( - self, - ctx: &AddOnsContext<'_, Node>, - ) -> impl Future> + Send; -} - -impl EngineValidatorBuilder for F -where - Node: FullNodeComponents, - Validator: EngineValidator<::Payload, Block = BlockTy> - + Clone - + Unpin - + 'static, - F: FnOnce(&AddOnsContext<'_, Node>) -> Fut + Send + Sync + Clone, - Fut: Future> + Send, -{ - type Validator = Validator; - - fn build( - self, - ctx: &AddOnsContext<'_, Node>, - ) -> impl Future> { - self(ctx) + fn engine_validator_builder(&self) -> Self::ValidatorBuilder { + self.engine_validator_builder.clone() } } @@ -1082,17 +1118,115 @@ pub trait EngineApiBuilder: Send + Sync { ) -> impl Future> + Send; } +/// Builder trait for creating payload validators specifically for the Engine API. +/// +/// This trait is responsible for building validators that the Engine API will use +/// to validate payloads. +pub trait PayloadValidatorBuilder: Send + Sync + Clone { + /// The validator type that will be used by the Engine API. + type Validator: PayloadValidator<::Payload>; + + /// Builds the engine API validator. + /// + /// Returns a validator that validates engine API version-specific fields and payload + /// attributes. + fn build( + self, + ctx: &AddOnsContext<'_, Node>, + ) -> impl Future> + Send; +} + +/// Builder trait for creating engine validators for the consensus engine. +/// +/// This trait is responsible for building validators that the consensus engine will use +/// for block execution, state validation, and fork handling. +pub trait EngineValidatorBuilder: Send + Sync + Clone { + /// The tree validator type that will be used by the consensus engine. + type EngineValidator: EngineValidator< + ::Payload, + ::Primitives, + >; + + /// Builds the tree validator for the consensus engine. + /// + /// Returns a validator that handles block execution, state validation, and fork handling. + fn build_tree_validator( + self, + ctx: &AddOnsContext<'_, Node>, + tree_config: TreeConfig, + ) -> impl Future> + Send; +} + +/// Basic implementation of [`EngineValidatorBuilder`]. +/// +/// This builder creates a [`BasicEngineValidator`] using the provided payload validator builder. +#[derive(Debug, Clone)] +pub struct BasicEngineValidatorBuilder { + /// The payload validator builder used to create the engine validator. + payload_validator_builder: EV, +} + +impl BasicEngineValidatorBuilder { + /// Creates a new instance with the given payload validator builder. 
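+    ///
+    /// For example, using the Ethereum payload validator builder as an
+    /// illustrative `EV`:
+    /// `BasicEngineValidatorBuilder::new(EthereumEngineValidatorBuilder::default())`.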
+ pub const fn new(payload_validator_builder: EV) -> Self { + Self { payload_validator_builder } + } +} + +impl Default for BasicEngineValidatorBuilder +where + EV: Default, +{ + fn default() -> Self { + Self::new(EV::default()) + } +} + +impl EngineValidatorBuilder for BasicEngineValidatorBuilder +where + Node: FullNodeComponents< + Evm: ConfigureEngineEvm< + <::Payload as PayloadTypes>::ExecutionData, + >, + >, + EV: PayloadValidatorBuilder, + EV::Validator: reth_engine_primitives::PayloadValidator< + ::Payload, + Block = BlockTy, + >, +{ + type EngineValidator = BasicEngineValidator; + + async fn build_tree_validator( + self, + ctx: &AddOnsContext<'_, Node>, + tree_config: TreeConfig, + ) -> eyre::Result { + let validator = self.payload_validator_builder.build(ctx).await?; + let data_dir = ctx.config.datadir.clone().resolve_datadir(ctx.config.chain.chain()); + let invalid_block_hook = ctx.create_invalid_block_hook(&data_dir).await?; + Ok(BasicEngineValidator::new( + ctx.node.provider().clone(), + std::sync::Arc::new(ctx.node.consensus().clone()), + ctx.node.evm_config().clone(), + validator, + tree_config, + invalid_block_hook, + )) + } +} + /// Builder for basic [`EngineApi`] implementation. /// /// This provides a basic default implementation for opstack and ethereum engine API via /// [`EngineTypes`] and uses the general purpose [`EngineApi`] implementation as the builder's /// output. #[derive(Debug, Default)] -pub struct BasicEngineApiBuilder { - engine_validator_builder: EV, +pub struct BasicEngineApiBuilder { + payload_validator_builder: PVB, } -impl EngineApiBuilder for BasicEngineApiBuilder +impl EngineApiBuilder for BasicEngineApiBuilder where N: FullNodeComponents< Types: NodeTypes< @@ -1100,20 +1234,21 @@ where Payload: PayloadTypes + EngineTypes, >, >, - EV: EngineValidatorBuilder, + PVB: PayloadValidatorBuilder, + PVB::Validator: EngineApiValidator<::Payload>, { type EngineApi = EngineApi< N::Provider, ::Payload, N::Pool, - EV::Validator, + PVB::Validator, ::ChainSpec, >; async fn build_engine_api(self, ctx: &AddOnsContext<'_, N>) -> eyre::Result { - let Self { engine_validator_builder } = self; + let Self { payload_validator_builder } = self; - let engine_validator = engine_validator_builder.build(ctx).await?; + let engine_validator = payload_validator_builder.build(ctx).await?; let client = ClientVersionV1 { code: CLIENT_CODE, name: NAME_CLIENT.to_string(), diff --git a/crates/node/core/src/args/database.rs b/crates/node/core/src/args/database.rs index 1a490bc2722..09b8f15ef68 100644 --- a/crates/node/core/src/args/database.rs +++ b/crates/node/core/src/args/database.rs @@ -31,6 +31,9 @@ pub struct DatabaseArgs { /// Read transaction timeout in seconds, 0 means no timeout. #[arg(long = "db.read-transaction-timeout")] pub read_transaction_timeout: Option, + /// Maximum number of readers allowed to access the database concurrently. + #[arg(long = "db.max-readers")] + pub max_readers: Option, } impl DatabaseArgs { @@ -57,6 +60,7 @@ impl DatabaseArgs { .with_max_read_transaction_duration(max_read_transaction_duration) .with_geometry_max_size(self.max_size) .with_growth_step(self.growth_step) + .with_max_readers(self.max_readers) } } diff --git a/crates/node/core/src/args/network.rs b/crates/node/core/src/args/network.rs index 2f5908aaf46..57c820e9852 100644 --- a/crates/node/core/src/args/network.rs +++ b/crates/node/core/src/args/network.rs @@ -161,6 +161,13 @@ pub struct NetworkArgs { /// The policy determines which peers transactions are gossiped to. 
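The new `--db.max-readers` flag above follows the usual clap-derive pattern: an optional value that stays `None` unless set and is then threaded into the MDBX environment builder via `with_max_readers` (the `--disable-tx-gossip` flag in the next hunk works the same way for a boolean). A minimal sketch, assuming clap with the `derive` feature and a `u64` value type, since the field's concrete type is not visible in this diff:

```rust
use clap::Parser;

/// Stand-in parser; reth's DatabaseArgs embeds this field among many others.
#[derive(Debug, Parser)]
struct DatabaseArgs {
    /// Maximum number of readers allowed to access the database concurrently.
    #[arg(long = "db.max-readers")]
    max_readers: Option<u64>,
}

fn main() {
    let set = DatabaseArgs::parse_from(["reth", "--db.max-readers", "512"]);
    assert_eq!(set.max_readers, Some(512));

    // Omitting the flag leaves `None`, so the database default applies.
    let unset = DatabaseArgs::parse_from(["reth"]);
    assert_eq!(unset.max_readers, None);
}
```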
#[arg(long = "tx-propagation-policy", default_value_t = TransactionPropagationKind::All)] pub tx_propagation_policy: TransactionPropagationKind, + + /// Disable transaction pool gossip + /// + /// Disables gossiping of transactions in the mempool to peers. This can be omitted for + /// personal nodes, though providers should always opt to enable this flag. + #[arg(long = "disable-tx-gossip")] + pub disable_tx_gossip: bool, } impl NetworkArgs { @@ -272,6 +279,7 @@ impl NetworkArgs { // set discovery port based on instance number self.discovery.port, )) + .disable_tx_gossip(self.disable_tx_gossip) } /// If `no_persist_peers` is false then this returns the path to the persistent peers file path. @@ -342,7 +350,8 @@ impl Default for NetworkArgs { max_seen_tx_history: DEFAULT_MAX_COUNT_TRANSACTIONS_SEEN_BY_PEER, max_capacity_cache_txns_pending_fetch: DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH, net_if: None, - tx_propagation_policy: TransactionPropagationKind::default() + tx_propagation_policy: TransactionPropagationKind::default(), + disable_tx_gossip: false, } } } @@ -617,6 +626,12 @@ mod tests { } } + #[test] + fn parse_disable_tx_gossip_args() { + let args = CommandParser::::parse_from(["reth", "--disable-tx-gossip"]).args; + assert!(args.disable_tx_gossip); + } + #[test] fn network_args_default_sanity_test() { let default_args = NetworkArgs::default(); diff --git a/crates/node/core/src/cli/config.rs b/crates/node/core/src/cli/config.rs index ca9ebedcc5d..186d0c7d88f 100644 --- a/crates/node/core/src/cli/config.rs +++ b/crates/node/core/src/cli/config.rs @@ -7,6 +7,9 @@ use reth_network::{protocol::IntoRlpxSubProtocol, NetworkPrimitives}; use reth_transaction_pool::PoolConfig; use std::{borrow::Cow, time::Duration}; +/// 45M gas limit +const ETHEREUM_BLOCK_GAS_LIMIT_45M: u64 = 45_000_000; + /// 60M gas limit const ETHEREUM_BLOCK_GAS_LIMIT_60M: u64 = 60_000_000; @@ -42,9 +45,10 @@ pub trait PayloadBuilderConfig { } match chain.kind() { - ChainKind::Named( - NamedChain::Mainnet | NamedChain::Sepolia | NamedChain::Holesky | NamedChain::Hoodi, - ) => ETHEREUM_BLOCK_GAS_LIMIT_60M, + ChainKind::Named(NamedChain::Sepolia | NamedChain::Holesky | NamedChain::Hoodi) => { + ETHEREUM_BLOCK_GAS_LIMIT_60M + } + ChainKind::Named(NamedChain::Mainnet) => ETHEREUM_BLOCK_GAS_LIMIT_45M, _ => ETHEREUM_BLOCK_GAS_LIMIT_36M, } } diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index f2962e0f236..66a5b2b5153 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -498,7 +498,7 @@ impl NodeConfig { if let Some(interval) = self.dev.block_time { MiningMode::interval(interval) } else { - MiningMode::instant(pool) + MiningMode::instant(pool, self.dev.block_max_transactions) } } } diff --git a/crates/node/metrics/src/server.rs b/crates/node/metrics/src/server.rs index 96a466f7f65..c029b773718 100644 --- a/crates/node/metrics/src/server.rs +++ b/crates/node/metrics/src/server.rs @@ -84,43 +84,46 @@ impl MetricServer { .await .wrap_err("Could not bind to address")?; - task_executor.spawn_with_graceful_shutdown_signal(|mut signal| async move { - loop { - let io = tokio::select! { - _ = &mut signal => break, - io = listener.accept() => { - match io { - Ok((stream, _remote_addr)) => stream, - Err(err) => { - tracing::error!(%err, "failed to accept connection"); - continue; + task_executor.spawn_with_graceful_shutdown_signal(|mut signal| { + Box::pin(async move { + loop { + let io = tokio::select! 
{ + _ = &mut signal => break, + io = listener.accept() => { + match io { + Ok((stream, _remote_addr)) => stream, + Err(err) => { + tracing::error!(%err, "failed to accept connection"); + continue; + } } } - } - }; + }; - let handle = install_prometheus_recorder(); - let hook = hook.clone(); - let service = tower::service_fn(move |_| { - (hook)(); - let metrics = handle.handle().render(); - let mut response = Response::new(metrics); - response - .headers_mut() - .insert(CONTENT_TYPE, HeaderValue::from_static("text/plain")); - async move { Ok::<_, Infallible>(response) } - }); + let handle = install_prometheus_recorder(); + let hook = hook.clone(); + let service = tower::service_fn(move |_| { + (hook)(); + let metrics = handle.handle().render(); + let mut response = Response::new(metrics); + response + .headers_mut() + .insert(CONTENT_TYPE, HeaderValue::from_static("text/plain")); + async move { Ok::<_, Infallible>(response) } + }); - let mut shutdown = signal.clone().ignore_guard(); - tokio::task::spawn(async move { - let _ = - jsonrpsee_server::serve_with_graceful_shutdown(io, service, &mut shutdown) - .await - .inspect_err( - |error| tracing::debug!(%error, "failed to serve request"), - ); - }); - } + let mut shutdown = signal.clone().ignore_guard(); + tokio::task::spawn(async move { + let _ = jsonrpsee_server::serve_with_graceful_shutdown( + io, + service, + &mut shutdown, + ) + .await + .inspect_err(|error| tracing::debug!(%error, "failed to serve request")); + }); + } + }) }); Ok(()) diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index 8ec95381ddb..33e3655df8d 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -194,9 +194,16 @@ impl OpChainSpecBuilder { self } + /// Enable Jovian at genesis + pub fn jovian_activated(mut self) -> Self { + self = self.isthmus_activated(); + self.inner = self.inner.with_fork(OpHardfork::Jovian, ForkCondition::Timestamp(0)); + self + } + /// Enable Interop at genesis pub fn interop_activated(mut self) -> Self { - self = self.isthmus_activated(); + self = self.jovian_activated(); self.inner = self.inner.with_fork(OpHardfork::Interop, ForkCondition::Timestamp(0)); self } @@ -390,6 +397,7 @@ impl From for OpChainSpec { (OpHardfork::Granite.boxed(), genesis_info.granite_time), (OpHardfork::Holocene.boxed(), genesis_info.holocene_time), (OpHardfork::Isthmus.boxed(), genesis_info.isthmus_time), + (OpHardfork::Jovian.boxed(), genesis_info.jovian_time), (OpHardfork::Interop.boxed(), genesis_info.interop_time), ]; @@ -564,8 +572,13 @@ mod tests { // Isthmus ( Head { number: 0, timestamp: 1746806401, ..Default::default() }, - ForkId { hash: ForkHash([0x86, 0x72, 0x8b, 0x4e]), next: 0 }, + ForkId { hash: ForkHash([0x86, 0x72, 0x8b, 0x4e]), next: 0 }, /* TODO: update timestamp when Jovian is planned */ ), + // // Jovian + // ( + // Head { number: 0, timestamp: u64::MAX, ..Default::default() }, /* TODO: + // update timestamp when Jovian is planned */ ForkId { hash: + // ForkHash([0xef, 0x0e, 0x58, 0x33]), next: 0 }, ), ], ); } @@ -615,11 +628,16 @@ mod tests { Head { number: 0, timestamp: 1732633200, ..Default::default() }, ForkId { hash: ForkHash([0x4a, 0x1c, 0x79, 0x2e]), next: 1744905600 }, ), - // isthmus + // Isthmus ( Head { number: 0, timestamp: 1744905600, ..Default::default() }, - ForkId { hash: ForkHash([0x6c, 0x62, 0x5e, 0xe1]), next: 0 }, + ForkId { hash: ForkHash([0x6c, 0x62, 0x5e, 0xe1]), next: 0 }, /* TODO: update timestamp when Jovian is planned */ ), + // // Jovian + 
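The `jovian_activated` hunk above follows the builder's cumulative-activation convention: each `*_activated` method first activates the preceding fork, so `interop_activated` now transitively enables Jovian and everything before it. A self-contained sketch of the pattern:

```rust
// Each *_activated method first activates the preceding fork, so enabling
// a later fork transitively enables everything before it.
#[derive(Default)]
struct SpecBuilder {
    forks: Vec<&'static str>,
}

impl SpecBuilder {
    fn isthmus_activated(mut self) -> Self {
        self.forks.push("isthmus");
        self
    }

    fn jovian_activated(mut self) -> Self {
        self = self.isthmus_activated();
        self.forks.push("jovian");
        self
    }

    fn interop_activated(mut self) -> Self {
        self = self.jovian_activated();
        self.forks.push("interop");
        self
    }
}

fn main() {
    let spec = SpecBuilder::default().interop_activated();
    assert_eq!(spec.forks, ["isthmus", "jovian", "interop"]);
}
```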
// ( + // Head { number: 0, timestamp: u64::MAX, ..Default::default() }, /* TODO: + // update timestamp when Jovian is planned */ ForkId { hash: + // ForkHash([0x04, 0x2a, 0x5c, 0x14]), next: 0 }, ), ], ); } @@ -682,8 +700,13 @@ mod tests { // Isthmus ( Head { number: 105235063, timestamp: 1746806401, ..Default::default() }, - ForkId { hash: ForkHash([0x37, 0xbe, 0x75, 0x8f]), next: 0 }, + ForkId { hash: ForkHash([0x37, 0xbe, 0x75, 0x8f]), next: 0 }, /* TODO: update timestamp when Jovian is planned */ ), + // Jovian + // ( + // Head { number: 105235063, timestamp: u64::MAX, ..Default::default() }, /* + // TODO: update timestamp when Jovian is planned */ ForkId { + // hash: ForkHash([0x26, 0xce, 0xa1, 0x75]), next: 0 }, ), ], ); } @@ -733,11 +756,16 @@ mod tests { Head { number: 0, timestamp: 1732633200, ..Default::default() }, ForkId { hash: ForkHash([0x8b, 0x5e, 0x76, 0x29]), next: 1744905600 }, ), - // isthmus + // Isthmus ( Head { number: 0, timestamp: 1744905600, ..Default::default() }, - ForkId { hash: ForkHash([0x06, 0x0a, 0x4d, 0x1d]), next: 0 }, + ForkId { hash: ForkHash([0x06, 0x0a, 0x4d, 0x1d]), next: 0 }, /* TODO: update timestamp when Jovian is planned */ ), + // // Jovian + // ( + // Head { number: 0, timestamp: u64::MAX, ..Default::default() }, /* TODO: + // update timestamp when Jovian is planned */ ForkId { hash: + // ForkHash([0xcd, 0xfd, 0x39, 0x99]), next: 0 }, ), ], ); } @@ -813,6 +841,7 @@ mod tests { "fjordTime": 50, "graniteTime": 51, "holoceneTime": 52, + "isthmusTime": 53, "optimism": { "eip1559Elasticity": 60, "eip1559Denominator": 70 @@ -836,6 +865,8 @@ mod tests { assert_eq!(actual_granite_timestamp, Some(serde_json::Value::from(51)).as_ref()); let actual_holocene_timestamp = genesis.config.extra_fields.get("holoceneTime"); assert_eq!(actual_holocene_timestamp, Some(serde_json::Value::from(52)).as_ref()); + let actual_isthmus_timestamp = genesis.config.extra_fields.get("isthmusTime"); + assert_eq!(actual_isthmus_timestamp, Some(serde_json::Value::from(53)).as_ref()); let optimism_object = genesis.config.extra_fields.get("optimism").unwrap(); assert_eq!( @@ -882,6 +913,7 @@ mod tests { "fjordTime": 50, "graniteTime": 51, "holoceneTime": 52, + "isthmusTime": 53, "optimism": { "eip1559Elasticity": 60, "eip1559Denominator": 70, @@ -906,6 +938,8 @@ mod tests { assert_eq!(actual_granite_timestamp, Some(serde_json::Value::from(51)).as_ref()); let actual_holocene_timestamp = genesis.config.extra_fields.get("holoceneTime"); assert_eq!(actual_holocene_timestamp, Some(serde_json::Value::from(52)).as_ref()); + let actual_isthmus_timestamp = genesis.config.extra_fields.get("isthmusTime"); + assert_eq!(actual_isthmus_timestamp, Some(serde_json::Value::from(53)).as_ref()); let optimism_object = genesis.config.extra_fields.get("optimism").unwrap(); assert_eq!( @@ -1059,6 +1093,7 @@ mod tests { (String::from("graniteTime"), 0.into()), (String::from("holoceneTime"), 0.into()), (String::from("isthmusTime"), 0.into()), + (String::from("jovianTime"), 0.into()), ] .into_iter() .collect(), @@ -1096,6 +1131,7 @@ mod tests { OpHardfork::Holocene.boxed(), EthereumHardfork::Prague.boxed(), OpHardfork::Isthmus.boxed(), + OpHardfork::Jovian.boxed(), // OpHardfork::Interop.boxed(), ]; diff --git a/crates/optimism/chainspec/src/superchain/chain_metadata.rs b/crates/optimism/chainspec/src/superchain/chain_metadata.rs index 90330817b70..bf6228c099a 100644 --- a/crates/optimism/chainspec/src/superchain/chain_metadata.rs +++ b/crates/optimism/chainspec/src/superchain/chain_metadata.rs @@ -26,6 
+26,7 @@ pub(crate) struct HardforkConfig { pub granite_time: Option, pub holocene_time: Option, pub isthmus_time: Option, + pub jovian_time: Option, } #[derive(Clone, Debug, Deserialize)] @@ -58,6 +59,8 @@ pub(crate) struct ChainConfigExtraFields { #[serde(skip_serializing_if = "Option::is_none")] pub isthmus_time: Option, #[serde(skip_serializing_if = "Option::is_none")] + pub jovian_time: Option, + #[serde(skip_serializing_if = "Option::is_none")] pub optimism: Option, } @@ -137,6 +140,7 @@ pub(crate) fn to_genesis_chain_config(chain_config: &ChainMetadata) -> ChainConf granite_time: chain_config.hardforks.granite_time, holocene_time: chain_config.hardforks.holocene_time, isthmus_time: chain_config.hardforks.isthmus_time, + jovian_time: chain_config.hardforks.jovian_time, optimism: chain_config.optimism.as_ref().map(|o| o.into()), }; res.extra_fields = @@ -158,7 +162,8 @@ mod tests { "ecotone_time": 1710374401, "fjord_time": 1720627201, "granite_time": 1726070401, - "holocene_time": 1736445601 + "holocene_time": 1736445601, + "isthmus_time": 1746806401 }, "optimism": { "eip1559_elasticity": 6, @@ -179,6 +184,7 @@ mod tests { assert_eq!(config.hardforks.fjord_time, Some(1720627201)); assert_eq!(config.hardforks.granite_time, Some(1726070401)); assert_eq!(config.hardforks.holocene_time, Some(1736445601)); + assert_eq!(config.hardforks.isthmus_time, Some(1746806401)); // optimism assert_eq!(config.optimism.as_ref().unwrap().eip1559_elasticity, 6); assert_eq!(config.optimism.as_ref().unwrap().eip1559_denominator, 50); @@ -196,7 +202,8 @@ mod tests { fjord_time: Some(1720627201), granite_time: Some(1726070401), holocene_time: Some(1736445601), - isthmus_time: None, + isthmus_time: Some(1746806401), + jovian_time: None, optimism: Option::from(ChainConfigExtraFieldsOptimism { eip1559_elasticity: 6, eip1559_denominator: 50, @@ -212,7 +219,8 @@ mod tests { assert_eq!(value.get("fjordTime").unwrap(), 1720627201); assert_eq!(value.get("graniteTime").unwrap(), 1726070401); assert_eq!(value.get("holoceneTime").unwrap(), 1736445601); - assert_eq!(value.get("isthmusTime"), None); + assert_eq!(value.get("isthmusTime").unwrap(), 1746806401); + assert_eq!(value.get("jovianTime"), None); let optimism = value.get("optimism").unwrap(); assert_eq!(optimism.get("eip1559Elasticity").unwrap(), 6); assert_eq!(optimism.get("eip1559Denominator").unwrap(), 50); @@ -242,7 +250,7 @@ mod tests { assert_eq!(chain_config.merge_netsplit_block, Some(0)); assert_eq!(chain_config.shanghai_time, Some(1704992401)); assert_eq!(chain_config.cancun_time, Some(1710374401)); - assert_eq!(chain_config.prague_time, None); + assert_eq!(chain_config.prague_time, Some(1746806401)); assert_eq!(chain_config.osaka_time, None); assert_eq!(chain_config.terminal_total_difficulty, Some(U256::ZERO)); assert!(chain_config.terminal_total_difficulty_passed); @@ -256,7 +264,8 @@ mod tests { assert_eq!(chain_config.extra_fields.get("fjordTime").unwrap(), 1720627201); assert_eq!(chain_config.extra_fields.get("graniteTime").unwrap(), 1726070401); assert_eq!(chain_config.extra_fields.get("holoceneTime").unwrap(), 1736445601); - assert_eq!(chain_config.extra_fields.get("isthmusTime"), None); + assert_eq!(chain_config.extra_fields.get("isthmusTime").unwrap(), 1746806401); + assert_eq!(chain_config.extra_fields.get("jovianTime"), None); let optimism = chain_config.extra_fields.get("optimism").unwrap(); assert_eq!(optimism.get("eip1559Elasticity").unwrap(), 6); assert_eq!(optimism.get("eip1559Denominator").unwrap(), 50); @@ -274,7 +283,8 @@ mod tests { 
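The `jovian_time` plumbing above relies on two serde behaviors that the tests exercise: unset timestamps are omitted from the serialized genesis extra fields via `skip_serializing_if`, and the snake_case Rust fields surface as camelCase JSON keys. A minimal sketch, assuming a `rename_all = "camelCase"` attribute is what produces the camelCase keys:

```rust
use serde::{Deserialize, Serialize};

// Trimmed stand-in for ChainConfigExtraFields: unset timestamps disappear
// from the serialized output instead of appearing as null.
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct ExtraFields {
    #[serde(skip_serializing_if = "Option::is_none")]
    isthmus_time: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    jovian_time: Option<u64>,
}

fn main() {
    let fields = ExtraFields { isthmus_time: Some(1746806401), jovian_time: None };
    let value = serde_json::to_value(fields).unwrap();

    // Matches the assertions in the tests above: camelCase key present for
    // the set fork, and no key at all for the unset one.
    assert_eq!(value["isthmusTime"], 1746806401u64);
    assert!(value.get("jovianTime").is_none());
}
```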
"ecotone_time": 1710374401, "fjord_time": 1720627201, "granite_time": 1726070401, - "holocene_time": 1736445601 + "holocene_time": 1736445601, + "isthmus_time": 1746806401 }, "optimism": { "eip1559_elasticity": 6, @@ -289,7 +299,7 @@ mod tests { assert_eq!(chain_config.chain_id, 10); assert_eq!(chain_config.shanghai_time, Some(1704992401)); assert_eq!(chain_config.cancun_time, Some(1710374401)); - assert_eq!(chain_config.prague_time, None); + assert_eq!(chain_config.prague_time, Some(1746806401)); assert_eq!(chain_config.berlin_block, Some(3950000)); assert_eq!(chain_config.london_block, Some(105235063)); assert_eq!(chain_config.arrow_glacier_block, Some(105235063)); @@ -303,7 +313,9 @@ mod tests { assert_eq!(chain_config.extra_fields.get("fjordTime").unwrap(), 1720627201); assert_eq!(chain_config.extra_fields.get("graniteTime").unwrap(), 1726070401); assert_eq!(chain_config.extra_fields.get("holoceneTime").unwrap(), 1736445601); - assert_eq!(chain_config.extra_fields.get("isthmusTime"), None); + assert_eq!(chain_config.extra_fields.get("isthmusTime").unwrap(), 1746806401); + assert_eq!(chain_config.extra_fields.get("jovianTime"), None); + let optimism = chain_config.extra_fields.get("optimism").unwrap(); assert_eq!(optimism.get("eip1559Elasticity").unwrap(), 6); assert_eq!(optimism.get("eip1559Denominator").unwrap(), 50); diff --git a/crates/optimism/cli/src/commands/init_state.rs b/crates/optimism/cli/src/commands/init_state.rs index da574239d5b..92cd92de0a3 100644 --- a/crates/optimism/cli/src/commands/init_state.rs +++ b/crates/optimism/cli/src/commands/init_state.rs @@ -3,7 +3,7 @@ use alloy_consensus::Header; use clap::Parser; use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment}; +use reth_cli_commands::common::{AccessRights, CliHeader, CliNodeTypes, Environment}; use reth_db_common::init::init_from_state_dump; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_primitives::{ @@ -59,7 +59,11 @@ impl> InitStateCommandOp { &provider_rw, SealedHeader::new(BEDROCK_HEADER, BEDROCK_HEADER_HASH), BEDROCK_HEADER_TTD, - |number| Header { number, ..Default::default() }, + |number| { + let mut header = Header::default(); + header.set_number(number); + header + }, )?; // SAFETY: it's safe to commit static files, since in the event of a crash, they diff --git a/crates/optimism/cli/src/commands/mod.rs b/crates/optimism/cli/src/commands/mod.rs index 161aa1d0bab..32e531a6710 100644 --- a/crates/optimism/cli/src/commands/mod.rs +++ b/crates/optimism/cli/src/commands/mod.rs @@ -20,7 +20,6 @@ pub mod test_vectors; /// Commands to be executed #[derive(Debug, Subcommand)] -#[expect(clippy::large_enum_variant)] pub enum Commands { /// Start the node diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index 7cdc297a769..f2dce0a9ba0 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -17,6 +17,7 @@ reth-evm = { workspace = true, features = ["op"] } reth-primitives-traits.workspace = true reth-execution-errors.workspace = true reth-execution-types.workspace = true +reth-storage-errors.workspace = true reth-rpc-eth-api = { workspace = true, optional = true } @@ -26,6 +27,7 @@ alloy-evm.workspace = true alloy-primitives.workspace = true alloy-op-evm.workspace = true op-alloy-consensus.workspace = true +op-alloy-rpc-types-engine.workspace = true alloy-consensus.workspace = true # Optimism @@ -70,6 +72,8 @@ std = [ "alloy-op-evm/std", "op-revm/std", "reth-evm/std", + 
"op-alloy-rpc-types-engine/std", + "reth-storage-errors/std", ] portable = ["reth-revm/portable"] rpc = ["reth-rpc-eth-api"] diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index db42bf929dc..4973600d3d3 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -13,18 +13,25 @@ extern crate alloc; use alloc::sync::Arc; use alloy_consensus::{BlockHeader, Header}; +use alloy_eips::Decodable2718; use alloy_evm::{FromRecoveredTx, FromTxWithEncoded}; -use alloy_op_evm::{block::receipt_builder::OpReceiptBuilder, OpBlockExecutionCtx}; +use alloy_op_evm::block::receipt_builder::OpReceiptBuilder; use alloy_primitives::U256; use core::fmt::Debug; use op_alloy_consensus::EIP1559ParamError; +use op_alloy_rpc_types_engine::OpExecutionData; use op_revm::{OpSpecId, OpTransaction}; use reth_chainspec::EthChainSpec; -use reth_evm::{ConfigureEvm, EvmEnv}; +use reth_evm::{ + ConfigureEngineEvm, ConfigureEvm, EvmEnv, EvmEnvFor, ExecutableTxIterator, ExecutionCtxFor, +}; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_forks::OpHardforks; use reth_optimism_primitives::{DepositReceipt, OpPrimitives}; -use reth_primitives_traits::{NodePrimitives, SealedBlock, SealedHeader, SignedTransaction}; +use reth_primitives_traits::{ + NodePrimitives, SealedBlock, SealedHeader, SignedTransaction, TxTy, WithEncoded, +}; +use reth_storage_errors::any::AnyError; use revm::{ context::{BlockEnv, CfgEnv, TxEnv}, context_interface::block::BlobExcessGasAndPrice, @@ -45,7 +52,7 @@ pub use build::OpBlockAssembler; mod error; pub use error::OpBlockExecutionError; -pub use alloy_op_evm::{OpBlockExecutorFactory, OpEvm, OpEvmFactory}; +pub use alloy_op_evm::{OpBlockExecutionCtx, OpBlockExecutorFactory, OpEvm, OpEvmFactory}; /// Optimism-related EVM configuration. #[derive(Debug)] @@ -217,6 +224,75 @@ where } } } + +impl ConfigureEngineEvm for OpEvmConfig +where + ChainSpec: EthChainSpec
+ OpHardforks, + N: NodePrimitives< + Receipt = R::Receipt, + SignedTx = R::Transaction, + BlockHeader = Header, + BlockBody = alloy_consensus::BlockBody, + Block = alloy_consensus::Block, + >, + OpTransaction: FromRecoveredTx + FromTxWithEncoded, + R: OpReceiptBuilder, + Self: Send + Sync + Unpin + Clone + 'static, +{ + fn evm_env_for_payload(&self, payload: &OpExecutionData) -> EvmEnvFor { + let timestamp = payload.payload.timestamp(); + let block_number = payload.payload.block_number(); + + let spec = revm_spec_by_timestamp_after_bedrock(self.chain_spec(), timestamp); + + let cfg_env = CfgEnv::new().with_chain_id(self.chain_spec().chain().id()).with_spec(spec); + + let blob_excess_gas_and_price = spec + .into_eth_spec() + .is_enabled_in(SpecId::CANCUN) + .then_some(BlobExcessGasAndPrice { excess_blob_gas: 0, blob_gasprice: 1 }); + + let block_env = BlockEnv { + number: U256::from(block_number), + beneficiary: payload.payload.as_v1().fee_recipient, + timestamp: U256::from(timestamp), + difficulty: if spec.into_eth_spec() >= SpecId::MERGE { + U256::ZERO + } else { + payload.payload.as_v1().prev_randao.into() + }, + prevrandao: (spec.into_eth_spec() >= SpecId::MERGE) + .then(|| payload.payload.as_v1().prev_randao), + gas_limit: payload.payload.as_v1().gas_limit, + basefee: payload.payload.as_v1().base_fee_per_gas.to(), + // EIP-4844 excess blob gas of this block, introduced in Cancun + blob_excess_gas_and_price, + }; + + EvmEnv { cfg_env, block_env } + } + + fn context_for_payload<'a>(&self, payload: &'a OpExecutionData) -> ExecutionCtxFor<'a, Self> { + OpBlockExecutionCtx { + parent_hash: payload.parent_hash(), + parent_beacon_block_root: payload.sidecar.parent_beacon_block_root(), + extra_data: payload.payload.as_v1().extra_data.clone(), + } + } + + fn tx_iterator_for_payload( + &self, + payload: &OpExecutionData, + ) -> impl ExecutableTxIterator { + payload.payload.transactions().clone().into_iter().map(|encoded| { + let tx = TxTy::::decode_2718_exact(encoded.as_ref()) + .map_err(AnyError::new)?; + let signer = tx.try_recover().map_err(AnyError::new)?; + Ok::<_, AnyError>(WithEncoded::new(encoded, tx.with_signer(signer))) + }) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/optimism/hardforks/src/lib.rs b/crates/optimism/hardforks/src/lib.rs index aad509a11bf..e3a6df6db31 100644 --- a/crates/optimism/hardforks/src/lib.rs +++ b/crates/optimism/hardforks/src/lib.rs @@ -58,6 +58,7 @@ pub static DEV_HARDFORKS: LazyLock = LazyLock::new(|| { (OpHardfork::Granite.boxed(), ForkCondition::Timestamp(0)), (EthereumHardfork::Prague.boxed(), ForkCondition::Timestamp(0)), (OpHardfork::Isthmus.boxed(), ForkCondition::Timestamp(0)), + // (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(0)), ]) }); @@ -96,6 +97,8 @@ pub static OP_MAINNET_HARDFORKS: LazyLock = LazyLock::new(|| { (OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(1736445601)), (EthereumHardfork::Prague.boxed(), ForkCondition::Timestamp(1746806401)), (OpHardfork::Isthmus.boxed(), ForkCondition::Timestamp(1746806401)), + // (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(u64::MAX)), /* TODO: Update + // timestamp when Jovian is planned */ ]) }); /// Optimism Sepolia list of hardforks. 
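`tx_iterator_for_payload` above maps each raw payload transaction through exact EIP-2718 decoding and signer recovery while keeping the original bytes alongside the decoded value. A trimmed sketch of the decode step, assuming alloy's `Decodable2718::decode_2718_exact` helper that the diff itself calls (signer recovery and the `AnyError` wrapping are elided):

```rust
use alloy_consensus::TxEnvelope;
use alloy_eips::eip2718::{Decodable2718, Eip2718Error};
use alloy_primitives::Bytes;

// Decode each raw payload transaction exactly (no trailing bytes) and keep
// the original encoding alongside the decoded envelope, mirroring the
// `WithEncoded` pairing in the hunk above.
fn decode_payload_txs(
    raw: Vec<Bytes>,
) -> impl Iterator<Item = Result<(Bytes, TxEnvelope), Eip2718Error>> {
    raw.into_iter().map(|encoded| {
        let tx = TxEnvelope::decode_2718_exact(encoded.as_ref())?;
        Ok((encoded, tx))
    })
}

fn main() {
    // An empty payload decodes to an empty iterator; real callers feed the
    // payload body's raw transaction bytes through this mapping.
    assert_eq!(decode_payload_txs(Vec::new()).count(), 0);
}
```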
@@ -133,6 +136,8 @@ pub static OP_SEPOLIA_HARDFORKS: LazyLock = LazyLock::new(|| { (OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(1732633200)), (EthereumHardfork::Prague.boxed(), ForkCondition::Timestamp(1744905600)), (OpHardfork::Isthmus.boxed(), ForkCondition::Timestamp(1744905600)), + // (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(u64::MAX)), /* TODO: Update + // timestamp when Jovian is planned */ ]) }); @@ -171,6 +176,8 @@ pub static BASE_SEPOLIA_HARDFORKS: LazyLock = LazyLock::new(|| { (OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(1732633200)), (EthereumHardfork::Prague.boxed(), ForkCondition::Timestamp(1744905600)), (OpHardfork::Isthmus.boxed(), ForkCondition::Timestamp(1744905600)), + // (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(u64::MAX)), /* TODO: Update + // timestamp when Jovian is planned */ ]) }); @@ -209,5 +216,7 @@ pub static BASE_MAINNET_HARDFORKS: LazyLock = LazyLock::new(|| { (OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(1736445601)), (EthereumHardfork::Prague.boxed(), ForkCondition::Timestamp(1746806401)), (OpHardfork::Isthmus.boxed(), ForkCondition::Timestamp(1746806401)), + // (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(u64::MAX)), /* TODO: Update + // timestamp when Jovian is planned */ ]) }); diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 9ef5e5f7a78..da5f4675efc 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -30,7 +30,6 @@ reth-tasks = { workspace = true, optional = true } reth-trie-common.workspace = true reth-node-core.workspace = true reth-rpc-engine-api.workspace = true -reth-engine-primitives.workspace = true reth-engine-local = { workspace = true, features = ["op"] } reth-rpc-api.workspace = true @@ -71,17 +70,21 @@ serde_json = { workspace = true, optional = true } [dev-dependencies] reth-optimism-node = { workspace = true, features = ["test-utils"] } -reth-db = { workspace = true, features = ["op"] } +reth-db = { workspace = true, features = ["op", "test-utils"] } reth-node-builder = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-tasks.workspace = true reth-payload-util.workspace = true reth-payload-validator.workspace = true reth-revm = { workspace = true, features = ["std"] } +reth-rpc.workspace = true +reth-rpc-eth-types.workspace = true +reth-network-api.workspace = true alloy-network.workspace = true futures.workspace = true -alloy-eips.workspace = true +op-alloy-network.workspace = true +tempfile.workspace = true [features] default = ["reth-codec"] diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index 75012d34374..39bad862594 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -6,14 +6,14 @@ use op_alloy_rpc_types_engine::{ OpPayloadAttributes, }; use reth_consensus::ConsensusError; -use reth_engine_primitives::EngineValidator; use reth_node_api::{ payload::{ validate_parent_beacon_block_root_presence, EngineApiMessageVersion, EngineObjectValidationError, MessageValidationKind, NewPayloadError, PayloadOrAttributes, PayloadTypes, VersionSpecificValidationError, }, - validate_version_specific_fields, BuiltPayload, EngineTypes, NodePrimitives, PayloadValidator, + validate_version_specific_fields, BuiltPayload, EngineApiValidator, EngineTypes, + NodePrimitives, PayloadValidator, }; use reth_optimism_consensus::isthmus; use reth_optimism_forks::OpHardforks; @@ -161,7 +161,7 @@ 
where } } -impl EngineValidator for OpEngineValidator +impl EngineApiValidator for OpEngineValidator where Types: PayloadTypes< PayloadAttributes = OpPayloadAttributes, @@ -290,7 +290,6 @@ mod test { use alloy_primitives::{b64, Address, B256, B64}; use alloy_rpc_types_engine::PayloadAttributes; use reth_chainspec::ChainSpec; - use reth_engine_primitives::EngineValidator; use reth_optimism_chainspec::{OpChainSpec, BASE_SEPOLIA}; use reth_provider::noop::NoopProvider; use reth_trie_common::KeccakKeyHasher; @@ -334,7 +333,7 @@ mod test { OpEngineValidator::new::(get_chainspec(), NoopProvider::default()); let attributes = get_attributes(None, 1732633199); - let result = as EngineValidator< + let result = as EngineApiValidator< OpEngineTypes, >>::ensure_well_formed_attributes( &validator, EngineApiMessageVersion::V3, &attributes, @@ -348,7 +347,7 @@ mod test { OpEngineValidator::new::(get_chainspec(), NoopProvider::default()); let attributes = get_attributes(None, 1732633200); - let result = as EngineValidator< + let result = as EngineApiValidator< OpEngineTypes, >>::ensure_well_formed_attributes( &validator, EngineApiMessageVersion::V3, &attributes, @@ -362,7 +361,7 @@ mod test { OpEngineValidator::new::(get_chainspec(), NoopProvider::default()); let attributes = get_attributes(Some(b64!("0000000000000008")), 1732633200); - let result = as EngineValidator< + let result = as EngineApiValidator< OpEngineTypes, >>::ensure_well_formed_attributes( &validator, EngineApiMessageVersion::V3, &attributes, @@ -376,7 +375,7 @@ mod test { OpEngineValidator::new::(get_chainspec(), NoopProvider::default()); let attributes = get_attributes(Some(b64!("0000000800000008")), 1732633200); - let result = as EngineValidator< + let result = as EngineApiValidator< OpEngineTypes, >>::ensure_well_formed_attributes( &validator, EngineApiMessageVersion::V3, &attributes, @@ -390,7 +389,7 @@ mod test { OpEngineValidator::new::(get_chainspec(), NoopProvider::default()); let attributes = get_attributes(Some(b64!("0000000000000000")), 1732633200); - let result = as EngineValidator< + let result = as EngineApiValidator< OpEngineTypes, >>::ensure_well_formed_attributes( &validator, EngineApiMessageVersion::V3, &attributes, diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index ed9e9b08f16..3c238aaf4a8 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -27,8 +27,9 @@ use reth_node_builder::{ }, node::{FullNodeTypes, NodeTypes}, rpc::{ - EngineApiBuilder, EngineValidatorAddOn, EngineValidatorBuilder, EthApiBuilder, Identity, - RethRpcAddOns, RethRpcMiddleware, RethRpcServerHandles, RpcAddOns, RpcContext, RpcHandle, + BasicEngineValidatorBuilder, EngineApiBuilder, EngineValidatorAddOn, + EngineValidatorBuilder, EthApiBuilder, Identity, PayloadValidatorBuilder, RethRpcAddOns, + RethRpcMiddleware, RethRpcServerHandles, RpcAddOns, RpcContext, RpcHandle, }, BuilderContext, DebugNode, Node, NodeAdapter, NodeComponentsBuilder, }; @@ -229,6 +230,7 @@ where OpEthApiBuilder, OpEngineValidatorBuilder, OpEngineApiBuilder, + BasicEngineValidatorBuilder, >; fn components_builder(&self) -> Self::ComponentsBuilder { @@ -270,11 +272,17 @@ impl NodeTypes for OpNode { /// This type provides optimism-specific addons to the node and exposes the RPC server and engine /// API. 
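The `OpAddOns` struct defined just below gains several type-changing `with_*` methods. Because swapping one builder changes a type parameter of the whole struct, each method destructures `self` and reconstructs the struct at the new type, as this dependency-free sketch shows:

```rust
// Swapping one builder changes a type parameter of the whole struct, so
// each with_* method destructures self and rebuilds at the new type,
// carrying the remaining fields over unchanged.
struct AddOns<EB> {
    engine_api: EB,
    enable_tx_conditional: bool,
}

impl<EB> AddOns<EB> {
    fn with_engine_api<T>(self, engine_api: T) -> AddOns<T> {
        let Self { engine_api: _, enable_tx_conditional } = self;
        AddOns { engine_api, enable_tx_conditional }
    }
}

fn main() {
    let addons = AddOns { engine_api: "basic", enable_tx_conditional: true };
    let swapped: AddOns<u64> = addons.with_engine_api(42);
    assert!(swapped.enable_tx_conditional);
}
```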
#[derive(Debug)] -pub struct OpAddOns, EV, EB, RpcMiddleware = Identity> -{ +pub struct OpAddOns< + N: FullNodeComponents, + EthB: EthApiBuilder, + PVB, + EB = OpEngineApiBuilder, + EVB = BasicEngineValidatorBuilder, + RpcMiddleware = Identity, +> { /// Rpc add-ons responsible for launching the RPC servers and instantiating the RPC handlers /// and eth-api. - pub rpc_add_ons: RpcAddOns, + pub rpc_add_ons: RpcAddOns, /// Data availability configuration for the OP builder. pub da_config: OpDAConfig, /// Sequencer client, configured to forward submitted transactions to sequencer of given OP @@ -291,16 +299,36 @@ pub struct OpAddOns, EV, EB, RpcMi min_suggested_priority_fee: u64, } -impl Default - for OpAddOns< - N, - OpEthApiBuilder, - OpEngineValidatorBuilder, - OpEngineApiBuilder, - Identity, - > +impl OpAddOns +where + N: FullNodeComponents, + EthB: EthApiBuilder, +{ + /// Creates a new instance from components. + pub const fn new( + rpc_add_ons: RpcAddOns, + da_config: OpDAConfig, + sequencer_url: Option, + sequencer_headers: Vec, + historical_rpc: Option, + enable_tx_conditional: bool, + min_suggested_priority_fee: u64, + ) -> Self { + Self { + rpc_add_ons, + da_config, + sequencer_url, + sequencer_headers, + historical_rpc, + enable_tx_conditional, + min_suggested_priority_fee, + } + } +} + +impl Default for OpAddOns where - N: FullNodeComponents, + N: FullNodeComponents, OpEthApiBuilder: EthApiBuilder, { fn default() -> Self { @@ -317,7 +345,7 @@ impl RpcMiddleware, > where - N: FullNodeComponents, + N: FullNodeComponents, OpEthApiBuilder: EthApiBuilder, { /// Build a [`OpAddOns`] using [`OpAddOnsBuilder`]. @@ -326,7 +354,7 @@ where } } -impl OpAddOns +impl OpAddOns where N: FullNodeComponents, EthB: EthApiBuilder, @@ -335,7 +363,7 @@ where pub fn with_engine_api( self, engine_api_builder: T, - ) -> OpAddOns { + ) -> OpAddOns { let Self { rpc_add_ons, da_config, @@ -344,23 +372,24 @@ where historical_rpc, enable_tx_conditional, min_suggested_priority_fee, + .. } = self; - OpAddOns { - rpc_add_ons: rpc_add_ons.with_engine_api(engine_api_builder), + OpAddOns::new( + rpc_add_ons.with_engine_api(engine_api_builder), da_config, sequencer_url, sequencer_headers, - enable_tx_conditional, historical_rpc, + enable_tx_conditional, min_suggested_priority_fee, - } + ) } - /// Maps the [`EngineValidatorBuilder`] builder type. - pub fn with_engine_validator( + /// Maps the [`PayloadValidatorBuilder`] builder type. + pub fn with_payload_validator( self, - engine_validator_builder: T, - ) -> OpAddOns { + payload_validator_builder: T, + ) -> OpAddOns { let Self { rpc_add_ons, da_config, @@ -369,16 +398,17 @@ where enable_tx_conditional, min_suggested_priority_fee, historical_rpc, + .. } = self; - OpAddOns { - rpc_add_ons: rpc_add_ons.with_engine_validator(engine_validator_builder), + OpAddOns::new( + rpc_add_ons.with_payload_validator(payload_validator_builder), da_config, sequencer_url, sequencer_headers, + historical_rpc, enable_tx_conditional, min_suggested_priority_fee, - historical_rpc, - } + ) } /// Sets the RPC middleware stack for processing RPC requests. @@ -388,7 +418,7 @@ where /// layer, allowing you to intercept, modify, or enhance RPC request processing. /// /// See also [`RpcAddOns::with_rpc_middleware`]. - pub fn with_rpc_middleware(self, rpc_middleware: T) -> OpAddOns { + pub fn with_rpc_middleware(self, rpc_middleware: T) -> OpAddOns { let Self { rpc_add_ons, da_config, @@ -397,16 +427,17 @@ where enable_tx_conditional, min_suggested_priority_fee, historical_rpc, + .. 
} = self; - OpAddOns { - rpc_add_ons: rpc_add_ons.with_rpc_middleware(rpc_middleware), + OpAddOns::new( + rpc_add_ons.with_rpc_middleware(rpc_middleware), da_config, sequencer_url, sequencer_headers, + historical_rpc, enable_tx_conditional, min_suggested_priority_fee, - historical_rpc, - } + ) } /// Sets the hook that is run once the rpc server is started. @@ -430,8 +461,8 @@ where } } -impl NodeAddOns - for OpAddOns +impl NodeAddOns + for OpAddOns where N: FullNodeComponents< Types: NodeTypes< @@ -449,8 +480,9 @@ where Pool: TransactionPool, >, EthB: EthApiBuilder, - EV: EngineValidatorBuilder, + PVB: Send, EB: EngineApiBuilder, + EVB: EngineValidatorBuilder, RpcMiddleware: RethRpcMiddleware, Attrs: OpAttributes, RpcPayloadAttributes: DeserializeOwned>, { @@ -557,8 +589,8 @@ where } } -impl RethRpcAddOns - for OpAddOns +impl RethRpcAddOns + for OpAddOns where N: FullNodeComponents< Types: NodeTypes< @@ -576,8 +608,9 @@ where >, <::Pool as TransactionPool>::Transaction: OpPooledTx, EthB: EthApiBuilder, - EV: EngineValidatorBuilder, + PVB: PayloadValidatorBuilder, EB: EngineApiBuilder, + EVB: EngineValidatorBuilder, RpcMiddleware: RethRpcMiddleware, Attrs: OpAttributes, RpcPayloadAttributes: DeserializeOwned>, { @@ -588,19 +621,19 @@ where } } -impl EngineValidatorAddOn - for OpAddOns, EV, EB, RpcMiddleware> +impl EngineValidatorAddOn + for OpAddOns, PVB, EB, EVB> where - N: FullNodeComponents, + N: FullNodeComponents, OpEthApiBuilder: EthApiBuilder, - EV: EngineValidatorBuilder + Default, + PVB: Send, EB: EngineApiBuilder, - RpcMiddleware: Send, + EVB: EngineValidatorBuilder, { - type Validator = >::Validator; + type ValidatorBuilder = EVB; - async fn engine_validator(&self, ctx: &AddOnsContext<'_, N>) -> eyre::Result { - EV::default().build(ctx).await + fn engine_validator_builder(&self) -> Self::ValidatorBuilder { + EngineValidatorAddOn::engine_validator_builder(&self.rpc_add_ons) } } @@ -706,12 +739,15 @@ impl OpAddOnsBuilder { impl OpAddOnsBuilder { /// Builds an instance of [`OpAddOns`]. - pub fn build(self) -> OpAddOns, EV, EB, RpcMiddleware> + pub fn build( + self, + ) -> OpAddOns, PVB, EB, EVB, RpcMiddleware> where N: FullNodeComponents, OpEthApiBuilder: EthApiBuilder, - EV: Default, + PVB: PayloadValidatorBuilder + Default, EB: Default, + EVB: Default, { let Self { sequencer_url, @@ -724,23 +760,24 @@ impl OpAddOnsBuilder { .. } = self; - OpAddOns { - rpc_add_ons: RpcAddOns::new( + OpAddOns::new( + RpcAddOns::new( OpEthApiBuilder::default() .with_sequencer(sequencer_url.clone()) .with_sequencer_headers(sequencer_headers.clone()) .with_min_suggested_priority_fee(min_suggested_priority_fee), - EV::default(), + PVB::default(), EB::default(), + EVB::default(), rpc_middleware, ), - da_config: da_config.unwrap_or_default(), + da_config.unwrap_or_default(), sequencer_url, sequencer_headers, historical_rpc, enable_tx_conditional, min_suggested_priority_fee, - } + ) } } @@ -1132,7 +1169,7 @@ where #[non_exhaustive] pub struct OpEngineValidatorBuilder; -impl EngineValidatorBuilder for OpEngineValidatorBuilder +impl PayloadValidatorBuilder for OpEngineValidatorBuilder where Node: FullNodeComponents, { diff --git a/crates/optimism/node/src/rpc.rs b/crates/optimism/node/src/rpc.rs index 56022b5a4d4..a8776e1e4e6 100644 --- a/crates/optimism/node/src/rpc.rs +++ b/crates/optimism/node/src/rpc.rs @@ -1,13 +1,93 @@ //! RPC component builder +//! +//! # Example +//! +//! Builds offline `TraceApi` with only EVM and database. This can be useful +//! 
for example when downloading a state snapshot (pre-synced node) from some mirror. +//! +//! ```rust +//! use alloy_rpc_types_eth::BlockId; +//! use op_alloy_network::Optimism; +//! use reth_db::test_utils::create_test_rw_db_with_path; +//! use reth_node_builder::{ +//! components::ComponentsBuilder, +//! hooks::OnComponentInitializedHook, +//! rpc::{EthApiBuilder, EthApiCtx}, +//! LaunchContext, NodeConfig, RethFullAdapter, +//! }; +//! use reth_optimism_chainspec::OP_SEPOLIA; +//! use reth_optimism_evm::OpEvmConfig; +//! use reth_optimism_node::{OpExecutorBuilder, OpNetworkPrimitives, OpNode}; +//! use reth_optimism_rpc::OpEthApiBuilder; +//! use reth_optimism_txpool::OpPooledTransaction; +//! use reth_provider::providers::BlockchainProvider; +//! use reth_rpc::TraceApi; +//! use reth_rpc_eth_types::{EthConfig, EthStateCache}; +//! use reth_tasks::{pool::BlockingTaskGuard, TaskManager}; +//! use std::sync::Arc; +//! +//! #[tokio::main] +//! async fn main() { +//! // build core node with all components disabled except EVM and state +//! let sepolia = NodeConfig::new(OP_SEPOLIA.clone()); +//! let db = create_test_rw_db_with_path(sepolia.datadir()); +//! let tasks = TaskManager::current(); +//! let launch_ctx = LaunchContext::new(tasks.executor(), sepolia.datadir()); +//! let node = launch_ctx +//! .with_loaded_toml_config(sepolia) +//! .unwrap() +//! .attach(Arc::new(db)) +//! .with_provider_factory::<_, OpEvmConfig>() +//! .await +//! .unwrap() +//! .with_genesis() +//! .unwrap() +//! .with_metrics_task() // todo: shouldn't be req to set up blockchain db +//! .with_blockchain_db::, _>(move |provider_factory| { +//! Ok(BlockchainProvider::new(provider_factory).unwrap()) +//! }) +//! .unwrap() +//! .with_components( +//! ComponentsBuilder::default() +//! .node_types::>() +//! .noop_pool::() +//! .noop_network::() +//! .noop_consensus() +//! .executor(OpExecutorBuilder::default()) +//! .noop_payload(), +//! Box::new(()) as Box>, +//! ) +//! .await +//! .unwrap(); +//! +//! // build `eth` namespace API +//! let config = EthConfig::default(); +//! let cache = EthStateCache::spawn_with( +//! node.provider_factory().clone(), +//! config.cache, +//! node.task_executor().clone(), +//! ); +//! let ctx = EthApiCtx { components: node.node_adapter(), config, cache }; +//! let eth_api = OpEthApiBuilder::::default().build_eth_api(ctx).await.unwrap(); +//! +//! // build `trace` namespace API +//! let trace_api = TraceApi::new(eth_api, BlockingTaskGuard::new(10), EthConfig::default()); +//! +//! // fetch traces for latest block +//! let traces = trace_api.trace_block(BlockId::latest()).await.unwrap(); +//! } +//! 
``` -pub use reth_optimism_rpc::OpEngineApi; +pub use reth_optimism_rpc::{OpEngineApi, OpEthApi, OpEthApiBuilder}; use crate::OP_NAME_CLIENT; use alloy_rpc_types_engine::ClientVersionV1; use op_alloy_rpc_types_engine::OpExecutionData; use reth_chainspec::EthereumHardforks; -use reth_node_api::{AddOnsContext, EngineTypes, FullNodeComponents, NodeTypes}; -use reth_node_builder::rpc::{EngineApiBuilder, EngineValidatorBuilder}; +use reth_node_api::{ + AddOnsContext, EngineApiValidator, EngineTypes, FullNodeComponents, NodeTypes, +}; +use reth_node_builder::rpc::{EngineApiBuilder, PayloadValidatorBuilder}; use reth_node_core::version::{CARGO_PKG_VERSION, CLIENT_CODE, VERGEN_GIT_SHA}; use reth_optimism_rpc::engine::OP_ENGINE_CAPABILITIES; use reth_payload_builder::PayloadStore; @@ -27,7 +107,8 @@ where Payload: EngineTypes, >, >, - EV: EngineValidatorBuilder, + EV: PayloadValidatorBuilder, + EV::Validator: EngineApiValidator<::Payload>, { type EngineApi = OpEngineApi< N::Provider, diff --git a/crates/optimism/node/tests/it/priority.rs b/crates/optimism/node/tests/it/priority.rs index ff1ee5340a3..f831c65ca93 100644 --- a/crates/optimism/node/tests/it/priority.rs +++ b/crates/optimism/node/tests/it/priority.rs @@ -12,14 +12,14 @@ use reth_e2e_test_utils::{ use reth_node_api::FullNodeTypes; use reth_node_builder::{ components::{BasicPayloadServiceBuilder, ComponentsBuilder}, - EngineNodeLauncher, NodeBuilder, NodeConfig, + EngineNodeLauncher, Node, NodeBuilder, NodeConfig, }; use reth_node_core::args::DatadirArgs; use reth_optimism_chainspec::OpChainSpecBuilder; use reth_optimism_node::{ args::RollupArgs, node::{ - OpAddOns, OpConsensusBuilder, OpExecutorBuilder, OpNetworkBuilder, OpNodeComponentBuilder, + OpConsensusBuilder, OpExecutorBuilder, OpNetworkBuilder, OpNodeComponentBuilder, OpNodeTypes, OpPayloadBuilder, OpPoolBuilder, }, txpool::OpPooledTransaction, @@ -136,7 +136,7 @@ async fn test_custom_block_priority_config() { .with_database(db) .with_types_and_provider::>() .with_components(build_components(config.chain.chain_id())) - .with_add_ons(OpAddOns::default()) + .with_add_ons(OpNode::new(Default::default()).add_ons()) .launch_with_fn(|builder| { let launcher = EngineNodeLauncher::new( tasks.executor(), @@ -172,11 +172,11 @@ async fn test_custom_block_priority_config() { .unwrap(); assert_eq!(block_payloads.len(), 1); let block_payload = block_payloads.first().unwrap(); - let block_payload = block_payload.block().clone(); - assert_eq!(block_payload.body().transactions.len(), 2); // L1 block info tx + end-of-block custom tx + let block = block_payload.block(); + assert_eq!(block.body().transactions.len(), 2); // L1 block info tx + end-of-block custom tx // Check that last transaction in the block looks like a transfer to a random address. 
- let end_of_block_tx = block_payload.body().transactions.last().unwrap(); + let end_of_block_tx = block.body().transactions.last().unwrap(); let Some(tx) = end_of_block_tx.as_eip1559() else { panic!("expected EIP-1559 transaction"); }; diff --git a/crates/optimism/rpc/src/engine.rs b/crates/optimism/rpc/src/engine.rs index ac2cb7fcb2c..a31a64daca9 100644 --- a/crates/optimism/rpc/src/engine.rs +++ b/crates/optimism/rpc/src/engine.rs @@ -14,7 +14,7 @@ use op_alloy_rpc_types_engine::{ SuperchainSignal, }; use reth_chainspec::EthereumHardforks; -use reth_node_api::{EngineTypes, EngineValidator}; +use reth_node_api::{EngineApiValidator, EngineTypes}; use reth_rpc_api::IntoEngineApiRpcModule; use reth_rpc_engine_api::EngineApi; use reth_storage_api::{BlockReader, HeaderProvider, StateProviderFactory}; @@ -269,7 +269,7 @@ where Provider: HeaderProvider + BlockReader + StateProviderFactory + 'static, EngineT: EngineTypes, Pool: TransactionPool + 'static, - Validator: EngineValidator, + Validator: EngineApiValidator, ChainSpec: EthereumHardforks + Send + Sync + 'static, { async fn new_payload_v2(&self, payload: ExecutionPayloadInputV2) -> RpcResult { diff --git a/crates/optimism/rpc/src/historical.rs b/crates/optimism/rpc/src/historical.rs index 07cbadf4619..f5d5e71c0dd 100644 --- a/crates/optimism/rpc/src/historical.rs +++ b/crates/optimism/rpc/src/historical.rs @@ -159,14 +159,18 @@ where // if we've extracted a block ID, check if it's pre-Bedrock if let Some(block_id) = maybe_block_id { - let is_pre_bedrock = if let Ok(Some(num)) = - historical.provider.block_number_for_id(block_id) - { - num < historical.bedrock_block - } else { - // If we can't convert the hash to a number, assume it's post-Bedrock - debug!(target: "rpc::historical", ?block_id, "hash unknown; not forwarding"); - false + let is_pre_bedrock = match historical.provider.block_number_for_id(block_id) { + Ok(Some(num)) => num < historical.bedrock_block, + Ok(None) if block_id.is_hash() => { + // if we couldn't find the block number for the hash then we assume it is + // pre-Bedrock + true + } + _ => { + // If we can't convert blockid to a number, assume it's post-Bedrock + debug!(target: "rpc::historical", ?block_id, "hash unknown; not forwarding"); + false + } }; // if the block is pre-Bedrock, forward the request to the historical client diff --git a/crates/optimism/txpool/src/supervisor/access_list.rs b/crates/optimism/txpool/src/supervisor/access_list.rs index 9b3e4b0f2b4..7565c960c38 100644 --- a/crates/optimism/txpool/src/supervisor/access_list.rs +++ b/crates/optimism/txpool/src/supervisor/access_list.rs @@ -32,7 +32,7 @@ pub fn parse_access_list_items_to_inbox_entries<'a>( /// Max 3 inbox entries can exist per [`AccessListItem`] that points to [`CROSS_L2_INBOX_ADDRESS`]. /// /// Returns `Vec::new()` if [`AccessListItem`] address doesn't point to [`CROSS_L2_INBOX_ADDRESS`]. 
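The `historical.rs` hunk above changes the routing default for hash lookups: a hash the local provider cannot resolve is now assumed to be pre-Bedrock and forwarded, while other unresolvable block ids still default to post-Bedrock. The decision logic, restated as a self-contained function (provider and error types reduced to stand-ins):

```rust
// Stand-in for the provider lookup: Ok(Some(n)) resolves to a block
// number, Ok(None) means the id is unknown, Err is a provider failure.
fn is_pre_bedrock(
    lookup: Result<Option<u64>, ()>,
    id_is_hash: bool,
    bedrock_block: u64,
) -> bool {
    match lookup {
        Ok(Some(num)) => num < bedrock_block,
        // Unknown hash: assume pre-Bedrock and forward to the historical
        // endpoint. This is the behavioral change in the hunk above.
        Ok(None) if id_is_hash => true,
        // Anything else: assume post-Bedrock and serve locally.
        _ => false,
    }
}

fn main() {
    let bedrock = 105_235_063;
    assert!(is_pre_bedrock(Ok(None), true, bedrock)); // unknown hash: forward
    assert!(!is_pre_bedrock(Ok(None), false, bedrock)); // unknown number: local
    assert!(!is_pre_bedrock(Err(()), true, bedrock)); // provider error: local
}
```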
-// TODO: add url to spec once [pr](https://github.com/ethereum-optimism/specs/pull/612) is merged +// Access-list spec: fn parse_access_list_item_to_inbox_entries( access_list_item: &AccessListItem, ) -> Option> { diff --git a/crates/payload/builder/src/service.rs b/crates/payload/builder/src/service.rs index 3c4daf25557..48daeeca0a5 100644 --- a/crates/payload/builder/src/service.rs +++ b/crates/payload/builder/src/service.rs @@ -260,6 +260,12 @@ where PayloadBuilderHandle::new(self.service_tx.clone()) } + /// Creates a clone of the `payload_events` sender handle that can be used by the builder to + /// produce additional events during block building + pub fn payload_events_handle(&self) -> broadcast::Sender> { + self.payload_events.clone() + } + /// Returns true if the given payload is currently being built. fn contains_payload(&self, id: PayloadId) -> bool { self.payload_jobs.iter().any(|(_, job_id)| *job_id == id) } diff --git a/crates/primitives-traits/src/block/recovered.rs b/crates/primitives-traits/src/block/recovered.rs index 599aea1f8ac..5f60f317b61 100644 --- a/crates/primitives-traits/src/block/recovered.rs +++ b/crates/primitives-traits/src/block/recovered.rs @@ -6,9 +6,14 @@ use crate::{ Block, BlockBody, InMemorySize, SealedHeader, }; use alloc::vec::Vec; -use alloy_consensus::{transaction::Recovered, BlockHeader}; -use alloy_eips::{eip1898::BlockWithParent, BlockNumHash}; -use alloy_primitives::{Address, BlockHash, BlockNumber, Bloom, Bytes, Sealed, B256, B64, U256}; +use alloy_consensus::{ + transaction::{Recovered, TransactionMeta}, + BlockHeader, +}; +use alloy_eips::{eip1898::BlockWithParent, BlockNumHash, Encodable2718}; +use alloy_primitives::{ + Address, BlockHash, BlockNumber, Bloom, Bytes, Sealed, TxHash, B256, B64, U256, +}; use derive_more::Deref; /// A block with senders recovered from the block's transactions. @@ -308,6 +313,15 @@ impl RecoveredBlock { self.block.body().transactions().get(idx).map(|tx| Recovered::new_unchecked(tx, sender)) } + /// Finds a transaction by hash and returns it with its index and block context. + pub fn find_indexed(&self, tx_hash: TxHash) -> Option> { + self.body() + .transactions_iter() + .enumerate() + .find(|(_, tx)| tx.trie_hash() == tx_hash) + .map(|(index, tx)| IndexedTx { block: self, tx, index }) + } + /// Returns an iterator over all transactions and their sender. #[inline] pub fn transactions_with_sender( @@ -586,6 +600,52 @@ impl RecoveredBlock { } } +/// Transaction with its index and block reference for efficient metadata access. +#[derive(Debug)] +pub struct IndexedTx<'a, B: Block> { + /// Recovered block containing the transaction + block: &'a RecoveredBlock, + /// Transaction matching the hash + tx: &'a ::Transaction, + /// Index of the transaction in the block + index: usize, +} + +impl<'a, B: Block> IndexedTx<'a, B> { + /// Returns the transaction. + pub const fn tx(&self) -> &::Transaction { + self.tx + } + + /// Returns the transaction hash. + pub fn tx_hash(&self) -> TxHash { + self.tx.trie_hash() + } + + /// Returns the block hash. + pub fn block_hash(&self) -> B256 { + self.block.hash() + } + + /// Returns the index of the transaction in the block. + pub const fn index(&self) -> usize { + self.index + } + + /// Builds a [`TransactionMeta`] for the indexed transaction.
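`find_indexed` above does a single scan that keeps the matched transaction together with its position and block context, so `IndexedTx` can derive `TransactionMeta` lazily without re-searching the body. A toy restatement of the borrowing shape (the real types carry hashes and headers; strings stand in here):

```rust
// Toy block: strings stand in for transactions, a fixed array for hashes.
struct Block {
    hash: [u8; 32],
    txs: Vec<String>,
}

struct IndexedTx<'a> {
    block: &'a Block,
    tx: &'a String,
    index: usize,
}

impl Block {
    // One scan keeps the match together with its position and block
    // context, so metadata can be derived later without re-searching.
    fn find_indexed(&self, wanted: &str) -> Option<IndexedTx<'_>> {
        self.txs
            .iter()
            .enumerate()
            .find(|(_, tx)| tx.as_str() == wanted)
            .map(|(index, tx)| IndexedTx { block: self, tx, index })
    }
}

fn main() {
    let block = Block { hash: [0; 32], txs: vec!["a".into(), "b".into()] };
    let indexed = block.find_indexed("b").unwrap();
    assert_eq!(indexed.index, 1);
    assert_eq!(indexed.block.hash, block.hash);
    assert_eq!(indexed.tx, "b");
}
```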
+ pub fn meta(&self) -> TransactionMeta { + TransactionMeta { + tx_hash: self.tx.trie_hash(), + index: self.index as u64, + block_hash: self.block.hash(), + block_number: self.block.number(), + base_fee: self.block.base_fee_per_gas(), + timestamp: self.block.timestamp(), + excess_blob_gas: self.block.excess_blob_gas(), + } + } +} + #[cfg(feature = "rpc-compat")] mod rpc_compat { use super::{ diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 60f83532dfc..841ba333b98 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -144,6 +144,7 @@ pub mod block; pub use block::{ body::{BlockBody, FullBlockBody}, header::{AlloyBlockHeader, BlockHeader, FullBlockHeader}, + recovered::IndexedTx, Block, FullBlock, RecoveredBlock, SealedBlock, }; diff --git a/crates/prune/prune/src/segments/receipts.rs b/crates/prune/prune/src/segments/receipts.rs index 2c94b5e4a8a..393ca638b89 100644 --- a/crates/prune/prune/src/segments/receipts.rs +++ b/crates/prune/prune/src/segments/receipts.rs @@ -115,8 +115,10 @@ mod tests { for block in &blocks { receipts.reserve_exact(block.transaction_count()); for transaction in &block.body().transactions { - receipts - .push((receipts.len() as u64, random_receipt(&mut rng, transaction, Some(0)))); + receipts.push(( + receipts.len() as u64, + random_receipt(&mut rng, transaction, Some(0), None), + )); } } let receipts_len = receipts.len(); diff --git a/crates/prune/prune/src/segments/user/receipts_by_logs.rs b/crates/prune/prune/src/segments/user/receipts_by_logs.rs index b413a70394b..bb214ea1679 100644 --- a/crates/prune/prune/src/segments/user/receipts_by_logs.rs +++ b/crates/prune/prune/src/segments/user/receipts_by_logs.rs @@ -274,7 +274,7 @@ mod tests { for block in &blocks { receipts.reserve_exact(block.body().size()); for (txi, transaction) in block.body().transactions.iter().enumerate() { - let mut receipt = random_receipt(&mut rng, transaction, Some(1)); + let mut receipt = random_receipt(&mut rng, transaction, Some(1), None); receipt.logs.push(random_log( &mut rng, (txi == (block.transaction_count() - 1)).then_some(deposit_contract_addr), diff --git a/crates/rpc/rpc-convert/src/transaction.rs b/crates/rpc/rpc-convert/src/transaction.rs index 835ef19dedb..95d19348f4f 100644 --- a/crates/rpc/rpc-convert/src/transaction.rs +++ b/crates/rpc/rpc-convert/src/transaction.rs @@ -125,7 +125,7 @@ pub trait RpcConvert: Send + Sync + Unpin + Clone + Debug + 'static { fn fill( &self, tx: Recovered>, - tx_inf: TransactionInfo, + tx_info: TransactionInfo, ) -> Result, Self::Error>; /// Builds a fake transaction from a transaction request for inclusion into block built in diff --git a/crates/rpc/rpc-e2e-tests/README.md b/crates/rpc/rpc-e2e-tests/README.md index 44e9806f05d..03d6081cb2d 100644 --- a/crates/rpc/rpc-e2e-tests/README.md +++ b/crates/rpc/rpc-e2e-tests/README.md @@ -94,21 +94,25 @@ async fn test_eth_get_logs_compat() -> Result<()> { ### Running Tests +To run the official execution-apis test suite: + 1. Clone the execution-apis repository: ```bash git clone https://github.com/ethereum/execution-apis.git ``` -2. Set the test data path: +2. Set the test data path environment variable: ```bash - export EXECUTION_APIS_TEST_PATH=../execution-apis/tests + export EXECUTION_APIS_TEST_PATH=/path/to/execution-apis/tests ``` -3. Run the test: +3. 
Run the execution-apis compatibility test: ```bash - cargo test --test rpc_compat test_eth_get_logs_compat -- --nocapture + cargo nextest run --test e2e_testsuite test_execution_apis_compat ``` +This will auto-discover all RPC method directories and test each file individually, providing detailed per-file results. + ### Custom Test Data You can create custom test cases following the same format: diff --git a/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_syncing/eth_syncing.io b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_syncing/eth_syncing.io new file mode 100644 index 00000000000..3aba3c1eb79 --- /dev/null +++ b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_syncing/eth_syncing.io @@ -0,0 +1,3 @@ +// checks client syncing status +>> {"jsonrpc":"2.0","id":1,"method":"eth_syncing"} +<< {"jsonrpc":"2.0","id":1,"result":false} diff --git a/crates/rpc/rpc-e2e-tests/tests/e2e-testsuite/main.rs b/crates/rpc/rpc-e2e-tests/tests/e2e-testsuite/main.rs index 994cd714405..e1a4a249799 100644 --- a/crates/rpc/rpc-e2e-tests/tests/e2e-testsuite/main.rs +++ b/crates/rpc/rpc-e2e-tests/tests/e2e-testsuite/main.rs @@ -10,17 +10,17 @@ use reth_e2e_test_utils::testsuite::{ }; use reth_node_ethereum::{EthEngineTypes, EthereumNode}; use reth_rpc_e2e_tests::rpc_compat::{InitializeFromExecutionApis, RunRpcCompatTests}; -use std::{path::PathBuf, sync::Arc}; -use tracing::info; +use std::{env, path::PathBuf, sync::Arc}; +use tracing::{debug, info}; -/// Test `eth_getLogs` RPC method compatibility with execution-apis test data +/// Test repo-local RPC method compatibility with execution-apis test data /// /// This test: /// 1. Initializes a node with chain data from testdata (chain.rlp) /// 2. Applies the forkchoice state from headfcu.json -/// 3. Runs all `eth_getLogs` test cases from the execution-apis test suite +/// 3. Runs test cases in the local repository, some of which are execution-apis tests #[tokio::test(flavor = "multi_thread")] -async fn test_eth_get_logs_compat() -> Result<()> { +async fn test_local_rpc_tests_compat() -> Result<()> { reth_tracing::init_test_tracing(); // Use local test data @@ -69,7 +69,7 @@ async fn test_eth_get_logs_compat() -> Result<()> { ) .with_action(MakeCanonical::new()) .with_action(RunRpcCompatTests::new( - vec!["eth_getLogs".to_string()], + vec!["eth_getLogs".to_string(), "eth_syncing".to_string()], test_data_path.to_string_lossy(), )); @@ -77,3 +77,97 @@ async fn test_eth_get_logs_compat() -> Result<()> { Ok(()) } + +/// Test RPC method compatibility with execution-apis test data from environment variable +/// +/// This test: +/// 1. Reads test data path from `EXECUTION_APIS_TEST_PATH` environment variable +/// 2. Auto-discovers all RPC method directories (any directory name containing an underscore, e.g. `eth_*`) +/// 3. Initializes a node with chain data from that directory (chain.rlp) +/// 4. Applies the forkchoice state from headfcu.json +/// 5. Runs all discovered RPC test cases individually (each test file reported separately) +#[tokio::test(flavor = "multi_thread")] +async fn test_execution_apis_compat() -> Result<()> { + reth_tracing::init_test_tracing(); + + // Get test data path from environment variable + let test_data_path = match env::var("EXECUTION_APIS_TEST_PATH") { + Ok(path) => path, + Err(_) => { + info!("SKIPPING: EXECUTION_APIS_TEST_PATH environment variable not set.
Please set it to the path of execution-apis/tests directory to run this test."); + return Ok(()); + } + }; + + let test_data_path = PathBuf::from(test_data_path); + + if !test_data_path.exists() { + return Err(eyre::eyre!("Test data path does not exist: {}", test_data_path.display())); + } + + info!("Using execution-apis test data from: {}", test_data_path.display()); + + // Auto-discover RPC method directories + let mut rpc_methods = Vec::new(); + if let Ok(entries) = std::fs::read_dir(&test_data_path) { + for entry in entries.flatten() { + if let Some(name) = entry.file_name().to_str() { + // Search for an underscore to get all namespaced directories + if entry.path().is_dir() && name.contains('_') { + rpc_methods.push(name.to_string()); + } + } + } + } + + if rpc_methods.is_empty() { + return Err(eyre::eyre!( + "No RPC method directories (containing a '_' indicating namespacing) found in {}", + test_data_path.display() + )); + } + + rpc_methods.sort(); + debug!("Found RPC method test directories: {:?}", rpc_methods); + + // Paths to chain config files + let chain_rlp_path = test_data_path.join("chain.rlp"); + let genesis_path = test_data_path.join("genesis.json"); + let fcu_json_path = test_data_path.join("headfcu.json"); + + // Verify required files exist + if !chain_rlp_path.exists() { + return Err(eyre::eyre!("chain.rlp not found at {}", chain_rlp_path.display())); + } + if !fcu_json_path.exists() { + return Err(eyre::eyre!("headfcu.json not found at {}", fcu_json_path.display())); + } + if !genesis_path.exists() { + return Err(eyre::eyre!("genesis.json not found at {}", genesis_path.display())); + } + + // Load genesis from test data + let genesis_json = std::fs::read_to_string(&genesis_path)?; + let genesis: Genesis = serde_json::from_str(&genesis_json)?; + let chain_spec: ChainSpec = genesis.into(); + let chain_spec = Arc::new(chain_spec); + + // Create test setup with imported chain + let setup = Setup::::default() + .with_chain_spec(chain_spec) + .with_network(NetworkSetup::single_node()); + + // Build and run the test with all discovered methods + let test = TestBuilder::new() + .with_setup_and_import(setup, chain_rlp_path) + .with_action(UpdateBlockInfo::default()) + .with_action( + InitializeFromExecutionApis::new().with_fcu_json(fcu_json_path.to_string_lossy()), + ) + .with_action(MakeCanonical::new()) + .with_action(RunRpcCompatTests::new(rpc_methods, test_data_path.to_string_lossy())); + + test.run::().await?; + + Ok(()) +} diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 8738e94abe9..590c180ea15 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -18,7 +18,7 @@ use async_trait::async_trait; use jsonrpsee_core::{server::RpcModule, RpcResult}; use parking_lot::Mutex; use reth_chainspec::EthereumHardforks; -use reth_engine_primitives::{BeaconConsensusEngineHandle, EngineTypes, EngineValidator}; +use reth_engine_primitives::{BeaconConsensusEngineHandle, EngineApiValidator, EngineTypes}; use reth_payload_builder::PayloadStore; use reth_payload_primitives::{ validate_payload_timestamp, EngineApiMessageVersion, ExecutionPayload, @@ -76,7 +76,7 @@ where Provider: HeaderProvider + BlockReader + StateProviderFactory + 'static, PayloadT: PayloadTypes, Pool: TransactionPool + 'static, - Validator: EngineValidator, + Validator: EngineApiValidator, ChainSpec: EthereumHardforks + Send + Sync + 'static, { /// Create new instance of [`EngineApi`]. 
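For reference, the underscore-based directory discovery in `test_execution_apis_compat` above can be restated as a standalone helper. This is only a sketch with a hypothetical `discover_rpc_methods` name and an illustrative path, not code from the diff; it assumes nothing beyond `std`:

```rust
use std::path::Path;

/// Sketch of the discovery rule used by `test_execution_apis_compat`: every
/// subdirectory whose name contains an underscore is treated as a namespaced
/// RPC method directory (`eth_getLogs`, `eth_syncing`, ...).
fn discover_rpc_methods(test_data_path: &Path) -> std::io::Result<Vec<String>> {
    let mut methods = Vec::new();
    for entry in std::fs::read_dir(test_data_path)? {
        let entry = entry?;
        if entry.path().is_dir() {
            if let Some(name) = entry.file_name().to_str() {
                if name.contains('_') {
                    methods.push(name.to_string());
                }
            }
        }
    }
    // Sort for deterministic ordering, mirroring `rpc_methods.sort()` in the test.
    methods.sort();
    Ok(methods)
}

fn main() -> std::io::Result<()> {
    // Illustrative path only; the test reads it from EXECUTION_APIS_TEST_PATH.
    for method in discover_rpc_methods(Path::new("testdata/rpc-compat"))? {
        println!("{method}");
    }
    Ok(())
}
```

Matching on `'_'` rather than an `eth_` prefix means other namespaces (e.g. `debug_`, `engine_`) would also be picked up if the execution-apis suite ever ships them.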
@@ -293,7 +293,7 @@ where Provider: HeaderProvider + BlockReader + StateProviderFactory + 'static, EngineT: EngineTypes, Pool: TransactionPool + 'static, - Validator: EngineValidator, + Validator: EngineApiValidator, ChainSpec: EthereumHardforks + Send + Sync + 'static, { /// Sends a message to the beacon consensus engine to update the fork choice _without_ @@ -848,7 +848,7 @@ where Provider: HeaderProvider + BlockReader + StateProviderFactory + 'static, EngineT: EngineTypes, Pool: TransactionPool + 'static, - Validator: EngineValidator, + Validator: EngineApiValidator, ChainSpec: EthereumHardforks + Send + Sync + 'static, { /// Handler for `engine_newPayloadV1` diff --git a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs index 87945c3f4ad..3f58d97f7df 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs @@ -76,13 +76,14 @@ pub trait EstimateCall: Call { // Configure the evm env let mut db = CacheDB::new(StateProviderDatabase::new(state)); - let mut tx_env = self.create_txn_env(&evm_env, request, &mut db)?; // Apply any state overrides if specified. if let Some(state_override) = state_override { apply_state_overrides(state_override, &mut db).map_err(Self::Error::from_eth_err)?; } + let mut tx_env = self.create_txn_env(&evm_env, request, &mut db)?; + // Check if this is a basic transfer (no input data to account with no code) let mut is_basic_transfer = false; if tx_env.input().is_empty() { diff --git a/crates/rpc/rpc-eth-api/src/helpers/fee.rs b/crates/rpc/rpc-eth-api/src/helpers/fee.rs index 3e63c04f75f..ae558d40559 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/fee.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/fee.rs @@ -10,8 +10,8 @@ use futures::Future; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_primitives_traits::BlockBody; use reth_rpc_eth_types::{ - fee_history::calculate_reward_percentiles_for_block, EthApiError, FeeHistoryCache, - FeeHistoryEntry, GasPriceOracle, RpcInvalidTransactionError, + fee_history::calculate_reward_percentiles_for_block, utils::checked_blob_gas_used_ratio, + EthApiError, FeeHistoryCache, FeeHistoryEntry, GasPriceOracle, RpcInvalidTransactionError, }; use reth_storage_api::{BlockIdReader, BlockReaderIdExt, HeaderProvider, ProviderHeader}; use tracing::debug; @@ -186,8 +186,10 @@ pub trait EthFees: base_fee_per_blob_gas.push(header.blob_fee(blob_params).unwrap_or_default()); blob_gas_used_ratio.push( - header.blob_gas_used().unwrap_or_default() as f64 - / blob_params.max_blob_gas_per_block() as f64, + checked_blob_gas_used_ratio( + header.blob_gas_used().unwrap_or_default(), + blob_params.max_blob_gas_per_block(), + ) ); // Percentiles were specified, so we need to collect reward percentile info diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 4296157e2bf..d8fb3621ee4 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -8,10 +8,11 @@ use alloy_eips::eip7840::BlobParams; use alloy_primitives::{B256, U256}; use alloy_rpc_types_eth::BlockNumberOrTag; use futures::Future; +use reth_chain_state::ExecutedBlock; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_errors::{BlockExecutionError, BlockValidationError, ProviderError, RethError}; use reth_evm::{ - execute::{BlockBuilder, BlockBuilderOutcome}, + execute::{BlockBuilder, BlockBuilderOutcome, ExecutionOutcome}, ConfigureEvm, 
Evm, NextBlockEnvAttributes, SpecFor, }; use reth_primitives_traits::{ @@ -154,7 +155,7 @@ pub trait LoadPendingBlock: } // no pending block from the CL yet, so we need to build it ourselves via txpool - let (sealed_block, receipts) = match self + let executed_block = match self .spawn_blocking_io(move |this| { // we rebuild the block this.build_block(&parent) @@ -168,17 +169,20 @@ pub trait LoadPendingBlock: } }; - let sealed_block = Arc::new(sealed_block); - let receipts = Arc::new(receipts); + let block = executed_block.recovered_block; - let now = Instant::now(); - *lock = Some(PendingBlock::new( - now + Duration::from_secs(1), - sealed_block.clone(), - receipts.clone(), - )); + let pending = PendingBlock::new( + Instant::now() + Duration::from_secs(1), + block.clone(), + Arc::new( + executed_block.execution_output.receipts.iter().flatten().cloned().collect(), + ), + ); + let receipts = pending.receipts.clone(); + + *lock = Some(pending); - Ok(Some((sealed_block, receipts))) + Ok(Some((block, receipts))) } } @@ -188,14 +192,10 @@ pub trait LoadPendingBlock: /// /// After Cancun, if the origin is the actual pending block, the block includes the EIP-4788 pre /// block contract call using the parent beacon block root received from the CL. - #[expect(clippy::type_complexity)] fn build_block( &self, parent: &SealedHeader>, - ) -> Result< - (RecoveredBlock>, Vec>), - Self::Error, - > + ) -> Result, Self::Error> where Self::Pool: TransactionPool>>, @@ -322,10 +322,21 @@ pub trait LoadPendingBlock: cumulative_gas_used += gas_used; } - let BlockBuilderOutcome { execution_result, block, .. } = + let BlockBuilderOutcome { execution_result, block, hashed_state, .. } = builder.finish(&state_provider).map_err(Self::Error::from_eth_err)?; - Ok((block, execution_result.receipts)) + let execution_outcome = ExecutionOutcome::new( + db.take_bundle(), + vec![execution_result.receipts], + block.number(), + vec![execution_result.requests], + ); + + Ok(ExecutedBlock { + recovered_block: block.into(), + execution_output: Arc::new(execution_outcome), + hashed_state: Arc::new(hashed_state), + }) } } diff --git a/crates/rpc/rpc-eth-types/src/error/mod.rs b/crates/rpc/rpc-eth-types/src/error/mod.rs index fdb0ade248e..a8e8354fed1 100644 --- a/crates/rpc/rpc-eth-types/src/error/mod.rs +++ b/crates/rpc/rpc-eth-types/src/error/mod.rs @@ -9,7 +9,7 @@ use alloy_rpc_types_eth::{error::EthRpcErrorCode, request::TransactionInputError use alloy_sol_types::{ContractError, RevertReason}; pub use api::{AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError}; use core::time::Duration; -use reth_errors::{BlockExecutionError, RethError}; +use reth_errors::{BlockExecutionError, BlockValidationError, RethError}; use reth_primitives_traits::transaction::{error::InvalidTransactionError, signed::RecoveryError}; use reth_rpc_convert::{CallFeesError, EthTxEnvError, TransactionConversionError}; use reth_rpc_server_types::result::{ @@ -371,7 +371,30 @@ impl From for EthApiError { impl From for EthApiError { fn from(error: BlockExecutionError) -> Self { - Self::Internal(error.into()) + match error { + BlockExecutionError::Validation(validation_error) => match validation_error { + BlockValidationError::InvalidTx { error, .. 
} => { + if let Some(invalid_tx) = error.as_invalid_tx_err() { + Self::InvalidTransaction(RpcInvalidTransactionError::from( + invalid_tx.clone(), + )) + } else { + Self::InvalidTransaction(RpcInvalidTransactionError::other( + rpc_error_with_code( + EthRpcErrorCode::TransactionRejected.code(), + error.to_string(), + ), + )) + } + } + _ => Self::Internal(RethError::Execution(BlockExecutionError::Validation( + validation_error, + ))), + }, + BlockExecutionError::Internal(internal_error) => { + Self::Internal(RethError::Execution(BlockExecutionError::Internal(internal_error))) + } + } } } diff --git a/crates/rpc/rpc-eth-types/src/fee_history.rs b/crates/rpc/rpc-eth-types/src/fee_history.rs index 0ae4da51913..615437498ec 100644 --- a/crates/rpc/rpc-eth-types/src/fee_history.rs +++ b/crates/rpc/rpc-eth-types/src/fee_history.rs @@ -22,6 +22,8 @@ use reth_storage_api::BlockReaderIdExt; use serde::{Deserialize, Serialize}; use tracing::trace; +use crate::utils::checked_blob_gas_used_ratio; + use super::{EthApiError, EthStateCache}; /// Contains cached fee history entries for blocks. @@ -377,12 +379,13 @@ where base_fee_per_blob_gas: header .excess_blob_gas() .and_then(|excess_blob_gas| Some(blob_params?.calc_blob_fee(excess_blob_gas))), - blob_gas_used_ratio: block.body().blob_gas_used() as f64 / + blob_gas_used_ratio: checked_blob_gas_used_ratio( + block.body().blob_gas_used(), blob_params .as_ref() .map(|params| params.max_blob_gas_per_block()) - .unwrap_or(alloy_eips::eip4844::MAX_DATA_GAS_PER_BLOCK_DENCUN) - as f64, + .unwrap_or(alloy_eips::eip4844::MAX_DATA_GAS_PER_BLOCK_DENCUN), + ), rewards: Vec::new(), blob_params, } diff --git a/crates/rpc/rpc-eth-types/src/gas_oracle.rs b/crates/rpc/rpc-eth-types/src/gas_oracle.rs index 14ca4895a9b..a024c894629 100644 --- a/crates/rpc/rpc-eth-types/src/gas_oracle.rs +++ b/crates/rpc/rpc-eth-types/src/gas_oracle.rs @@ -301,25 +301,24 @@ where // find the maximum gas used by any of the transactions in the block to use as the // capacity margin for the block, if no receipts are found return the // suggested_min_priority_fee - let Some(max_tx_gas_used) = self + let receipts = self .cache .get_receipts(header.hash()) .await? - .ok_or(EthApiError::ReceiptsNotFound(BlockId::latest()))? + .ok_or(EthApiError::ReceiptsNotFound(BlockId::latest()))?; + + let mut max_tx_gas_used = 0u64; + let mut last_cumulative_gas = 0; + for receipt in receipts.as_ref() { + let cumulative_gas = receipt.cumulative_gas_used(); // get the gas used by each transaction in the block, by subtracting the - // cumulative gas used of the previous transaction from the cumulative gas used of the - // current transaction. This is because there is no gas_used() method on the Receipt - // trait. - .windows(2) - .map(|window| { - let prev = window[0].cumulative_gas_used(); - let curr = window[1].cumulative_gas_used(); - curr - prev - }) - .max() - else { - return Ok(suggestion); - }; + // cumulative gas used of the previous transaction from the cumulative gas used of + // the current transaction. This is because there is no gas_used() + // method on the Receipt trait. 
+ let gas_used = cumulative_gas - last_cumulative_gas; + max_tx_gas_used = max_tx_gas_used.max(gas_used); + last_cumulative_gas = cumulative_gas; + } // if the block is at capacity, the suggestion must be increased if header.gas_used() + max_tx_gas_used > header.gas_limit() { diff --git a/crates/rpc/rpc-eth-types/src/utils.rs b/crates/rpc/rpc-eth-types/src/utils.rs index d3bb655be17..33616679ddd 100644 --- a/crates/rpc/rpc-eth-types/src/utils.rs +++ b/crates/rpc/rpc-eth-types/src/utils.rs @@ -58,6 +58,19 @@ where Ok(num) } +/// Calculates the blob gas used ratio for a block, accounting for the case where +/// `max_blob_gas_per_block` is zero. +/// +/// Returns `0.0` if `blob_gas_used` is `0`, otherwise returns the ratio +/// `blob_gas_used/max_blob_gas_per_block`. +pub fn checked_blob_gas_used_ratio(blob_gas_used: u64, max_blob_gas_per_block: u64) -> f64 { + if blob_gas_used == 0 { + 0.0 + } else { + blob_gas_used as f64 / max_blob_gas_per_block as f64 + } +} + #[cfg(test)] mod tests { use super::*; @@ -84,4 +97,16 @@ mod tests { binary_search(1, 10, |mid| Box::pin(async move { Ok(mid >= 11) })).await; assert_eq!(num, Ok(10)); } + + #[test] + fn test_checked_blob_gas_used_ratio() { + // No blob gas used, max blob gas per block is 0 + assert_eq!(checked_blob_gas_used_ratio(0, 0), 0.0); + // Blob gas used is zero, max blob gas per block is non-zero + assert_eq!(checked_blob_gas_used_ratio(0, 100), 0.0); + // Blob gas used is non-zero, max blob gas per block is non-zero + assert_eq!(checked_blob_gas_used_ratio(50, 100), 0.5); + // Blob gas used is non-zero and equal to max blob gas per block + assert_eq!(checked_blob_gas_used_ratio(100, 100), 1.0); + } } diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 3e88bf8a82d..c10c6326534 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -62,7 +62,6 @@ impl DebugApi { } /// Access the underlying `Eth` API. 
- #[allow(clippy::missing_const_for_fn)] pub fn eth_api(&self) -> &Eth { &self.inner.eth_api } @@ -636,13 +635,13 @@ where .eth_api() .spawn_with_state_at_block(block.parent_hash().into(), move |state_provider| { let db = StateProviderDatabase::new(&state_provider); - let block_executor = this.eth_api().evm_config().batch_executor(db); + let block_executor = this.eth_api().evm_config().executor(db); let mut witness_record = ExecutionWitnessRecord::default(); let mut withdraw_root_res: Result<_, reth_errors::ProviderError> = Ok(()); let _ = block_executor - .execute_with_state_closure(&(*block).clone(), |statedb: &mut State<_>| { + .execute_with_state_closure(&block, |statedb: &mut State<_>| { #[cfg(feature = "scroll")] { use reth_scroll_evm::LoadWithdrawRoot; diff --git a/crates/scroll/alloy/evm/src/block/mod.rs b/crates/scroll/alloy/evm/src/block/mod.rs index ec613d5cd72..0c53e5f7ce3 100644 --- a/crates/scroll/alloy/evm/src/block/mod.rs +++ b/crates/scroll/alloy/evm/src/block/mod.rs @@ -251,7 +251,7 @@ where // execute the transaction and commit the result to the database let ResultAndState { result, state } = - self.evm.transact(tx).map_err(move |err| BlockExecutionError::evm(err, hash))?; + self.evm.transact(&tx).map_err(move |err| BlockExecutionError::evm(err, hash))?; if !f(&result).should_commit() { return Ok(None) diff --git a/crates/scroll/alloy/evm/src/tx/compression.rs b/crates/scroll/alloy/evm/src/tx/compression.rs index a35d5a7098f..f86c82de256 100644 --- a/crates/scroll/alloy/evm/src/tx/compression.rs +++ b/crates/scroll/alloy/evm/src/tx/compression.rs @@ -2,7 +2,7 @@ use super::FromRecoveredTx; use crate::ScrollTransactionIntoTxEnv; use alloy_consensus::transaction::Recovered; use alloy_eips::{Encodable2718, Typed2718}; -use alloy_evm::{IntoTxEnv, RecoveredTx}; +use alloy_evm::{RecoveredTx, ToTxEnv}; use alloy_primitives::{Address, Bytes, TxKind, U256}; use revm::context::TxEnv; use scroll_alloy_consensus::{ScrollTxEnvelope, TxL1Message}; @@ -104,10 +104,10 @@ where } } -impl> IntoTxEnv +impl> ToTxEnv for WithCompressionRatio> { - fn into_tx_env(self) -> TxEnv { + fn to_tx_env(&self) -> TxEnv { let recovered = &self.value; TxEnv::from_tx_with_compression_ratio( recovered.inner(), @@ -118,38 +118,10 @@ impl> IntoTxEnv } } -impl> IntoTxEnv - for &WithCompressionRatio> -{ - fn into_tx_env(self) -> TxEnv { - let recovered = &self.value; - TxEnv::from_tx_with_compression_ratio( - recovered.inner(), - recovered.signer(), - self.encoded_bytes.clone(), - Some(self.compression_ratio), - ) - } -} - -impl> IntoTxEnv +impl> ToTxEnv for WithCompressionRatio<&Recovered> { - fn into_tx_env(self) -> TxEnv { - let recovered = &self.value; - TxEnv::from_tx_with_compression_ratio( - recovered.inner(), - *recovered.signer(), - self.encoded_bytes.clone(), - Some(self.compression_ratio), - ) - } -} - -impl> IntoTxEnv - for &WithCompressionRatio<&Recovered> -{ - fn into_tx_env(self) -> TxEnv { + fn to_tx_env(&self) -> TxEnv { let recovered = &self.value; TxEnv::from_tx_with_compression_ratio( recovered.inner(), diff --git a/crates/scroll/evm/Cargo.toml b/crates/scroll/evm/Cargo.toml index c1601ffb685..e1cd82d674b 100644 --- a/crates/scroll/evm/Cargo.toml +++ b/crates/scroll/evm/Cargo.toml @@ -35,6 +35,7 @@ alloy-consensus.workspace = true alloy-eips.workspace = true alloy-evm.workspace = true alloy-primitives.workspace = true +alloy-rpc-types-engine.workspace = true # scroll scroll-alloy-consensus.workspace = true @@ -74,4 +75,5 @@ std = [ "tracing/std", "scroll-alloy-hardforks/std", 
"reth-storage-api/std", + "alloy-rpc-types-engine/std", ] diff --git a/crates/scroll/evm/src/config.rs b/crates/scroll/evm/src/config.rs index 4f8adc3f105..aee3f55fa81 100644 --- a/crates/scroll/evm/src/config.rs +++ b/crates/scroll/evm/src/config.rs @@ -1,16 +1,22 @@ use crate::{build::ScrollBlockAssembler, ScrollEvmConfig, ScrollNextBlockEnvAttributes}; use alloc::sync::Arc; + use alloy_consensus::{BlockHeader, Header}; +use alloy_eips::{eip2718::WithEncoded, Decodable2718}; use alloy_evm::{FromRecoveredTx, FromTxWithEncoded}; use alloy_primitives::B256; +use alloy_rpc_types_engine::ExecutionData; use core::convert::Infallible; use reth_chainspec::EthChainSpec; -use reth_evm::{ConfigureEvm, EvmEnv, ExecutionCtxFor}; +use reth_evm::{ + ConfigureEngineEvm, ConfigureEvm, EvmEnv, EvmEnvFor, ExecutableTxIterator, ExecutionCtxFor, +}; use reth_primitives_traits::{ - BlockTy, NodePrimitives, SealedBlock, SealedHeader, SignedTransaction, + BlockTy, NodePrimitives, SealedBlock, SealedHeader, SignedTransaction, TxTy, }; use reth_scroll_chainspec::{ChainConfig, ScrollChainConfig}; use reth_scroll_primitives::ScrollReceipt; +use reth_storage_api::errors::any::AnyError; use revm::{ context::{BlockEnv, CfgEnv, TxEnv}, primitives::U256, @@ -135,6 +141,69 @@ where } } +impl ConfigureEngineEvm for ScrollEvmConfig +where + ChainSpec: EthChainSpec + ChainConfig + ScrollHardforks, + N: NodePrimitives< + Receipt = R::Receipt, + SignedTx = R::Transaction, + BlockHeader = Header, + BlockBody = alloy_consensus::BlockBody, + Block = alloy_consensus::Block, + >, + ScrollTransactionIntoTxEnv: + FromRecoveredTx + FromTxWithEncoded, + R: ScrollReceiptBuilder, + P: ScrollPrecompilesFactory, + Self: Send + Sync + Unpin + Clone + 'static, +{ + fn evm_env_for_payload(&self, payload: &ExecutionData) -> EvmEnvFor { + let timestamp = payload.payload.timestamp(); + let block_number = payload.payload.block_number(); + let chain_spec = self.chain_spec(); + + let spec_id = self.spec_id_at_timestamp_and_number(timestamp, block_number); + + let cfg_env = CfgEnv::::default() + .with_chain_id(chain_spec.chain().id()) + .with_spec(spec_id); + + // get coinbase from chain config. 
+ let coinbase = + if let Some(vault_address) = self.chain_spec().chain_config().fee_vault_address { + vault_address + } else { + payload.payload.as_v1().fee_recipient + }; + + let block_env = BlockEnv { + number: U256::from(block_number), + beneficiary: coinbase, + timestamp: U256::from(timestamp), + difficulty: U256::ONE, + prevrandao: Some(B256::ZERO), + gas_limit: payload.payload.as_v1().gas_limit, + basefee: payload.payload.as_v1().base_fee_per_gas.to(), + blob_excess_gas_and_price: None, + }; + + EvmEnv { cfg_env, block_env } + } + + fn context_for_payload<'a>(&self, payload: &'a ExecutionData) -> ExecutionCtxFor<'a, Self> { + ScrollBlockExecutionCtx { parent_hash: payload.parent_hash() } + } + + fn tx_iterator_for_payload(&self, payload: &ExecutionData) -> impl ExecutableTxIterator { + payload.payload.transactions().clone().into_iter().map(|encoded| { + let tx = TxTy::::decode_2718_exact(encoded.as_ref()) + .map_err(AnyError::new)?; + let signer = tx.try_recover().map_err(AnyError::new)?; + Ok::<_, AnyError>(WithEncoded::new(encoded, tx.with_signer(signer))) + }) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/scroll/node/src/addons.rs b/crates/scroll/node/src/addons.rs index c5fc9d8ced8..9b2bce66377 100644 --- a/crates/scroll/node/src/addons.rs +++ b/crates/scroll/node/src/addons.rs @@ -1,12 +1,12 @@ use crate::{ - builder::payload::SCROLL_DEFAULT_PAYLOAD_SIZE_LIMIT, ScrollEngineValidator, - ScrollEngineValidatorBuilder, ScrollStorage, + builder::{engine::ScrollEngineValidatorBuilder, payload::SCROLL_DEFAULT_PAYLOAD_SIZE_LIMIT}, + ScrollStorage, }; -use reth_evm::{ConfigureEvm, EvmFactory, EvmFactoryFor}; -use reth_node_api::{AddOnsContext, NodeAddOns}; +use reth_evm::{ConfigureEngineEvm, EvmFactory, EvmFactoryFor}; +use reth_node_api::{AddOnsContext, NodeAddOns, PayloadTypes}; use reth_node_builder::{ rpc::{ - BasicEngineApiBuilder, EngineValidatorAddOn, EngineValidatorBuilder, EthApiBuilder, + BasicEngineApiBuilder, BasicEngineValidatorBuilder, EngineValidatorAddOn, EthApiBuilder, Identity, RethRpcAddOns, RethRpcMiddleware, RpcAddOns, RpcHandle, }, FullNodeComponents, @@ -23,14 +23,31 @@ use reth_scroll_rpc::{ }; use revm::context::TxEnv; use scroll_alloy_evm::ScrollTransactionIntoTxEnv; +use scroll_alloy_hardforks::ScrollHardforks; use scroll_alloy_network::Scroll; use std::marker::PhantomData; +/// Marker trait for Scroll node types with standard engine, chain spec, and primitives. +pub trait ScrollNodeTypes: + NodeTypes +{ +} + +/// Blanket impl for all node types that conform to the Scroll spec. +impl ScrollNodeTypes for N where + N: NodeTypes< + Payload = ScrollEngineTypes, + ChainSpec: ScrollHardforks, + Primitives = ScrollPrimitives, + > +{ +} + /// Add-ons for the Scroll follower node. #[derive(Debug)] pub struct ScrollAddOns where - N: FullNodeComponents, + N: FullNodeComponents, ScrollEthApiBuilder: EthApiBuilder, { /// Rpc add-ons responsible for launching the RPC servers and instantiating the RPC handlers @@ -40,13 +57,14 @@ where ScrollEthApiBuilder, ScrollEngineValidatorBuilder, BasicEngineApiBuilder, + BasicEngineValidatorBuilder, RpcMiddleWare, >, } impl Default for ScrollAddOns where - N: FullNodeComponents>, + N: FullNodeComponents, ScrollEthApiBuilder: EthApiBuilder, { fn default() -> Self { @@ -56,7 +74,7 @@ where impl ScrollAddOns where - N: FullNodeComponents>, + N: FullNodeComponents, ScrollEthApiBuilder: EthApiBuilder, { /// Build a [`ScrollAddOns`] using [`ScrollAddOnsBuilder`]. 
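The `ScrollNodeTypes` trait above follows the marker-trait-plus-blanket-impl pattern. Here is a minimal, self-contained sketch of that pattern with toy bounds and hypothetical names (deliberately not the real reth traits):

```rust
/// The marker trait declares no items; it only names a bound list once.
trait NodeLike: Clone + Send + Sync + 'static {}

/// The single blanket impl makes every type that satisfies the bounds
/// implement the marker automatically, so conforming types opt in for free.
impl<T> NodeLike for T where T: Clone + Send + Sync + 'static {}

/// Downstream code can now name one trait instead of repeating the bounds.
fn use_node<N: NodeLike>(node: N) -> N {
    node
}

fn main() {
    let s = use_node(String::from("node"));
    println!("{s}");
}
```

The payoff shows up throughout this diff, where `FullNodeComponents<Types: ScrollNodeTypes>` replaces the previously repeated `NodeTypes<Payload = ScrollEngineTypes, ...>` bound lists.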
@@ -74,7 +92,10 @@ where Storage = ScrollStorage, Payload = ScrollEngineTypes, >, - Evm: ConfigureEvm, + Evm: ConfigureEngineEvm< + <::Payload as PayloadTypes>::ExecutionData, + NextBlockEnvCtx = ScrollNextBlockEnvAttributes, + >, >, ScrollEthApiError: FromEvmError, EvmFactoryFor: EvmFactory>, @@ -97,7 +118,10 @@ where Storage = ScrollStorage, Payload = ScrollEngineTypes, >, - Evm: ConfigureEvm, + Evm: ConfigureEngineEvm< + <::Payload as PayloadTypes>::ExecutionData, + NextBlockEnvCtx = ScrollNextBlockEnvAttributes, + >, >, ScrollEthApiError: FromEvmError, EvmFactoryFor: EvmFactory>, @@ -110,7 +134,7 @@ where } } -impl EngineValidatorAddOn for ScrollAddOns +impl EngineValidatorAddOn for ScrollAddOns where N: FullNodeComponents< Types: NodeTypes< @@ -118,14 +142,17 @@ where Primitives = ScrollPrimitives, Payload = ScrollEngineTypes, >, + Evm: ConfigureEngineEvm< + <::Payload as PayloadTypes>::ExecutionData, + NextBlockEnvCtx = ScrollNextBlockEnvAttributes, + >, >, ScrollEthApiBuilder: EthApiBuilder, - RpcMiddleware: Send, { - type Validator = ScrollEngineValidator; + type ValidatorBuilder = BasicEngineValidatorBuilder; - async fn engine_validator(&self, ctx: &AddOnsContext<'_, N>) -> eyre::Result { - ScrollEngineValidatorBuilder.build(ctx).await + fn engine_validator_builder(&self) -> Self::ValidatorBuilder { + EngineValidatorAddOn::engine_validator_builder(&self.rpc_add_ons) } } @@ -197,7 +224,7 @@ impl ScrollAddOnsBuilder { /// Builds an instance of [`ScrollAddOns`]. pub fn build(self) -> ScrollAddOns where - N: FullNodeComponents>, + N: FullNodeComponents, ScrollEthApiBuilder: EthApiBuilder, { let Self { @@ -214,8 +241,9 @@ impl ScrollAddOnsBuilder { .with_sequencer(sequencer_url) .with_payload_size_limit(payload_size_limit) .with_min_suggested_priority_fee(min_suggested_priority_fee), - Default::default(), - Default::default(), + ScrollEngineValidatorBuilder::default(), + BasicEngineApiBuilder::default(), + BasicEngineValidatorBuilder::default(), rpc_middleware, ), } diff --git a/crates/scroll/node/src/builder/engine.rs b/crates/scroll/node/src/builder/engine.rs index 97085fdecfb..244017f8e84 100644 --- a/crates/scroll/node/src/builder/engine.rs +++ b/crates/scroll/node/src/builder/engine.rs @@ -1,23 +1,22 @@ +use crate::addons::ScrollNodeTypes; +use std::sync::Arc; + use alloy_consensus::BlockHeader; use alloy_primitives::U256; use alloy_rpc_types_engine::{ExecutionData, PayloadError}; use reth_node_api::{ - InvalidPayloadAttributesError, MessageValidationKind, NewPayloadError, PayloadAttributes, - PayloadTypes, PayloadValidator, VersionSpecificValidationError, -}; -use reth_node_builder::{ - rpc::EngineValidatorBuilder, AddOnsContext, EngineApiMessageVersion, - EngineObjectValidationError, EngineTypes, EngineValidator, ExecutionPayload, - FullNodeComponents, PayloadOrAttributes, + AddOnsContext, EngineApiMessageVersion, EngineApiValidator, EngineObjectValidationError, + ExecutionPayload, FullNodeComponents, InvalidPayloadAttributesError, MessageValidationKind, + NewPayloadError, PayloadAttributes, PayloadOrAttributes, PayloadTypes, PayloadValidator, + VersionSpecificValidationError, }; +use reth_node_builder::rpc::PayloadValidatorBuilder; use reth_node_types::NodeTypes; use reth_primitives_traits::{Block, RecoveredBlock}; -use reth_scroll_chainspec::ScrollChainSpec; -use reth_scroll_engine_primitives::{try_into_block, ScrollEngineTypes}; -use reth_scroll_primitives::{ScrollBlock, ScrollPrimitives}; +use reth_scroll_engine_primitives::try_into_block; +use 
reth_scroll_primitives::ScrollBlock; use scroll_alloy_hardforks::ScrollHardforks; use scroll_alloy_rpc_types_engine::ScrollPayloadAttributes; -use std::sync::Arc; /// The block difficulty for in turn signing in the Clique consensus. const CLIQUE_IN_TURN_DIFFICULTY: U256 = U256::from_limbs([2, 0, 0, 0]); @@ -25,42 +24,38 @@ const CLIQUE_IN_TURN_DIFFICULTY: U256 = U256::from_limbs([2, 0, 0, 0]); const CLIQUE_NO_TURN_DIFFICULTY: U256 = U256::from_limbs([1, 0, 0, 0]); /// Builder for [`ScrollEngineValidator`]. -#[derive(Debug, Default, Clone, Copy)] +#[derive(Debug, Default, Clone)] +#[non_exhaustive] pub struct ScrollEngineValidatorBuilder; -impl EngineValidatorBuilder for ScrollEngineValidatorBuilder +impl PayloadValidatorBuilder for ScrollEngineValidatorBuilder where - Types: NodeTypes< - ChainSpec = ScrollChainSpec, - Primitives = ScrollPrimitives, - Payload = ScrollEngineTypes, - >, - Node: FullNodeComponents, + Node: FullNodeComponents, { - type Validator = ScrollEngineValidator; + type Validator = ScrollEngineValidator<::ChainSpec>; async fn build(self, ctx: &AddOnsContext<'_, Node>) -> eyre::Result { - let chainspec = ctx.config.chain.clone(); - Ok(ScrollEngineValidator { chainspec }) + Ok(ScrollEngineValidator::new(ctx.config.chain.clone())) } } /// Scroll engine validator. #[derive(Debug, Clone)] -pub struct ScrollEngineValidator { - chainspec: Arc, +pub struct ScrollEngineValidator { + chainspec: Arc, } -impl ScrollEngineValidator { +impl ScrollEngineValidator { /// Returns a new [`ScrollEngineValidator`]. - pub const fn new(chainspec: Arc) -> Self { + pub const fn new(chainspec: Arc) -> Self { Self { chainspec } } } -impl EngineValidator for ScrollEngineValidator +impl EngineApiValidator for ScrollEngineValidator where - Types: EngineTypes, + Types: PayloadTypes, + CS: ScrollHardforks + Send + Sync + 'static, { fn validate_version_specific_fields( &self, @@ -114,9 +109,10 @@ fn validate_scroll_payload_or_attributes( Ok(()) } -impl PayloadValidator for ScrollEngineValidator +impl PayloadValidator for ScrollEngineValidator where Types: PayloadTypes, + CS: ScrollHardforks + Send + Sync + 'static, { type Block = ScrollBlock; diff --git a/crates/scroll/node/src/lib.rs b/crates/scroll/node/src/lib.rs index 099a252bde4..3189583edf0 100644 --- a/crates/scroll/node/src/lib.rs +++ b/crates/scroll/node/src/lib.rs @@ -6,7 +6,7 @@ pub use args::ScrollRollupArgs; mod builder; pub use builder::{ consensus::ScrollConsensusBuilder, - engine::{ScrollEngineValidator, ScrollEngineValidatorBuilder}, + engine::ScrollEngineValidator, execution::ScrollExecutorBuilder, network::{ScrollHeaderTransform, ScrollNetworkBuilder, ScrollNetworkPrimitives}, payload::ScrollPayloadBuilderBuilder, diff --git a/crates/stages/api/src/stage.rs b/crates/stages/api/src/stage.rs index e390f02e154..9fc3038c69c 100644 --- a/crates/stages/api/src/stage.rs +++ b/crates/stages/api/src/stage.rs @@ -111,7 +111,7 @@ impl ExecInput { // body. let end_block_body = provider .block_body_indices(end_block_number)? 
- .ok_or(ProviderError::BlockBodyIndicesNotFound(target_block))?; + .ok_or(ProviderError::BlockBodyIndicesNotFound(end_block_number))?; (end_block_number, false, end_block_body.next_tx_num()) }; diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index 50313f24d42..08e969c4793 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -1,5 +1,5 @@ use crate::stages::MERKLE_STAGE_DEFAULT_INCREMENTAL_THRESHOLD; -use alloy_consensus::{BlockHeader, Header}; +use alloy_consensus::BlockHeader; use alloy_primitives::BlockNumber; use num_traits::Zero; use reth_config::config::ExecutionConfig; @@ -256,8 +256,9 @@ where + BlockReader< Block = ::Block, Header = ::BlockHeader, - > + StaticFileProviderFactory - + StatsReader + > + StaticFileProviderFactory< + Primitives: NodePrimitives, + > + StatsReader + BlockHashReader + StateWriter::Receipt> + StateCommitmentProvider, @@ -560,12 +561,15 @@ where } } -fn execution_checkpoint( +fn execution_checkpoint( provider: &StaticFileProvider, start_block: BlockNumber, max_block: BlockNumber, checkpoint: StageCheckpoint, -) -> Result { +) -> Result +where + N: NodePrimitives, +{ Ok(match checkpoint.execution_stage_checkpoint() { // If checkpoint block range fully matches our range, // we take the previously used stage checkpoint as-is. @@ -628,10 +632,13 @@ fn execution_checkpoint( } /// Calculates the total amount of gas used from the headers in the given range. -pub fn calculate_gas_used_from_headers( +pub fn calculate_gas_used_from_headers( provider: &StaticFileProvider, range: RangeInclusive, -) -> Result { +) -> Result +where + N: NodePrimitives, +{ debug!(target: "sync::stages::execution", ?range, "Calculating gas used from headers"); let mut gas_total = 0; @@ -641,10 +648,10 @@ pub fn calculate_gas_used_from_headers( for entry in provider.fetch_range_iter( StaticFileSegment::Headers, *range.start()..*range.end() + 1, - |cursor, number| cursor.get_one::>(number.into()), + |cursor, number| cursor.get_one::>(number.into()), )? { - let Header { gas_used, .. 
} = entry?; - gas_total += gas_used; + let entry = entry?; + gas_total += entry.gas_used(); } let duration = start.elapsed(); diff --git a/crates/stages/stages/src/stages/merkle.rs b/crates/stages/stages/src/stages/merkle.rs index 7d5dd69d2bd..abaa12c6c6b 100644 --- a/crates/stages/stages/src/stages/merkle.rs +++ b/crates/stages/stages/src/stages/merkle.rs @@ -1,4 +1,4 @@ -use alloy_consensus::BlockHeader as _; +use alloy_consensus::{constants::KECCAK_EMPTY, BlockHeader as _}; use reth_codecs::Compact; use reth_db_api::{ tables, @@ -11,7 +11,7 @@ use reth_provider::{ }; use reth_stages_api::{ EntitiesCheckpoint, ExecInput, ExecOutput, MerkleCheckpoint, Stage, StageCheckpoint, - StageError, StageId, UnwindInput, UnwindOutput, + StageError, StageId, StorageRootMerkleCheckpoint, UnwindInput, UnwindOutput, }; use reth_trie::{IntermediateStateRootState, StateRoot, StateRootProgress, StoredSubNode}; use reth_trie_db::DatabaseStateRoot; @@ -281,12 +281,35 @@ where StateRootProgress::Progress(state, hashed_entries_walked, updates) => { provider.write_trie_updates(&updates)?; - let checkpoint = MerkleCheckpoint::new( + let mut checkpoint = MerkleCheckpoint::new( to_block, - state.last_account_key, - state.walker_stack.into_iter().map(StoredSubNode::from).collect(), - state.hash_builder.into(), + state.account_root_state.last_hashed_key, + state + .account_root_state + .walker_stack + .into_iter() + .map(StoredSubNode::from) + .collect(), + state.account_root_state.hash_builder.into(), ); + + // Save storage root state if present + if let Some(storage_state) = state.storage_root_state { + checkpoint.storage_root_checkpoint = + Some(StorageRootMerkleCheckpoint::new( + storage_state.state.last_hashed_key, + storage_state + .state + .walker_stack + .into_iter() + .map(StoredSubNode::from) + .collect(), + storage_state.state.hash_builder.into(), + storage_state.account.nonce, + storage_state.account.balance, + storage_state.account.bytecode_hash.unwrap_or(KECCAK_EMPTY), + )); + } self.save_execution_checkpoint(provider, Some(checkpoint))?; entities_checkpoint.processed += hashed_entries_walked as u64; diff --git a/crates/stages/stages/src/stages/mod.rs b/crates/stages/stages/src/stages/mod.rs index b73136d0922..e7210f05342 100644 --- a/crates/stages/stages/src/stages/mod.rs +++ b/crates/stages/stages/src/stages/mod.rs @@ -280,7 +280,7 @@ mod tests { for block in &blocks { let mut block_receipts = Vec::with_capacity(block.transaction_count()); for transaction in &block.body().transactions { - block_receipts.push((tx_num, random_receipt(&mut rng, transaction, Some(0)))); + block_receipts.push((tx_num, random_receipt(&mut rng, transaction, Some(0), None))); tx_num += 1; } receipts.push((block.number, block_receipts)); diff --git a/crates/stages/stages/src/stages/s3/downloader/meta.rs b/crates/stages/stages/src/stages/s3/downloader/meta.rs index 7ff4213fffc..dbe2a8a55a4 100644 --- a/crates/stages/stages/src/stages/s3/downloader/meta.rs +++ b/crates/stages/stages/src/stages/s3/downloader/meta.rs @@ -12,7 +12,7 @@ use tracing::info; pub struct Metadata { /// Total file size pub total_size: usize, - /// Total file size + /// Total downloaded bytes pub downloaded: usize, /// Download chunk size. Default 150MB. 
pub chunk_size: usize, @@ -141,7 +141,7 @@ impl MetadataBuilder { self } - /// Returns a [Metadata] if + /// Returns a [Metadata] if total size is valid pub fn build(&self) -> Result { match &self.total_size { Some(total_size) if *total_size > 0 => { @@ -173,7 +173,7 @@ impl MetadataBuilder { struct MetadataFile { /// Total file size total_size: usize, - /// Total file size + /// Total downloaded bytes downloaded: usize, /// Download chunk size. Default 150MB. chunk_size: usize, diff --git a/crates/stages/types/src/checkpoints.rs b/crates/stages/types/src/checkpoints.rs index 587d0508a29..61c399d9ac3 100644 --- a/crates/stages/types/src/checkpoints.rs +++ b/crates/stages/types/src/checkpoints.rs @@ -1,6 +1,6 @@ use super::StageId; use alloc::{format, string::String, vec::Vec}; -use alloy_primitives::{Address, BlockNumber, B256}; +use alloy_primitives::{Address, BlockNumber, B256, U256}; use core::ops::RangeInclusive; use reth_trie_common::{hash_builder::HashBuilderState, StoredSubNode}; @@ -15,6 +15,8 @@ pub struct MerkleCheckpoint { pub walker_stack: Vec, /// The hash builder state. pub state: HashBuilderState, + /// Optional storage root checkpoint for the last processed account. + pub storage_root_checkpoint: Option, } impl MerkleCheckpoint { @@ -25,7 +27,7 @@ impl MerkleCheckpoint { walker_stack: Vec, state: HashBuilderState, ) -> Self { - Self { target_block, last_account_key, walker_stack, state } + Self { target_block, last_account_key, walker_stack, state, storage_root_checkpoint: None } } } @@ -50,6 +52,22 @@ impl reth_codecs::Compact for MerkleCheckpoint { } len += self.state.to_compact(buf); + + // Encode the optional storage root checkpoint + match &self.storage_root_checkpoint { + Some(checkpoint) => { + // one means Some + buf.put_u8(1); + len += 1; + len += checkpoint.to_compact(buf); + } + None => { + // zero means None + buf.put_u8(0); + len += 1; + } + } + len } @@ -68,8 +86,133 @@ impl reth_codecs::Compact for MerkleCheckpoint { buf = rest; } - let (state, buf) = HashBuilderState::from_compact(buf, 0); - (Self { target_block, last_account_key, walker_stack, state }, buf) + let (state, mut buf) = HashBuilderState::from_compact(buf, 0); + + // Decode the storage root checkpoint if it exists + let (storage_root_checkpoint, buf) = if buf.is_empty() { + (None, buf) + } else { + match buf.get_u8() { + 1 => { + let (checkpoint, rest) = StorageRootMerkleCheckpoint::from_compact(buf, 0); + (Some(checkpoint), rest) + } + _ => (None, buf), + } + }; + + (Self { target_block, last_account_key, walker_stack, state, storage_root_checkpoint }, buf) + } +} + +/// Saves the progress of a storage root computation. +/// +/// This contains the walker stack, hash builder state, and the last storage key processed. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct StorageRootMerkleCheckpoint { + /// The last storage key processed. + pub last_storage_key: B256, + /// Previously recorded walker stack. + pub walker_stack: Vec, + /// The hash builder state. + pub state: HashBuilderState, + /// The account nonce. + pub account_nonce: u64, + /// The account balance. + pub account_balance: U256, + /// The account bytecode hash. + pub account_bytecode_hash: B256, +} + +impl StorageRootMerkleCheckpoint { + /// Creates a new storage root merkle checkpoint. 
+ pub const fn new( + last_storage_key: B256, + walker_stack: Vec, + state: HashBuilderState, + account_nonce: u64, + account_balance: U256, + account_bytecode_hash: B256, + ) -> Self { + Self { + last_storage_key, + walker_stack, + state, + account_nonce, + account_balance, + account_bytecode_hash, + } + } +} + +#[cfg(any(test, feature = "reth-codec"))] +impl reth_codecs::Compact for StorageRootMerkleCheckpoint { + fn to_compact(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + let mut len = 0; + + buf.put_slice(self.last_storage_key.as_slice()); + len += self.last_storage_key.len(); + + buf.put_u16(self.walker_stack.len() as u16); + len += 2; + for item in &self.walker_stack { + len += item.to_compact(buf); + } + + len += self.state.to_compact(buf); + + // Encode account fields + buf.put_u64(self.account_nonce); + len += 8; + + let balance_len = self.account_balance.byte_len() as u8; + buf.put_u8(balance_len); + len += 1; + len += self.account_balance.to_compact(buf); + + buf.put_slice(self.account_bytecode_hash.as_slice()); + len += 32; + + len + } + + fn from_compact(mut buf: &[u8], _len: usize) -> (Self, &[u8]) { + use bytes::Buf; + + let last_storage_key = B256::from_slice(&buf[..32]); + buf.advance(32); + + let walker_stack_len = buf.get_u16() as usize; + let mut walker_stack = Vec::with_capacity(walker_stack_len); + for _ in 0..walker_stack_len { + let (item, rest) = StoredSubNode::from_compact(buf, 0); + walker_stack.push(item); + buf = rest; + } + + let (state, mut buf) = HashBuilderState::from_compact(buf, 0); + + // Decode account fields + let account_nonce = buf.get_u64(); + let balance_len = buf.get_u8() as usize; + let (account_balance, mut buf) = U256::from_compact(buf, balance_len); + let account_bytecode_hash = B256::from_slice(&buf[..32]); + buf.advance(32); + + ( + Self { + last_storage_key, + walker_stack, + state, + account_nonce, + account_balance, + account_bytecode_hash, + }, + buf, + ) } } @@ -407,6 +550,7 @@ stage_unit_checkpoints!( #[cfg(test)] mod tests { use super::*; + use alloy_primitives::b256; use rand::Rng; use reth_codecs::Compact; @@ -422,6 +566,68 @@ mod tests { node: None, }], state: HashBuilderState::default(), + storage_root_checkpoint: None, + }; + + let mut buf = Vec::new(); + let encoded = checkpoint.to_compact(&mut buf); + let (decoded, _) = MerkleCheckpoint::from_compact(&buf, encoded); + assert_eq!(decoded, checkpoint); + } + + #[test] + fn storage_root_merkle_checkpoint_roundtrip() { + let mut rng = rand::rng(); + let checkpoint = StorageRootMerkleCheckpoint { + last_storage_key: rng.random(), + walker_stack: vec![StoredSubNode { + key: B256::random_with(&mut rng).to_vec(), + nibble: Some(rng.random()), + node: None, + }], + state: HashBuilderState::default(), + account_nonce: 0, + account_balance: U256::ZERO, + account_bytecode_hash: B256::ZERO, + }; + + let mut buf = Vec::new(); + let encoded = checkpoint.to_compact(&mut buf); + let (decoded, _) = StorageRootMerkleCheckpoint::from_compact(&buf, encoded); + assert_eq!(decoded, checkpoint); + } + + #[test] + fn merkle_checkpoint_with_storage_root_roundtrip() { + let mut rng = rand::rng(); + + // Create a storage root checkpoint + let storage_checkpoint = StorageRootMerkleCheckpoint { + last_storage_key: rng.random(), + walker_stack: vec![StoredSubNode { + key: B256::random_with(&mut rng).to_vec(), + nibble: Some(rng.random()), + node: None, + }], + state: HashBuilderState::default(), + account_nonce: 1, + account_balance: U256::from(1), + account_bytecode_hash: 
b256!( + "0x0fffffffffffffffffffffffffffffff0fffffffffffffffffffffffffffffff" + ), + }; + + // Create a merkle checkpoint with the storage root checkpoint + let checkpoint = MerkleCheckpoint { + target_block: rng.random(), + last_account_key: rng.random(), + walker_stack: vec![StoredSubNode { + key: B256::random_with(&mut rng).to_vec(), + nibble: Some(rng.random()), + node: None, + }], + state: HashBuilderState::default(), + storage_root_checkpoint: Some(storage_checkpoint), }; let mut buf = Vec::new(); diff --git a/crates/stages/types/src/lib.rs b/crates/stages/types/src/lib.rs index 13d59de3433..f6149d9eb07 100644 --- a/crates/stages/types/src/lib.rs +++ b/crates/stages/types/src/lib.rs @@ -19,7 +19,7 @@ mod checkpoints; pub use checkpoints::{ AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, ExecutionCheckpoint, HeadersCheckpoint, IndexHistoryCheckpoint, MerkleCheckpoint, StageCheckpoint, - StageUnitCheckpoint, StorageHashingCheckpoint, + StageUnitCheckpoint, StorageHashingCheckpoint, StorageRootMerkleCheckpoint, }; mod execution; diff --git a/crates/stateless/src/validation.rs b/crates/stateless/src/validation.rs index 165deac1bb3..120273a7ebe 100644 --- a/crates/stateless/src/validation.rs +++ b/crates/stateless/src/validation.rs @@ -12,15 +12,16 @@ use alloc::{ vec::Vec, }; use alloy_consensus::{BlockHeader, Header}; -use alloy_primitives::B256; -use alloy_rlp::Decodable; +use alloy_primitives::{keccak256, B256}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_consensus::{Consensus, HeaderValidator}; use reth_errors::ConsensusError; use reth_ethereum_consensus::{validate_block_post_execution, EthBeaconConsensus}; use reth_ethereum_primitives::{Block, EthPrimitives}; use reth_evm::{execute::Executor, ConfigureEvm}; -use reth_primitives_traits::{block::error::BlockRecoveryError, Block as _, RecoveredBlock}; +use reth_primitives_traits::{ + block::error::BlockRecoveryError, Block as _, RecoveredBlock, SealedHeader, +}; use reth_trie_common::{HashedPostState, KeccakKeyHasher}; /// Errors that can occur during stateless validation. @@ -167,12 +168,13 @@ where .try_into_recovered() .map_err(|err| StatelessValidationError::SignerRecovery(Box::new(err)))?; - let mut ancestor_headers: Vec<Header>
= witness + let mut ancestor_headers: Vec<_> = witness .headers .iter() - .map(|serialized_header| { - let bytes = serialized_header.as_ref(); - Header::decode(&mut &bytes[..]) + .map(|bytes| { + let hash = keccak256(bytes); + alloy_rlp::decode_exact::<Header>
(bytes) + .map(|h| SealedHeader::new(h, hash)) .map_err(|_| StatelessValidationError::HeaderDeserializationFailed) }) .collect::<Result<_, _>>()?; @@ -180,25 +182,22 @@ // ascending order. ancestor_headers.sort_by_key(|header| header.number()); - // Validate block against pre-execution consensus rules - validate_block_consensus(chain_spec.clone(), &current_block)?; - // Check that the ancestor headers form a contiguous chain and are not just random headers. let ancestor_hashes = compute_ancestor_hashes(&current_block, &ancestor_headers)?; - // Get the last ancestor header and retrieve its state root. - // - // There should be at least one ancestor header, this is because we need the parent header to - // retrieve the previous state root. + // There should be at least one ancestor header. // The edge case here would be the genesis block, but we do not create proofs for the genesis // block. - let pre_state_root = match ancestor_headers.last() { - Some(prev_header) => prev_header.state_root, + let parent = match ancestor_headers.last() { + Some(prev_header) => prev_header, None => return Err(StatelessValidationError::MissingAncestorHeader), }; + // Validate block against pre-execution consensus rules + validate_block_consensus(chain_spec.clone(), &current_block, parent)?; + // First verify that the pre-state reads are correct - let (mut trie, bytecode) = T::new(&witness, pre_state_root)?; + let (mut trie, bytecode) = T::new(&witness, parent.state_root)?; // Create an in-memory database that will use the reads to validate the block let db = WitnessDatabase::new(&trie, bytecode, ancestor_hashes); @@ -231,17 +230,14 @@ /// /// This function validates a block against Ethereum consensus rules by: /// -/// 1. **Difficulty Validation:** Validates the header with total difficulty to verify proof-of-work -/// (pre-merge) or to enforce post-merge requirements. -/// -/// 2. **Header Validation:** Validates the sealed header against protocol specifications, +/// 1. **Header Validation:** Validates the sealed header against protocol specifications, /// including: /// - Gas limit checks /// - Base fee validation for EIP-1559 /// - Withdrawals root validation for Shanghai fork /// - Blob-related fields validation for Cancun fork /// -/// 3. **Pre-Execution Validation:** Validates block structure, transaction format, signature +/// 2. **Pre-Execution Validation:** Validates block structure, transaction format, signature /// validity, and other pre-execution requirements. /// /// This function acts as a preliminary validation before executing and validating the state @@ -249,6 +245,7 @@ fn validate_block_consensus( chain_spec: Arc<ChainSpec>, block: &RecoveredBlock<Block>, + parent: &SealedHeader<Header>
, ) -> Result<(), StatelessValidationError> where ChainSpec: Send + Sync + EthChainSpec<Header = Header>
+ EthereumHardforks + Debug, @@ -256,6 +253,7 @@ where let consensus = EthBeaconConsensus::new(chain_spec); consensus.validate_header(block.sealed_header())?; + consensus.validate_header_against_parent(block.sealed_header(), parent)?; consensus.validate_block_pre_execution(block)?; @@ -277,18 +275,18 @@ where /// ancestor header to its corresponding block hash. fn compute_ancestor_hashes( current_block: &RecoveredBlock, - ancestor_headers: &[Header], + ancestor_headers: &[SealedHeader], ) -> Result, StatelessValidationError> { let mut ancestor_hashes = BTreeMap::new(); - let mut child_header = current_block.header(); + let mut child_header = current_block.sealed_header(); // Next verify that headers supplied are contiguous for parent_header in ancestor_headers.iter().rev() { let parent_hash = child_header.parent_hash(); ancestor_hashes.insert(parent_header.number, parent_hash); - if parent_hash != parent_header.hash_slow() { + if parent_hash != parent_header.hash() { return Err(StatelessValidationError::InvalidAncestorChain); // Blocks must be contiguous } diff --git a/crates/static-file/static-file/src/static_file_producer.rs b/crates/static-file/static-file/src/static_file_producer.rs index 491419ef4b6..6e517a461f5 100644 --- a/crates/static-file/static-file/src/static_file_producer.rs +++ b/crates/static-file/static-file/src/static_file_producer.rs @@ -304,8 +304,10 @@ mod tests { let mut receipts = Vec::new(); for block in &blocks { for transaction in &block.body().transactions { - receipts - .push((receipts.len() as u64, random_receipt(&mut rng, transaction, Some(0)))); + receipts.push(( + receipts.len() as u64, + random_receipt(&mut rng, transaction, Some(0), None), + )); } } db.insert_receipts(receipts).expect("insert receipts"); diff --git a/crates/storage/db-common/Cargo.toml b/crates/storage/db-common/Cargo.toml index 7ddcaaa01b8..a4122ebf5c0 100644 --- a/crates/storage/db-common/Cargo.toml +++ b/crates/storage/db-common/Cargo.toml @@ -22,6 +22,7 @@ reth-stages-types.workspace = true reth-fs-util.workspace = true reth-node-types.workspace = true reth-static-file-types.workspace = true +reth-execution-errors.workspace = true # eth alloy-consensus.workspace = true diff --git a/crates/storage/db-common/src/db_tool/mod.rs b/crates/storage/db-common/src/db_tool/mod.rs index 5866ad8ae2a..e9d7f81b0f6 100644 --- a/crates/storage/db-common/src/db_tool/mod.rs +++ b/crates/storage/db-common/src/db_tool/mod.rs @@ -185,7 +185,7 @@ pub struct ListFilter { impl ListFilter { /// If `search` has a list of bytes, then filter for rows that have this sequence. - pub fn has_search(&self) -> bool { + pub const fn has_search(&self) -> bool { !self.search.is_empty() } diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index a29d02a42c4..d28d9403312 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -8,6 +8,7 @@ use reth_codecs::Compact; use reth_config::config::EtlConfig; use reth_db_api::{tables, transaction::DbTxMut, DatabaseError}; use reth_etl::Collector; +use reth_execution_errors::StateRootError; use reth_primitives_traits::{Account, Bytecode, GotExpected, NodePrimitives, StorageEntry}; use reth_provider::{ errors::provider::ProviderResult, providers::StaticFileWriter, writer::UnifiedStorageWriter, @@ -63,6 +64,9 @@ pub enum InitStorageError { /// Provider error. 
#[error(transparent)] Provider(#[from] ProviderError), + /// State root error while computing the state root + #[error(transparent)] + StateRootError(#[from] StateRootError), /// State root doesn't match the expected one. #[error("state root mismatch: {_0}")] StateRootMismatch(GotExpected<B256>), @@ -88,6 +92,7 @@ where + HeaderProvider + HashingWriter + StateWriter + + TrieWriter + AsRef, PF::ChainSpec: EthChainSpec<Header = <PF::Primitives as NodePrimitives
::BlockHeader>, { @@ -138,6 +143,9 @@ where insert_genesis_state(&provider_rw, alloc.iter())?; + // compute state root to populate trie tables + compute_state_root(&provider_rw)?; + // insert sync stage for stage in StageId::ALL { provider_rw.save_stage_checkpoint(stage, Default::default())?; @@ -552,7 +560,7 @@ where /// Computes the state root (from scratch) based on the accounts and storages present in the /// database. -fn compute_state_root(provider: &Provider) -> eyre::Result +fn compute_state_root(provider: &Provider) -> Result where Provider: DBProvider + TrieWriter, { @@ -572,7 +580,7 @@ where total_flushed_updates += updated_len; trace!(target: "reth::cli", - last_account_key = %state.last_account_key, + last_account_key = %state.account_root_state.last_hashed_key, updated_len, total_flushed_updates, "Flushing trie updates" diff --git a/crates/storage/db-models/src/client_version.rs b/crates/storage/db-models/src/client_version.rs index f6db3c071dc..ce6ced8a653 100644 --- a/crates/storage/db-models/src/client_version.rs +++ b/crates/storage/db-models/src/client_version.rs @@ -18,7 +18,7 @@ pub struct ClientVersion { impl ClientVersion { /// Returns `true` if no version fields are set. - pub fn is_empty(&self) -> bool { + pub const fn is_empty(&self) -> bool { self.version.is_empty() && self.git_sha.is_empty() && self.build_timestamp.is_empty() } } diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index 3234666e7c7..faa784de698 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -99,6 +99,9 @@ pub struct DatabaseArguments { /// /// This flag affects only at environment opening but can't be changed after. exclusive: Option, + /// MDBX allows up to 32767 readers (`MDBX_READERS_LIMIT`). This arg is to configure the max + /// readers. + max_readers: Option, } impl Default for DatabaseArguments { @@ -121,6 +124,7 @@ impl DatabaseArguments { log_level: None, max_read_transaction_duration: None, exclusive: None, + max_readers: None, } } @@ -169,6 +173,12 @@ impl DatabaseArguments { self } + /// Set `max_readers` flag. + pub const fn with_max_readers(mut self, max_readers: Option) -> Self { + self.max_readers = max_readers; + self + } + /// Returns the client version if any. pub const fn client_version(&self) -> &ClientVersion { &self.client_version @@ -375,7 +385,7 @@ impl DatabaseEnv { ..Default::default() }); // Configure more readers - inner_env.set_max_readers(DEFAULT_MAX_READERS); + inner_env.set_max_readers(args.max_readers.unwrap_or(DEFAULT_MAX_READERS)); // This parameter sets the maximum size of the "reclaimed list", and the unit of measurement // is "pages". 
Reclaimed list is the list of freed pages that's populated during the // lifetime of DB transaction, and through which MDBX searches when it needs to insert new diff --git a/crates/storage/errors/src/writer.rs b/crates/storage/errors/src/writer.rs index 3e060d7005d..52a5ba06e5e 100644 --- a/crates/storage/errors/src/writer.rs +++ b/crates/storage/errors/src/writer.rs @@ -2,7 +2,6 @@ use crate::db::DatabaseError; use reth_static_file_types::StaticFileSegment; /// `UnifiedStorageWriter` related errors -/// `StorageWriter` related errors #[derive(Clone, Debug, derive_more::Display, PartialEq, Eq, derive_more::Error)] pub enum UnifiedStorageWriterError { /// Database writer is missing diff --git a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/VERSION.json b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/VERSION.json index 25ba42aca58..534d22e15c6 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/VERSION.json +++ b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/VERSION.json @@ -1 +1 @@ -{ "git_describe": "v0.13.6-0-ga971c76a", "git_timestamp": "2025-04-22T11:53:23+03:00", "git_tree": "4ca2c913e8614a1ed09512353faa227f25245e9f", "git_commit": "a971c76afffbb2ce0aa6151f4683b94fe10dc843", "semver": "0.13.6" } +{ "git_describe": "v0.13.7-0-g566b0f93", "git_timestamp": "2025-07-30T11:44:04+03:00", "git_tree": "7777cbdf5aa4c1ce85ff902a4c3e6170edd42495", "git_commit": "566b0f93c7c9a3bdffb8fb3dc0ce8ca42641bd72", "semver": "0.13.7" } diff --git a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/man1/mdbx_chk.1 b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/man1/mdbx_chk.1 index f323b97de19..bc6de4b7758 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/man1/mdbx_chk.1 +++ b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/man1/mdbx_chk.1 @@ -27,7 +27,7 @@ mdbx_chk \- MDBX checking tool .SH DESCRIPTION The .B mdbx_chk -utility intended to check an MDBX database file. +utility is intended to check an MDBX database file. .SH OPTIONS .TP .BR \-V @@ -55,7 +55,7 @@ check, including full check of all meta-pages and actual size of database file. .BR \-w Open environment in read-write mode and lock for writing while checking. This could be impossible if environment already used by another process(s) -in an incompatible read-write mode. This allow rollback to last steady commit +in an incompatible read-write mode. This allows rollback to last steady commit (in case environment was not closed properly) and then check transaction IDs of meta-pages. Otherwise, without \fB\-w\fP option environment will be opened in read-only mode. @@ -90,7 +90,7 @@ then forcibly loads ones by sequential access and tries to lock database pages i .TP .BR \-n Open MDBX environment(s) which do not use subdirectories. -This is legacy option. For now MDBX handles this automatically. +This is a legacy option. For now MDBX handles this automatically. .SH DIAGNOSTICS Exit status is zero if no errors occur. 
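Returning to the `max_readers` argument added to `DatabaseArguments` above, a hedged usage sketch follows. The import paths are assumed (re-exports may differ between reth versions); only `with_max_readers` itself comes from this diff:

```rust
use reth_db::mdbx::DatabaseArguments; // path assumed
use reth_db::models::ClientVersion; // re-export path assumed

fn main() {
    // Allow up to 1024 concurrent read transactions instead of the
    // DEFAULT_MAX_READERS fallback; MDBX itself caps readers at 32767
    // (MDBX_READERS_LIMIT), per the field docs above.
    let args = DatabaseArguments::new(ClientVersion::default())
        .with_max_readers(Some(1024));
    let _ = args; // would be passed along when opening the database env
}
```

Because the setter takes an `Option`, passing `None` leaves the `DEFAULT_MAX_READERS` behavior unchanged, which keeps existing call sites source-compatible.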
Errors result in a non-zero exit status diff --git a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.c b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.c index 2e80d098da2..ae5de1be4c9 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.c +++ b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.c @@ -4,7 +4,7 @@ #define xMDBX_ALLOY 1 /* alloyed build */ -#define MDBX_BUILD_SOURCERY 4df7f8f177aee7f9f94c4e72f0d732384e9a870d7d79b8142abdeb4633e710cd_v0_13_6_0_ga971c76a +#define MDBX_BUILD_SOURCERY 6b5df6869d2bf5419e3a8189d9cc849cc9911b9c8a951b9750ed0a261ce43724_v0_13_7_0_g566b0f93 #define LIBMDBX_INTERNALS #define MDBX_DEPRECATED @@ -132,6 +132,8 @@ #pragma warning(disable : 6235) /* is always a constant */ #pragma warning(disable : 6237) /* is never evaluated and might \ have side effects */ +#pragma warning(disable : 5286) /* implicit conversion from enum type 'type 1' to enum type 'type 2' */ +#pragma warning(disable : 5287) /* operands are different enum types 'type 1' and 'type 2' */ #endif #pragma warning(disable : 4710) /* 'xyz': function not inlined */ #pragma warning(disable : 4711) /* function 'xyz' selected for automatic \ @@ -441,11 +443,6 @@ __extern_C key_t ftok(const char *, int); #if __ANDROID_API__ >= 21 #include #endif -#if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS != MDBX_WORDBITS -#error "_FILE_OFFSET_BITS != MDBX_WORDBITS" (_FILE_OFFSET_BITS != MDBX_WORDBITS) -#elif defined(__FILE_OFFSET_BITS) && __FILE_OFFSET_BITS != MDBX_WORDBITS -#error "__FILE_OFFSET_BITS != MDBX_WORDBITS" (__FILE_OFFSET_BITS != MDBX_WORDBITS) -#endif #endif /* Android */ #if defined(HAVE_SYS_STAT_H) || __has_include() @@ -530,6 +527,12 @@ __extern_C key_t ftok(const char *, int); #endif #endif /* __BYTE_ORDER__ || __ORDER_LITTLE_ENDIAN__ || __ORDER_BIG_ENDIAN__ */ +#if UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul || defined(_WIN64) +#define MDBX_WORDBITS 64 +#else +#define MDBX_WORDBITS 32 +#endif /* MDBX_WORDBITS */ + /*----------------------------------------------------------------------------*/ /* Availability of CMOV or equivalent */ @@ -1192,7 +1195,14 @@ typedef struct osal_mmap { #elif defined(__ANDROID_API__) #if __ANDROID_API__ < 24 +/* https://android-developers.googleblog.com/2017/09/introducing-android-native-development.html + * https://android.googlesource.com/platform/bionic/+/master/docs/32-bit-abi.md */ #define MDBX_HAVE_PWRITEV 0 +#if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS != MDBX_WORDBITS +#error "_FILE_OFFSET_BITS != MDBX_WORDBITS and __ANDROID_API__ < 24" (_FILE_OFFSET_BITS != MDBX_WORDBITS) +#elif defined(__FILE_OFFSET_BITS) && __FILE_OFFSET_BITS != MDBX_WORDBITS +#error "__FILE_OFFSET_BITS != MDBX_WORDBITS and __ANDROID_API__ < 24" (__FILE_OFFSET_BITS != MDBX_WORDBITS) +#endif #else #define MDBX_HAVE_PWRITEV 1 #endif @@ -1578,12 +1588,6 @@ MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline uint32_t osal_bswap32 #endif } -#if UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul || defined(_WIN64) -#define MDBX_WORDBITS 64 -#else -#define MDBX_WORDBITS 32 -#endif /* MDBX_WORDBITS */ - /******************************************************************************* ******************************************************************************* * @@ -5134,7 +5138,7 @@ MDBX_CONST_FUNCTION static inline lck_t *lckless_stub(const MDBX_env *env) { } #if !(defined(_WIN32) || defined(_WIN64)) -MDBX_MAYBE_UNUSED static inline int ignore_enosys(int err) { +MDBX_CONST_FUNCTION static inline int ignore_enosys(int err) { #ifdef ENOSYS if (err == 
ENOSYS) return MDBX_RESULT_TRUE; @@ -5155,10 +5159,21 @@ MDBX_MAYBE_UNUSED static inline int ignore_enosys(int err) { if (err == EOPNOTSUPP) return MDBX_RESULT_TRUE; #endif /* EOPNOTSUPP */ - if (err == EAGAIN) - return MDBX_RESULT_TRUE; return err; } + +MDBX_MAYBE_UNUSED MDBX_CONST_FUNCTION static inline int ignore_enosys_and_eagain(int err) { + return (err == EAGAIN) ? MDBX_RESULT_TRUE : ignore_enosys(err); +} + +MDBX_MAYBE_UNUSED MDBX_CONST_FUNCTION static inline int ignore_enosys_and_einval(int err) { + return (err == EINVAL) ? MDBX_RESULT_TRUE : ignore_enosys(err); +} + +MDBX_MAYBE_UNUSED MDBX_CONST_FUNCTION static inline int ignore_enosys_and_eremote(int err) { + return (err == MDBX_EREMOTE) ? MDBX_RESULT_TRUE : ignore_enosys(err); +} + #endif /* defined(_WIN32) || defined(_WIN64) */ static inline int check_env(const MDBX_env *env, const bool wanna_active) { @@ -7916,7 +7931,7 @@ __cold static int copy_asis(MDBX_env *env, MDBX_txn *txn, mdbx_filehandle_t fd, continue; } rc = MDBX_ENODATA; - if (written == 0 || ignore_enosys(rc = errno) != MDBX_RESULT_TRUE) + if (written == 0 || ignore_enosys_and_eagain(rc = errno) != MDBX_RESULT_TRUE) break; sendfile_unavailable = true; } @@ -7940,7 +7955,7 @@ __cold static int copy_asis(MDBX_env *env, MDBX_txn *txn, mdbx_filehandle_t fd, maybe useful for others FS */ EINVAL) not_the_same_filesystem = true; - else if (ignore_enosys(rc) == MDBX_RESULT_TRUE) + else if (ignore_enosys_and_eagain(rc) == MDBX_RESULT_TRUE) copyfilerange_unavailable = true; else break; @@ -8068,35 +8083,67 @@ __cold static int copy2pathname(MDBX_txn *txn, const pathchar_t *dest_path, MDBX S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP #endif ); + if (unlikely(rc != MDBX_SUCCESS)) + return rc; #if defined(_WIN32) || defined(_WIN64) /* no locking required since the file opened with ShareMode == 0 */ #else - if (rc == MDBX_SUCCESS) { - MDBX_STRUCT_FLOCK lock_op; - memset(&lock_op, 0, sizeof(lock_op)); - lock_op.l_type = F_WRLCK; - lock_op.l_whence = SEEK_SET; - lock_op.l_start = 0; - lock_op.l_len = OFF_T_MAX; - if (MDBX_FCNTL(newfd, MDBX_F_SETLK, &lock_op)) - rc = errno; - } + MDBX_STRUCT_FLOCK lock_op; + memset(&lock_op, 0, sizeof(lock_op)); + lock_op.l_type = F_WRLCK; + lock_op.l_whence = SEEK_SET; + lock_op.l_start = 0; + lock_op.l_len = OFF_T_MAX; + const int err_fcntl = MDBX_FCNTL(newfd, MDBX_F_SETLK, &lock_op) ? errno : MDBX_SUCCESS; -#if defined(LOCK_EX) && (!defined(__ANDROID_API__) || __ANDROID_API__ >= 24) - if (rc == MDBX_SUCCESS && flock(newfd, LOCK_EX | LOCK_NB)) { - const int err_flock = errno, err_fs = osal_check_fs_local(newfd, 0); - if (err_flock != EAGAIN || err_fs != MDBX_EREMOTE) { - ERROR("%s flock(%" MDBX_PRIsPATH ") error %d, remote-fs check status %d", "unexpected", dest_path, err_flock, - err_fs); - rc = err_flock; - } else { - WARNING("%s flock(%" MDBX_PRIsPATH ") error %d, remote-fs check status %d", "ignore", dest_path, err_flock, - err_fs); + const int err_flock = +#ifdef LOCK_EX + flock(newfd, LOCK_EX | LOCK_NB) ? errno : MDBX_SUCCESS; +#else + MDBX_ENOSYS; +#endif /* LOCK_EX */ + + const int err_check_fs_local = + /* avoid call osal_check_fs_local() on success */ + (!err_fcntl && !err_flock && !MDBX_DEBUG) ? 
MDBX_SUCCESS : +#if !defined(__ANDROID_API__) || __ANDROID_API__ >= 24 + osal_check_fs_local(newfd, 0); +#else + MDBX_ENOSYS; +#endif + + const bool flock_may_fail = +#if defined(__linux__) || defined(__gnu_linux__) + err_check_fs_local != 0; +#else + true; +#endif /* Linux */ + + if (!err_fcntl && + (err_flock == EWOULDBLOCK || err_flock == EAGAIN || ignore_enosys_and_eremote(err_flock) == MDBX_RESULT_TRUE)) { + rc = err_flock; + if (flock_may_fail) { + WARNING("ignore %s(%" MDBX_PRIsPATH ") error %d: since %s done, local/remote-fs check %d", "flock", dest_path, + err_flock, "fcntl-lock", err_check_fs_local); + rc = MDBX_SUCCESS; } + } else if (!err_flock && err_check_fs_local == MDBX_RESULT_TRUE && + ignore_enosys_and_eremote(err_fcntl) == MDBX_RESULT_TRUE) { + WARNING("ignore %s(%" MDBX_PRIsPATH ") error %d: since %s done, local/remote-fs check %d", "fcntl-lock", dest_path, + err_fcntl, "flock", err_check_fs_local); + } else if (err_fcntl || err_flock) { + ERROR("file-lock(%" MDBX_PRIsPATH ") failed: fcntl-lock %d, flock %d, local/remote-fs check %d", dest_path, + err_fcntl, err_flock, err_check_fs_local); + if (err_fcntl == ENOLCK || err_flock == ENOLCK) + rc = ENOLCK; + else if (err_fcntl == EWOULDBLOCK || err_flock == EWOULDBLOCK) + rc = EWOULDBLOCK; + else if (EWOULDBLOCK != EAGAIN && (err_fcntl == EAGAIN || err_flock == EAGAIN)) + rc = EAGAIN; + else + rc = (err_fcntl && ignore_enosys_and_eremote(err_fcntl) != MDBX_RESULT_TRUE) ? err_fcntl : err_flock; } -#endif /* LOCK_EX && ANDROID_API >= 24 */ - #endif /* Windows / POSIX */ if (rc == MDBX_SUCCESS) @@ -13957,7 +14004,7 @@ __cold static void MDBX_PRINTF_ARGS(5, 6) issue->next = chk->usr->scope->issues; chk->usr->scope->issues = issue; } else - chk_error_rc(scope, ENOMEM, "adding issue"); + chk_error_rc(scope, MDBX_ENOMEM, "adding issue"); } va_list args; @@ -18491,7 +18538,7 @@ __noinline int dbi_import(MDBX_txn *txn, const size_t dbi) { /* dbi-слот еще не инициализирован в транзакции, а хендл не использовался */ txn->cursors[dbi] = nullptr; MDBX_txn *const parent = txn->parent; - if (parent) { + if (unlikely(parent)) { /* вложенная пишущая транзакция */ int rc = dbi_check(parent, dbi); /* копируем состояние table очищая new-флаги. */ @@ -18514,26 +18561,31 @@ __noinline int dbi_import(MDBX_txn *txn, const size_t dbi) { txn->dbi_state[dbi] = DBI_LINDO; } else { eASSERT(env, txn->dbi_seqs[dbi] != env->dbi_seqs[dbi].weak); - if (unlikely((txn->dbi_state[dbi] & (DBI_VALID | DBI_OLDEN)) || txn->cursors[dbi])) { + if (unlikely(txn->cursors[dbi])) { + /* хендл уже использовался в транзакции и остались висячие курсоры */ + txn->dbi_seqs[dbi] = env->dbi_seqs[dbi].weak; + txn->dbi_state[dbi] = DBI_OLDEN | DBI_LINDO; + return MDBX_DANGLING_DBI; + } + if (unlikely(txn->dbi_state[dbi] & (DBI_OLDEN | DBI_VALID))) { /* хендл уже использовался в транзакции, но был закрыт или переоткрыт, - * либо при явном пере-открытии хендла есть висячие курсоры */ - eASSERT(env, (txn->dbi_state[dbi] & DBI_STALE) == 0); + * висячих курсоров нет */ txn->dbi_seqs[dbi] = env->dbi_seqs[dbi].weak; txn->dbi_state[dbi] = DBI_OLDEN | DBI_LINDO; - return txn->cursors[dbi] ? 
MDBX_DANGLING_DBI : MDBX_BAD_DBI; + return MDBX_BAD_DBI; } } /* хендл не использовался в транзакции, либо явно пере-отрывается при * отсутствии висячих курсоров */ - eASSERT(env, (txn->dbi_state[dbi] & DBI_LINDO) && !txn->cursors[dbi]); + eASSERT(env, (txn->dbi_state[dbi] & (DBI_LINDO | DBI_VALID)) == DBI_LINDO && !txn->cursors[dbi]); /* читаем актуальные флаги и sequence */ struct dbi_snap_result snap = dbi_snap(env, dbi); txn->dbi_seqs[dbi] = snap.sequence; if (snap.flags & DB_VALID) { txn->dbs[dbi].flags = snap.flags & DB_PERSISTENT_FLAGS; - txn->dbi_state[dbi] = DBI_LINDO | DBI_VALID | DBI_STALE; + txn->dbi_state[dbi] = (dbi >= CORE_DBS) ? DBI_LINDO | DBI_VALID | DBI_STALE : DBI_LINDO | DBI_VALID; return MDBX_SUCCESS; } return MDBX_BAD_DBI; @@ -18787,7 +18839,7 @@ static int dbi_open_locked(MDBX_txn *txn, unsigned user_flags, MDBX_dbi *dbi, MD slot = (slot < scan) ? slot : scan; continue; } - if (!env->kvs[MAIN_DBI].clc.k.cmp(&name, &env->kvs[scan].name)) { + if (env->kvs[MAIN_DBI].clc.k.cmp(&name, &env->kvs[scan].name) == 0) { slot = scan; int err = dbi_check(txn, slot); if (err == MDBX_BAD_DBI && txn->dbi_state[slot] == (DBI_OLDEN | DBI_LINDO)) { @@ -18943,54 +18995,68 @@ int dbi_open(MDBX_txn *txn, const MDBX_val *const name, unsigned user_flags, MDB #if MDBX_ENABLE_DBI_LOCKFREE /* Is the DB already open? */ const MDBX_env *const env = txn->env; - size_t free_slot = env->n_dbi; + bool have_free_slot = env->n_dbi < env->max_dbi; for (size_t i = CORE_DBS; i < env->n_dbi; ++i) { - retry: if ((env->dbs_flags[i] & DB_VALID) == 0) { - free_slot = i; + have_free_slot = true; continue; } - const uint32_t snap_seq = atomic_load32(&env->dbi_seqs[i], mo_AcquireRelease); - const uint16_t snap_flags = env->dbs_flags[i]; + struct dbi_snap_result snap = dbi_snap(env, i); const MDBX_val snap_name = env->kvs[i].name; - if (user_flags != MDBX_ACCEDE && - (((user_flags ^ snap_flags) & DB_PERSISTENT_FLAGS) || (keycmp && keycmp != env->kvs[i].clc.k.cmp) || - (datacmp && datacmp != env->kvs[i].clc.v.cmp))) - continue; const uint32_t main_seq = atomic_load32(&env->dbi_seqs[MAIN_DBI], mo_AcquireRelease); MDBX_cmp_func *const snap_cmp = env->kvs[MAIN_DBI].clc.k.cmp; - if (unlikely(!(snap_flags & DB_VALID) || !snap_name.iov_base || !snap_name.iov_len || !snap_cmp)) - continue; + if (unlikely(!(snap.flags & DB_VALID) || !snap_name.iov_base || !snap_name.iov_len || !snap_cmp)) + /* похоже на столкновение с параллельно работающим обновлением */ + goto slowpath_locking; const bool name_match = snap_cmp(&snap_name, name) == 0; - osal_flush_incoherent_cpu_writeback(); - if (unlikely(snap_seq != atomic_load32(&env->dbi_seqs[i], mo_AcquireRelease) || + if (unlikely(snap.sequence != atomic_load32(&env->dbi_seqs[i], mo_AcquireRelease) || main_seq != atomic_load32(&env->dbi_seqs[MAIN_DBI], mo_AcquireRelease) || - snap_flags != env->dbs_flags[i] || snap_name.iov_base != env->kvs[i].name.iov_base || + snap.flags != env->dbs_flags[i] || snap_name.iov_base != env->kvs[i].name.iov_base || snap_name.iov_len != env->kvs[i].name.iov_len)) - goto retry; - if (name_match) { + /* похоже на столкновение с параллельно работающим обновлением */ + goto slowpath_locking; + + if (!name_match) + continue; + + osal_flush_incoherent_cpu_writeback(); + if (user_flags != MDBX_ACCEDE && + (((user_flags ^ snap.flags) & DB_PERSISTENT_FLAGS) || (keycmp && keycmp != env->kvs[i].clc.k.cmp) || + (datacmp && datacmp != env->kvs[i].clc.v.cmp))) + /* есть подозрение что пользователь открывает таблицу с другими флагами/атрибутами + * или другими 
компараторами, поэтому уходим в безопасный режим */ + goto slowpath_locking; + + rc = dbi_check(txn, i); + if (rc == MDBX_BAD_DBI && txn->dbi_state[i] == (DBI_OLDEN | DBI_LINDO)) { + /* хендл использовался, стал невалидным, + * но теперь явно пере-открывается в этой транзакци */ + eASSERT(env, !txn->cursors[i]); + txn->dbi_state[i] = DBI_LINDO; rc = dbi_check(txn, i); - if (rc == MDBX_BAD_DBI && txn->dbi_state[i] == (DBI_OLDEN | DBI_LINDO)) { - /* хендл использовался, стал невалидным, - * но теперь явно пере-открывается в этой транзакци */ - eASSERT(env, !txn->cursors[i]); - txn->dbi_state[i] = DBI_LINDO; - rc = dbi_check(txn, i); - } - if (likely(rc == MDBX_SUCCESS)) { - rc = dbi_bind(txn, i, user_flags, keycmp, datacmp); - if (likely(rc == MDBX_SUCCESS)) - *dbi = (MDBX_dbi)i; - } - return rc; } + if (likely(rc == MDBX_SUCCESS)) { + if (unlikely(snap.sequence != atomic_load32(&env->dbi_seqs[i], mo_AcquireRelease) || + main_seq != atomic_load32(&env->dbi_seqs[MAIN_DBI], mo_AcquireRelease) || + snap.flags != env->dbs_flags[i] || snap_name.iov_base != env->kvs[i].name.iov_base || + snap_name.iov_len != env->kvs[i].name.iov_len)) + /* похоже на столкновение с параллельно работающим обновлением */ + goto slowpath_locking; + rc = dbi_bind(txn, i, user_flags, keycmp, datacmp); + if (likely(rc == MDBX_SUCCESS)) + *dbi = (MDBX_dbi)i; + } + return rc; } /* Fail, if no free slot and max hit */ - if (unlikely(free_slot >= env->max_dbi)) + if (unlikely(!have_free_slot)) return MDBX_DBS_FULL; + +slowpath_locking: + #endif /* MDBX_ENABLE_DBI_LOCKFREE */ rc = osal_fastmutex_acquire(&txn->env->dbi_lock); @@ -19821,13 +19887,14 @@ __cold int dxb_resize(MDBX_env *const env, const pgno_t used_pgno, const pgno_t rc = MDBX_RESULT_TRUE; #if defined(MADV_REMOVE) if (env->flags & MDBX_WRITEMAP) - rc = madvise(ptr_disp(env->dxb_mmap.base, size_bytes), prev_size - size_bytes, MADV_REMOVE) ? ignore_enosys(errno) - : MDBX_SUCCESS; + rc = madvise(ptr_disp(env->dxb_mmap.base, size_bytes), prev_size - size_bytes, MADV_REMOVE) + ? ignore_enosys_and_eagain(errno) + : MDBX_SUCCESS; #endif /* MADV_REMOVE */ #if defined(MADV_DONTNEED) if (rc == MDBX_RESULT_TRUE) rc = madvise(ptr_disp(env->dxb_mmap.base, size_bytes), prev_size - size_bytes, MADV_DONTNEED) - ? ignore_enosys(errno) + ? ignore_enosys_and_eagain(errno) : MDBX_SUCCESS; #elif defined(POSIX_MADV_DONTNEED) if (rc == MDBX_RESULT_TRUE) @@ -20013,7 +20080,7 @@ __cold int dxb_set_readahead(const MDBX_env *env, const pgno_t edge, const bool void *const ptr = ptr_disp(env->dxb_mmap.base, offset); if (enable) { #if defined(MADV_NORMAL) - err = madvise(ptr, length, MADV_NORMAL) ? ignore_enosys(errno) : MDBX_SUCCESS; + err = madvise(ptr, length, MADV_NORMAL) ? ignore_enosys_and_eagain(errno) : MDBX_SUCCESS; if (unlikely(MDBX_IS_ERROR(err))) return err; #elif defined(POSIX_MADV_NORMAL) @@ -20041,7 +20108,7 @@ __cold int dxb_set_readahead(const MDBX_env *env, const pgno_t edge, const bool hint.ra_count = unlikely(length > INT_MAX && sizeof(length) > sizeof(hint.ra_count)) ? INT_MAX : (int)length; (void)/* Ignore ENOTTY for DB on the ram-disk and so on */ fcntl(env->lazy_fd, F_RDADVISE, &hint); #elif defined(MADV_WILLNEED) - err = madvise(ptr, length, MADV_WILLNEED) ? ignore_enosys(errno) : MDBX_SUCCESS; + err = madvise(ptr, length, MADV_WILLNEED) ? 
ignore_enosys_and_eagain(errno) : MDBX_SUCCESS; if (unlikely(MDBX_IS_ERROR(err))) return err; #elif defined(POSIX_MADV_WILLNEED) @@ -20066,7 +20133,7 @@ __cold int dxb_set_readahead(const MDBX_env *env, const pgno_t edge, const bool } else { mincore_clean_cache(env); #if defined(MADV_RANDOM) - err = madvise(ptr, length, MADV_RANDOM) ? ignore_enosys(errno) : MDBX_SUCCESS; + err = madvise(ptr, length, MADV_RANDOM) ? ignore_enosys_and_eagain(errno) : MDBX_SUCCESS; if (unlikely(MDBX_IS_ERROR(err))) return err; #elif defined(POSIX_MADV_RANDOM) @@ -20273,14 +20340,16 @@ __cold int dxb_setup(MDBX_env *env, const int lck_rc, const mdbx_mode_t mode_bit return err; #if defined(MADV_DONTDUMP) - err = madvise(env->dxb_mmap.base, env->dxb_mmap.limit, MADV_DONTDUMP) ? ignore_enosys(errno) : MDBX_SUCCESS; + err = + madvise(env->dxb_mmap.base, env->dxb_mmap.limit, MADV_DONTDUMP) ? ignore_enosys_and_eagain(errno) : MDBX_SUCCESS; if (unlikely(MDBX_IS_ERROR(err))) return err; #endif /* MADV_DONTDUMP */ #if defined(MADV_DODUMP) if (globals.runtime_flags & MDBX_DBG_DUMP) { const size_t meta_length_aligned2os = pgno_align2os_bytes(env, NUM_METAS); - err = madvise(env->dxb_mmap.base, meta_length_aligned2os, MADV_DODUMP) ? ignore_enosys(errno) : MDBX_SUCCESS; + err = madvise(env->dxb_mmap.base, meta_length_aligned2os, MADV_DODUMP) ? ignore_enosys_and_eagain(errno) + : MDBX_SUCCESS; if (unlikely(MDBX_IS_ERROR(err))) return err; } @@ -20519,7 +20588,7 @@ __cold int dxb_setup(MDBX_env *env, const int lck_rc, const mdbx_mode_t mode_bit bytes2pgno(env, env->dxb_mmap.current)); err = madvise(ptr_disp(env->dxb_mmap.base, used_aligned2os_bytes), env->dxb_mmap.current - used_aligned2os_bytes, MADV_REMOVE) - ? ignore_enosys(errno) + ? ignore_enosys_and_eagain(errno) : MDBX_SUCCESS; if (unlikely(MDBX_IS_ERROR(err))) return err; @@ -20529,7 +20598,7 @@ __cold int dxb_setup(MDBX_env *env, const int lck_rc, const mdbx_mode_t mode_bit NOTICE("open-MADV_%s %u..%u", "DONTNEED", env->lck->discarded_tail.weak, bytes2pgno(env, env->dxb_mmap.current)); err = madvise(ptr_disp(env->dxb_mmap.base, used_aligned2os_bytes), env->dxb_mmap.current - used_aligned2os_bytes, MADV_DONTNEED) - ? ignore_enosys(errno) + ? ignore_enosys_and_eagain(errno) : MDBX_SUCCESS; if (unlikely(MDBX_IS_ERROR(err))) return err; @@ -20621,7 +20690,7 @@ int dxb_sync_locked(MDBX_env *env, unsigned flags, meta_t *const pending, troika #endif /* MADV_FREE */ int err = madvise(ptr_disp(env->dxb_mmap.base, discard_edge_bytes), prev_discarded_bytes - discard_edge_bytes, advise) - ? ignore_enosys(errno) + ? ignore_enosys_and_eagain(errno) : MDBX_SUCCESS; #else int err = ignore_enosys(posix_madvise(ptr_disp(env->dxb_mmap.base, discard_edge_bytes), @@ -22394,10 +22463,8 @@ pgr_t gc_alloc_ex(const MDBX_cursor *const mc, const size_t num, uint8_t flags) //--------------------------------------------------------------------------- - if (unlikely(!is_gc_usable(txn, mc, flags))) { - eASSERT(env, (txn->flags & txn_gc_drained) || num > 1); + if (unlikely(!is_gc_usable(txn, mc, flags))) goto no_gc; - } eASSERT(env, (flags & (ALLOC_COALESCE | ALLOC_LIFO | ALLOC_SHOULD_SCAN)) == 0); flags += (env->flags & MDBX_LIFORECLAIM) ? 
ALLOC_LIFO : 0; @@ -24391,10 +24458,11 @@ __cold static void choice_fcntl(void) { static int lck_op(const mdbx_filehandle_t fd, int cmd, const int lck, const off_t offset, off_t len) { STATIC_ASSERT(sizeof(off_t) >= sizeof(void *) && sizeof(off_t) >= sizeof(size_t)); -#ifdef __ANDROID_API__ - STATIC_ASSERT_MSG((sizeof(off_t) * 8 == MDBX_WORDBITS), "The bitness of system `off_t` type is mismatch. Please " - "fix build and/or NDK configuration."); -#endif /* Android */ +#if defined(__ANDROID_API__) && __ANDROID_API__ < 24 + STATIC_ASSERT_MSG((sizeof(off_t) * CHAR_BIT == MDBX_WORDBITS), + "The bitness of system `off_t` type is mismatch. Please " + "fix build and/or NDK configuration."); +#endif /* Android && API < 24 */ assert(offset >= 0 && len > 0); assert((uint64_t)offset < (uint64_t)INT64_MAX && (uint64_t)len < (uint64_t)INT64_MAX && (uint64_t)(offset + len) > (uint64_t)offset); @@ -24430,7 +24498,8 @@ static int lck_op(const mdbx_filehandle_t fd, int cmd, const int lck, const off_ } rc = errno; #if MDBX_USE_OFDLOCKS - if (rc == EINVAL && (cmd == MDBX_F_OFD_SETLK || cmd == MDBX_F_OFD_SETLKW || cmd == MDBX_F_OFD_GETLK)) { + if (ignore_enosys_and_einval(rc) == MDBX_RESULT_TRUE && + (cmd == MDBX_F_OFD_SETLK || cmd == MDBX_F_OFD_SETLKW || cmd == MDBX_F_OFD_GETLK)) { /* fallback to non-OFD locks */ if (cmd == MDBX_F_OFD_SETLK) cmd = MDBX_F_SETLK; @@ -24758,6 +24827,10 @@ __cold MDBX_INTERNAL int lck_destroy(MDBX_env *env, MDBX_env *inprocess_neighbor jitter4testing(false); } +#if MDBX_LOCKING == MDBX_LOCKING_SYSV + env->me_sysv_ipc.semid = -1; +#endif /* MDBX_LOCKING */ + if (current_pid != env->pid) { eASSERT(env, !inprocess_neighbor); NOTICE("drown env %p after-fork pid %d -> %d", __Wpedantic_format_voidptr(env), env->pid, current_pid); @@ -25074,14 +25147,14 @@ static int osal_ipclock_lock(MDBX_env *env, osal_ipclock_t *ipc, const bool dont return rc; } -int osal_ipclock_unlock(MDBX_env *env, osal_ipclock_t *ipc) { +static int osal_ipclock_unlock(MDBX_env *env, osal_ipclock_t *ipc) { int err = MDBX_ENOSYS; #if MDBX_LOCKING == MDBX_LOCKING_POSIX2001 || MDBX_LOCKING == MDBX_LOCKING_POSIX2008 err = pthread_mutex_unlock(ipc); #elif MDBX_LOCKING == MDBX_LOCKING_POSIX1988 err = sem_post(ipc) ? errno : MDBX_SUCCESS; #elif MDBX_LOCKING == MDBX_LOCKING_SYSV - if (unlikely(*ipc != (pid_t)env->pid)) + if (unlikely(*ipc != (pid_t)env->pid || env->me_sysv_ipc.key == -1)) err = EPERM; else { *ipc = 0; @@ -25121,7 +25194,6 @@ MDBX_INTERNAL void lck_rdt_unlock(MDBX_env *env) { int lck_txn_lock(MDBX_env *env, bool dont_wait) { TRACE("%swait %s", dont_wait ? 
"dont-" : "", ">>"); - eASSERT(env, env->basal_txn || (env->lck == lckless_stub(env) && (env->flags & MDBX_RDONLY))); jitter4testing(true); const int err = osal_ipclock_lock(env, &env->lck->wrt_lock, dont_wait); int rc = err; @@ -25139,10 +25211,8 @@ int lck_txn_lock(MDBX_env *env, bool dont_wait) { void lck_txn_unlock(MDBX_env *env) { TRACE("%s", ">>"); if (env->basal_txn) { - eASSERT(env, !env->basal_txn || env->basal_txn->owner == osal_thread_self()); + eASSERT(env, env->basal_txn->owner == osal_thread_self()); env->basal_txn->owner = 0; - } else { - eASSERT(env, env->lck == lckless_stub(env) && (env->flags & MDBX_RDONLY)); } int err = osal_ipclock_unlock(env, &env->lck->wrt_lock); TRACE("<< err %d", err); @@ -25239,7 +25309,7 @@ int lck_txn_lock(MDBX_env *env, bool dontwait) { } } - eASSERT(env, !env->basal_txn->owner); + eASSERT(env, !env->basal_txn || !env->basal_txn->owner); if (env->flags & MDBX_EXCLUSIVE) goto done; @@ -25256,10 +25326,11 @@ int lck_txn_lock(MDBX_env *env, bool dontwait) { } if (rc == MDBX_SUCCESS) { done: + if (env->basal_txn) + env->basal_txn->owner = osal_thread_self(); /* Zap: Failing to release lock 'env->windowsbug_lock' * in function 'mdbx_txn_lock' */ MDBX_SUPPRESS_GOOFY_MSVC_ANALYZER(26115); - env->basal_txn->owner = osal_thread_self(); return MDBX_SUCCESS; } @@ -25268,14 +25339,15 @@ int lck_txn_lock(MDBX_env *env, bool dontwait) { } void lck_txn_unlock(MDBX_env *env) { - eASSERT(env, env->basal_txn->owner == osal_thread_self()); + eASSERT(env, !env->basal_txn || env->basal_txn->owner == osal_thread_self()); if ((env->flags & MDBX_EXCLUSIVE) == 0) { const HANDLE fd4data = env->ioring.overlapped_fd ? env->ioring.overlapped_fd : env->lazy_fd; int err = funlock(fd4data, DXB_BODY); if (err != MDBX_SUCCESS) mdbx_panic("%s failed: err %u", __func__, err); } - env->basal_txn->owner = 0; + if (env->basal_txn) + env->basal_txn->owner = 0; LeaveCriticalSection(&env->windowsbug_lock); } @@ -25824,13 +25896,13 @@ __cold static int lck_setup_locked(MDBX_env *env) { return err; #ifdef MADV_DODUMP - err = madvise(env->lck_mmap.lck, size, MADV_DODUMP) ? ignore_enosys(errno) : MDBX_SUCCESS; + err = madvise(env->lck_mmap.lck, size, MADV_DODUMP) ? ignore_enosys_and_eagain(errno) : MDBX_SUCCESS; if (unlikely(MDBX_IS_ERROR(err))) return err; #endif /* MADV_DODUMP */ #ifdef MADV_WILLNEED - err = madvise(env->lck_mmap.lck, size, MADV_WILLNEED) ? ignore_enosys(errno) : MDBX_SUCCESS; + err = madvise(env->lck_mmap.lck, size, MADV_WILLNEED) ? 
ignore_enosys_and_eagain(errno) : MDBX_SUCCESS; if (unlikely(MDBX_IS_ERROR(err))) return err; #elif defined(POSIX_MADV_WILLNEED) @@ -36483,7 +36555,7 @@ int txn_renew(MDBX_txn *txn, unsigned flags) { txn->dbi_seqs[FREE_DBI] = 0; txn->dbi_seqs[MAIN_DBI] = atomic_load32(&env->dbi_seqs[MAIN_DBI], mo_AcquireRelease); - if (unlikely(env->dbs_flags[MAIN_DBI] != (DB_VALID | txn->dbs[MAIN_DBI].flags))) { + if (unlikely(env->dbs_flags[MAIN_DBI] != (DB_VALID | txn->dbs[MAIN_DBI].flags) || !txn->dbi_seqs[MAIN_DBI])) { const bool need_txn_lock = env->basal_txn && env->basal_txn->owner != osal_thread_self(); bool should_unlock = false; if (need_txn_lock) { @@ -36495,24 +36567,24 @@ int txn_renew(MDBX_txn *txn, unsigned flags) { } rc = osal_fastmutex_acquire(&env->dbi_lock); if (likely(rc == MDBX_SUCCESS)) { - uint32_t seq = dbi_seq_next(env, MAIN_DBI); /* проверяем повторно после захвата блокировки */ + uint32_t seq = atomic_load32(&env->dbi_seqs[MAIN_DBI], mo_AcquireRelease); if (env->dbs_flags[MAIN_DBI] != (DB_VALID | txn->dbs[MAIN_DBI].flags)) { - if (!need_txn_lock || should_unlock || - /* если нет активной пишущей транзакции, - * то следующая будет ждать на dbi_lock */ - !env->txn) { - if (env->dbs_flags[MAIN_DBI] != 0 || MDBX_DEBUG) + if (!(env->dbs_flags[MAIN_DBI] & DB_VALID) || !need_txn_lock || should_unlock || + /* если нет активной пишущей транзакции, * то следующая будет ждать на dbi_lock */ !env->txn) { + if (env->dbs_flags[MAIN_DBI] & DB_VALID) { NOTICE("renew MainDB for %s-txn %" PRIaTXN " since db-flags changes 0x%x -> 0x%x", (txn->flags & MDBX_TXN_RDONLY) ? "ro" : "rw", txn->txnid, env->dbs_flags[MAIN_DBI] & ~DB_VALID, txn->dbs[MAIN_DBI].flags); - env->dbs_flags[MAIN_DBI] = DB_POISON; - atomic_store32(&env->dbi_seqs[MAIN_DBI], seq, mo_AcquireRelease); + seq = dbi_seq_next(env, MAIN_DBI); + env->dbs_flags[MAIN_DBI] = DB_POISON; + atomic_store32(&env->dbi_seqs[MAIN_DBI], seq, mo_AcquireRelease); + } rc = tbl_setup(env, &env->kvs[MAIN_DBI], &txn->dbs[MAIN_DBI]); if (likely(rc == MDBX_SUCCESS)) { seq = dbi_seq_next(env, MAIN_DBI); env->dbs_flags[MAIN_DBI] = DB_VALID | txn->dbs[MAIN_DBI].flags; - txn->dbi_seqs[MAIN_DBI] = atomic_store32(&env->dbi_seqs[MAIN_DBI], seq, mo_AcquireRelease); + atomic_store32(&env->dbi_seqs[MAIN_DBI], seq, mo_AcquireRelease); } } else { ERROR("MainDB db-flags changes 0x%x -> 0x%x ahead of read-txn " @@ -36521,6 +36593,7 @@ int txn_renew(MDBX_txn *txn, unsigned flags) { rc = MDBX_INCOMPATIBLE; } } + txn->dbi_seqs[MAIN_DBI] = seq; ENSURE(env, osal_fastmutex_release(&env->dbi_lock) == MDBX_SUCCESS); } else { DEBUG("dbi_lock failed, err %d", rc); @@ -37378,11 +37451,11 @@ __dll_export const struct MDBX_version_info mdbx_version = { 0, 13, - 6, + 7, 0, "", /* pre-release suffix of SemVer - 0.13.6 */ - {"2025-04-22T11:53:23+03:00", "4ca2c913e8614a1ed09512353faa227f25245e9f", "a971c76afffbb2ce0aa6151f4683b94fe10dc843", "v0.13.6-0-ga971c76a"}, + 0.13.7 */ + {"2025-07-30T11:44:04+03:00", "7777cbdf5aa4c1ce85ff902a4c3e6170edd42495", "566b0f93c7c9a3bdffb8fb3dc0ce8ca42641bd72", "v0.13.7-0-g566b0f93"}, sourcery}; __dll_export diff --git a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.c++ b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.c++ index a6ccd34274d..27220d53088 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.c++ +++ b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.c++ @@ -2,7 +2,7 @@ /// \author Леонид Юрьев aka Leonid Yuriev \date 2015-2025 /* clang-format off */ -#define MDBX_BUILD_SOURCERY 
4df7f8f177aee7f9f94c4e72f0d732384e9a870d7d79b8142abdeb4633e710cd_v0_13_6_0_ga971c76a +#define MDBX_BUILD_SOURCERY 6b5df6869d2bf5419e3a8189d9cc849cc9911b9c8a951b9750ed0a261ce43724_v0_13_7_0_g566b0f93 #define LIBMDBX_INTERNALS #define MDBX_DEPRECATED @@ -130,6 +130,8 @@ #pragma warning(disable : 6235) /* is always a constant */ #pragma warning(disable : 6237) /* is never evaluated and might \ have side effects */ +#pragma warning(disable : 5286) /* implicit conversion from enum type 'type 1' to enum type 'type 2' */ +#pragma warning(disable : 5287) /* operands are different enum types 'type 1' and 'type 2' */ #endif #pragma warning(disable : 4710) /* 'xyz': function not inlined */ #pragma warning(disable : 4711) /* function 'xyz' selected for automatic \ @@ -439,11 +441,6 @@ __extern_C key_t ftok(const char *, int); #if __ANDROID_API__ >= 21 #include #endif -#if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS != MDBX_WORDBITS -#error "_FILE_OFFSET_BITS != MDBX_WORDBITS" (_FILE_OFFSET_BITS != MDBX_WORDBITS) -#elif defined(__FILE_OFFSET_BITS) && __FILE_OFFSET_BITS != MDBX_WORDBITS -#error "__FILE_OFFSET_BITS != MDBX_WORDBITS" (__FILE_OFFSET_BITS != MDBX_WORDBITS) -#endif #endif /* Android */ #if defined(HAVE_SYS_STAT_H) || __has_include() @@ -528,6 +525,12 @@ __extern_C key_t ftok(const char *, int); #endif #endif /* __BYTE_ORDER__ || __ORDER_LITTLE_ENDIAN__ || __ORDER_BIG_ENDIAN__ */ +#if UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul || defined(_WIN64) +#define MDBX_WORDBITS 64 +#else +#define MDBX_WORDBITS 32 +#endif /* MDBX_WORDBITS */ + /*----------------------------------------------------------------------------*/ /* Availability of CMOV or equivalent */ @@ -1190,7 +1193,14 @@ typedef struct osal_mmap { #elif defined(__ANDROID_API__) #if __ANDROID_API__ < 24 +/* https://android-developers.googleblog.com/2017/09/introducing-android-native-development.html + * https://android.googlesource.com/platform/bionic/+/master/docs/32-bit-abi.md */ #define MDBX_HAVE_PWRITEV 0 +#if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS != MDBX_WORDBITS +#error "_FILE_OFFSET_BITS != MDBX_WORDBITS and __ANDROID_API__ < 24" (_FILE_OFFSET_BITS != MDBX_WORDBITS) +#elif defined(__FILE_OFFSET_BITS) && __FILE_OFFSET_BITS != MDBX_WORDBITS +#error "__FILE_OFFSET_BITS != MDBX_WORDBITS and __ANDROID_API__ < 24" (__FILE_OFFSET_BITS != MDBX_WORDBITS) +#endif #else #define MDBX_HAVE_PWRITEV 1 #endif @@ -1576,12 +1586,6 @@ MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline uint32_t osal_bswap32 #endif } -#if UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul || defined(_WIN64) -#define MDBX_WORDBITS 64 -#else -#define MDBX_WORDBITS 32 -#endif /* MDBX_WORDBITS */ - /******************************************************************************* ******************************************************************************* * diff --git a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h index 3a6ffd8b49f..90835d1b9e9 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h +++ b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h @@ -99,7 +99,7 @@ are only a few cases of changing data. 
| _DELETING_||| |Key is absent → Error since no such key |\ref mdbx_del() or \ref mdbx_replace()|Error \ref MDBX_NOTFOUND| |Key exist → Delete by key |\ref mdbx_del() with the parameter `data = NULL`|Deletion| -|Key exist → Delete by key with with data matching check|\ref mdbx_del() with the parameter `data` filled with the value which should be match for deletion|Deletion or \ref MDBX_NOTFOUND if the value does not match| +|Key exist → Delete by key with data matching check|\ref mdbx_del() with the parameter `data` filled with the value which should match for deletion|Deletion or \ref MDBX_NOTFOUND if the value does not match| |Delete at the current cursor position |\ref mdbx_cursor_del() with \ref MDBX_CURRENT flag|Deletion| |Extract (read & delete) value by the key |\ref mdbx_replace() with zero flag and parameter `new_data = NULL`|Returning a deleted value| @@ -1402,7 +1402,7 @@ typedef enum MDBX_env_flags { * \ref mdbx_env_set_syncbytes() and \ref mdbx_env_set_syncperiod() functions * could be very useful with `MDBX_SAFE_NOSYNC` flag. * - * The number and volume of of disk IOPs with MDBX_SAFE_NOSYNC flag will - * exactly the as without any no-sync flags. However, you should expect a + * The number and volume of disk IOPs with MDBX_SAFE_NOSYNC flag will be + * exactly the same as without any no-sync flags. However, you should expect a * larger process's [work set](https://bit.ly/2kA2tFX) and significantly worse * a [locality of reference](https://bit.ly/2mbYq2J), due to the more @@ -2116,7 +2116,7 @@ typedef enum MDBX_option { * for all processes interacting with the database. * * \details This defines the number of slots in the lock table that is used to - * track readers in the the environment. The default is about 100 for 4K + * track readers in the environment. The default is about 100 for 4K * system page size. Starting a read-only transaction normally ties a lock * table slot to the current thread until the environment closes or the thread * exits. If \ref MDBX_NOSTICKYTHREADS is in use, \ref mdbx_txn_begin() @@ -3638,7 +3638,7 @@ MDBX_NOTHROW_CONST_FUNCTION LIBMDBX_API intptr_t mdbx_limits_txnsize_max(intptr_ * \ingroup c_settings * * \details This defines the number of slots in the lock table that is used to - * track readers in the the environment. The default is about 100 for 4K system + * track readers in the environment. The default is about 100 for 4K system * page size. Starting a read-only transaction normally ties a lock table slot * to the current thread until the environment closes or the thread exits. If * \ref MDBX_NOSTICKYTHREADS is in use, \ref mdbx_txn_begin() instead ties the @@ -6056,7 +6056,7 @@ LIBMDBX_API int mdbx_dbi_sequence(MDBX_txn *txn, MDBX_dbi dbi, uint64_t *result, * This returns a comparison as if the two data items were keys in the * specified table. * - * \warning There ss a Undefined behavior if one of arguments is invalid. + * \warning There is undefined behavior if one of the arguments is invalid. * * \param [in] txn A transaction handle returned by \ref mdbx_txn_begin(). * \param [in] dbi A table handle returned by \ref mdbx_dbi_open(). diff --git a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h++ b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h++ index 85058af09b8..2d5f62b17d5 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h++ +++ b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h++ @@ -828,7 +828,7 @@ struct LIBMDBX_API_TYPE slice : public ::MDBX_val { /// \brief Checks whether the content of the slice is printable. 
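
The deletion table above is the C-level contract; through this repo's `reth-libmdbx` bindings the "delete by key with data matching" case is simply the `Option`al data argument of `Transaction::del`, which reports the \ref MDBX_NOTFOUND outcome as `Ok(false)`. A hedged sketch; the exact `put`/`del` signatures are assumed from the bindings:

use reth_libmdbx::{Environment, WriteFlags};

fn delete_with_match(env: &Environment) -> Result<(), reth_libmdbx::Error> {
    let txn = env.begin_rw_txn()?;
    let db = txn.open_db(None)?;
    txn.put(db.dbi(), b"key", b"value", WriteFlags::empty())?;

    // Data given: delete only if the stored value matches; a mismatch is
    // the MDBX_NOTFOUND row of the table, surfaced as Ok(false).
    assert!(!txn.del(db.dbi(), b"key", Some(b"other".as_slice()))?);
    // Data omitted: plain delete by key.
    assert!(txn.del(db.dbi(), b"key", None)?);
    txn.commit()?;
    Ok(())
}
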
/// \param [in] disable_utf8 By default if `disable_utf8` is `false` function /// checks that content bytes are printable ASCII-7 characters or a valid UTF8 - /// sequences. Otherwise, if if `disable_utf8` is `true` function checks that + /// sequences. Otherwise, if `disable_utf8` is `true` function checks that /// content bytes are printable extended 8-bit ASCII codes. MDBX_NOTHROW_PURE_FUNCTION bool is_printable(bool disable_utf8 = false) const noexcept; @@ -2062,7 +2062,7 @@ public: MDBX_CXX20_CONSTEXPR buffer(const char *c_str, const allocator_type &allocator = allocator_type()) - : buffer(::mdbx::slice(c_str), allocator){} + : buffer(::mdbx::slice(c_str), allocator) {} #if defined(DOXYGEN) || (defined(__cpp_lib_string_view) && __cpp_lib_string_view >= 201606L) template diff --git a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx_chk.c b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx_chk.c index efe2a3f600d..fdf5f8b406c 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx_chk.c +++ b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx_chk.c @@ -18,7 +18,7 @@ /// \copyright SPDX-License-Identifier: Apache-2.0 /// \author Леонид Юрьев aka Leonid Yuriev \date 2015-2025 -#define MDBX_BUILD_SOURCERY 4df7f8f177aee7f9f94c4e72f0d732384e9a870d7d79b8142abdeb4633e710cd_v0_13_6_0_ga971c76a +#define MDBX_BUILD_SOURCERY 6b5df6869d2bf5419e3a8189d9cc849cc9911b9c8a951b9750ed0a261ce43724_v0_13_7_0_g566b0f93 #define LIBMDBX_INTERNALS #define MDBX_DEPRECATED @@ -146,6 +146,8 @@ #pragma warning(disable : 6235) /* is always a constant */ #pragma warning(disable : 6237) /* is never evaluated and might \ have side effects */ +#pragma warning(disable : 5286) /* implicit conversion from enum type 'type 1' to enum type 'type 2' */ +#pragma warning(disable : 5287) /* operands are different enum types 'type 1' and 'type 2' */ #endif #pragma warning(disable : 4710) /* 'xyz': function not inlined */ #pragma warning(disable : 4711) /* function 'xyz' selected for automatic \ @@ -455,11 +457,6 @@ __extern_C key_t ftok(const char *, int); #if __ANDROID_API__ >= 21 #include #endif -#if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS != MDBX_WORDBITS -#error "_FILE_OFFSET_BITS != MDBX_WORDBITS" (_FILE_OFFSET_BITS != MDBX_WORDBITS) -#elif defined(__FILE_OFFSET_BITS) && __FILE_OFFSET_BITS != MDBX_WORDBITS -#error "__FILE_OFFSET_BITS != MDBX_WORDBITS" (__FILE_OFFSET_BITS != MDBX_WORDBITS) -#endif #endif /* Android */ #if defined(HAVE_SYS_STAT_H) || __has_include() @@ -544,6 +541,12 @@ __extern_C key_t ftok(const char *, int); #endif #endif /* __BYTE_ORDER__ || __ORDER_LITTLE_ENDIAN__ || __ORDER_BIG_ENDIAN__ */ +#if UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul || defined(_WIN64) +#define MDBX_WORDBITS 64 +#else +#define MDBX_WORDBITS 32 +#endif /* MDBX_WORDBITS */ + /*----------------------------------------------------------------------------*/ /* Availability of CMOV or equivalent */ @@ -1206,7 +1209,14 @@ typedef struct osal_mmap { #elif defined(__ANDROID_API__) #if __ANDROID_API__ < 24 +/* https://android-developers.googleblog.com/2017/09/introducing-android-native-development.html + * https://android.googlesource.com/platform/bionic/+/master/docs/32-bit-abi.md */ #define MDBX_HAVE_PWRITEV 0 +#if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS != MDBX_WORDBITS +#error "_FILE_OFFSET_BITS != MDBX_WORDBITS and __ANDROID_API__ < 24" (_FILE_OFFSET_BITS != MDBX_WORDBITS) +#elif defined(__FILE_OFFSET_BITS) && __FILE_OFFSET_BITS != MDBX_WORDBITS +#error "__FILE_OFFSET_BITS != MDBX_WORDBITS and __ANDROID_API__ < 
24" (__FILE_OFFSET_BITS != MDBX_WORDBITS) +#endif #else #define MDBX_HAVE_PWRITEV 1 #endif @@ -1592,12 +1602,6 @@ MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline uint32_t osal_bswap32 #endif } -#if UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul || defined(_WIN64) -#define MDBX_WORDBITS 64 -#else -#define MDBX_WORDBITS 32 -#endif /* MDBX_WORDBITS */ - /******************************************************************************* ******************************************************************************* * diff --git a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx_copy.c b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx_copy.c index f4e2db92e5e..96e8c485c71 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx_copy.c +++ b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx_copy.c @@ -18,7 +18,7 @@ /// \copyright SPDX-License-Identifier: Apache-2.0 /// \author Леонид Юрьев aka Leonid Yuriev \date 2015-2025 -#define MDBX_BUILD_SOURCERY 4df7f8f177aee7f9f94c4e72f0d732384e9a870d7d79b8142abdeb4633e710cd_v0_13_6_0_ga971c76a +#define MDBX_BUILD_SOURCERY 6b5df6869d2bf5419e3a8189d9cc849cc9911b9c8a951b9750ed0a261ce43724_v0_13_7_0_g566b0f93 #define LIBMDBX_INTERNALS #define MDBX_DEPRECATED @@ -146,6 +146,8 @@ #pragma warning(disable : 6235) /* is always a constant */ #pragma warning(disable : 6237) /* is never evaluated and might \ have side effects */ +#pragma warning(disable : 5286) /* implicit conversion from enum type 'type 1' to enum type 'type 2' */ +#pragma warning(disable : 5287) /* operands are different enum types 'type 1' and 'type 2' */ #endif #pragma warning(disable : 4710) /* 'xyz': function not inlined */ #pragma warning(disable : 4711) /* function 'xyz' selected for automatic \ @@ -455,11 +457,6 @@ __extern_C key_t ftok(const char *, int); #if __ANDROID_API__ >= 21 #include #endif -#if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS != MDBX_WORDBITS -#error "_FILE_OFFSET_BITS != MDBX_WORDBITS" (_FILE_OFFSET_BITS != MDBX_WORDBITS) -#elif defined(__FILE_OFFSET_BITS) && __FILE_OFFSET_BITS != MDBX_WORDBITS -#error "__FILE_OFFSET_BITS != MDBX_WORDBITS" (__FILE_OFFSET_BITS != MDBX_WORDBITS) -#endif #endif /* Android */ #if defined(HAVE_SYS_STAT_H) || __has_include() @@ -544,6 +541,12 @@ __extern_C key_t ftok(const char *, int); #endif #endif /* __BYTE_ORDER__ || __ORDER_LITTLE_ENDIAN__ || __ORDER_BIG_ENDIAN__ */ +#if UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul || defined(_WIN64) +#define MDBX_WORDBITS 64 +#else +#define MDBX_WORDBITS 32 +#endif /* MDBX_WORDBITS */ + /*----------------------------------------------------------------------------*/ /* Availability of CMOV or equivalent */ @@ -1206,7 +1209,14 @@ typedef struct osal_mmap { #elif defined(__ANDROID_API__) #if __ANDROID_API__ < 24 +/* https://android-developers.googleblog.com/2017/09/introducing-android-native-development.html + * https://android.googlesource.com/platform/bionic/+/master/docs/32-bit-abi.md */ #define MDBX_HAVE_PWRITEV 0 +#if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS != MDBX_WORDBITS +#error "_FILE_OFFSET_BITS != MDBX_WORDBITS and __ANDROID_API__ < 24" (_FILE_OFFSET_BITS != MDBX_WORDBITS) +#elif defined(__FILE_OFFSET_BITS) && __FILE_OFFSET_BITS != MDBX_WORDBITS +#error "__FILE_OFFSET_BITS != MDBX_WORDBITS and __ANDROID_API__ < 24" (__FILE_OFFSET_BITS != MDBX_WORDBITS) +#endif #else #define MDBX_HAVE_PWRITEV 1 #endif @@ -1592,12 +1602,6 @@ MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline uint32_t osal_bswap32 #endif } -#if UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX 
> 0xffffFFFFul || defined(_WIN64) -#define MDBX_WORDBITS 64 -#else -#define MDBX_WORDBITS 32 -#endif /* MDBX_WORDBITS */ - /******************************************************************************* ******************************************************************************* * diff --git a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx_drop.c b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx_drop.c index 64ef32017e8..319bcc13744 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx_drop.c +++ b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx_drop.c @@ -18,7 +18,7 @@ /// \copyright SPDX-License-Identifier: Apache-2.0 /// \author Леонид Юрьев aka Leonid Yuriev \date 2015-2025 -#define MDBX_BUILD_SOURCERY 4df7f8f177aee7f9f94c4e72f0d732384e9a870d7d79b8142abdeb4633e710cd_v0_13_6_0_ga971c76a +#define MDBX_BUILD_SOURCERY 6b5df6869d2bf5419e3a8189d9cc849cc9911b9c8a951b9750ed0a261ce43724_v0_13_7_0_g566b0f93 #define LIBMDBX_INTERNALS #define MDBX_DEPRECATED @@ -146,6 +146,8 @@ #pragma warning(disable : 6235) /* is always a constant */ #pragma warning(disable : 6237) /* is never evaluated and might \ have side effects */ +#pragma warning(disable : 5286) /* implicit conversion from enum type 'type 1' to enum type 'type 2' */ +#pragma warning(disable : 5287) /* operands are different enum types 'type 1' and 'type 2' */ #endif #pragma warning(disable : 4710) /* 'xyz': function not inlined */ #pragma warning(disable : 4711) /* function 'xyz' selected for automatic \ @@ -455,11 +457,6 @@ __extern_C key_t ftok(const char *, int); #if __ANDROID_API__ >= 21 #include #endif -#if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS != MDBX_WORDBITS -#error "_FILE_OFFSET_BITS != MDBX_WORDBITS" (_FILE_OFFSET_BITS != MDBX_WORDBITS) -#elif defined(__FILE_OFFSET_BITS) && __FILE_OFFSET_BITS != MDBX_WORDBITS -#error "__FILE_OFFSET_BITS != MDBX_WORDBITS" (__FILE_OFFSET_BITS != MDBX_WORDBITS) -#endif #endif /* Android */ #if defined(HAVE_SYS_STAT_H) || __has_include() @@ -544,6 +541,12 @@ __extern_C key_t ftok(const char *, int); #endif #endif /* __BYTE_ORDER__ || __ORDER_LITTLE_ENDIAN__ || __ORDER_BIG_ENDIAN__ */ +#if UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul || defined(_WIN64) +#define MDBX_WORDBITS 64 +#else +#define MDBX_WORDBITS 32 +#endif /* MDBX_WORDBITS */ + /*----------------------------------------------------------------------------*/ /* Availability of CMOV or equivalent */ @@ -1206,7 +1209,14 @@ typedef struct osal_mmap { #elif defined(__ANDROID_API__) #if __ANDROID_API__ < 24 +/* https://android-developers.googleblog.com/2017/09/introducing-android-native-development.html + * https://android.googlesource.com/platform/bionic/+/master/docs/32-bit-abi.md */ #define MDBX_HAVE_PWRITEV 0 +#if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS != MDBX_WORDBITS +#error "_FILE_OFFSET_BITS != MDBX_WORDBITS and __ANDROID_API__ < 24" (_FILE_OFFSET_BITS != MDBX_WORDBITS) +#elif defined(__FILE_OFFSET_BITS) && __FILE_OFFSET_BITS != MDBX_WORDBITS +#error "__FILE_OFFSET_BITS != MDBX_WORDBITS and __ANDROID_API__ < 24" (__FILE_OFFSET_BITS != MDBX_WORDBITS) +#endif #else #define MDBX_HAVE_PWRITEV 1 #endif @@ -1592,12 +1602,6 @@ MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline uint32_t osal_bswap32 #endif } -#if UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul || defined(_WIN64) -#define MDBX_WORDBITS 64 -#else -#define MDBX_WORDBITS 32 -#endif /* MDBX_WORDBITS */ - /******************************************************************************* 
******************************************************************************* * diff --git a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx_dump.c b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx_dump.c index ddb99f64dad..3193b1d34ce 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx_dump.c +++ b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx_dump.c @@ -18,7 +18,7 @@ /// \copyright SPDX-License-Identifier: Apache-2.0 /// \author Леонид Юрьев aka Leonid Yuriev \date 2015-2025 -#define MDBX_BUILD_SOURCERY 4df7f8f177aee7f9f94c4e72f0d732384e9a870d7d79b8142abdeb4633e710cd_v0_13_6_0_ga971c76a +#define MDBX_BUILD_SOURCERY 6b5df6869d2bf5419e3a8189d9cc849cc9911b9c8a951b9750ed0a261ce43724_v0_13_7_0_g566b0f93 #define LIBMDBX_INTERNALS #define MDBX_DEPRECATED @@ -146,6 +146,8 @@ #pragma warning(disable : 6235) /* is always a constant */ #pragma warning(disable : 6237) /* is never evaluated and might \ have side effects */ +#pragma warning(disable : 5286) /* implicit conversion from enum type 'type 1' to enum type 'type 2' */ +#pragma warning(disable : 5287) /* operands are different enum types 'type 1' and 'type 2' */ #endif #pragma warning(disable : 4710) /* 'xyz': function not inlined */ #pragma warning(disable : 4711) /* function 'xyz' selected for automatic \ @@ -455,11 +457,6 @@ __extern_C key_t ftok(const char *, int); #if __ANDROID_API__ >= 21 #include #endif -#if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS != MDBX_WORDBITS -#error "_FILE_OFFSET_BITS != MDBX_WORDBITS" (_FILE_OFFSET_BITS != MDBX_WORDBITS) -#elif defined(__FILE_OFFSET_BITS) && __FILE_OFFSET_BITS != MDBX_WORDBITS -#error "__FILE_OFFSET_BITS != MDBX_WORDBITS" (__FILE_OFFSET_BITS != MDBX_WORDBITS) -#endif #endif /* Android */ #if defined(HAVE_SYS_STAT_H) || __has_include() @@ -544,6 +541,12 @@ __extern_C key_t ftok(const char *, int); #endif #endif /* __BYTE_ORDER__ || __ORDER_LITTLE_ENDIAN__ || __ORDER_BIG_ENDIAN__ */ +#if UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul || defined(_WIN64) +#define MDBX_WORDBITS 64 +#else +#define MDBX_WORDBITS 32 +#endif /* MDBX_WORDBITS */ + /*----------------------------------------------------------------------------*/ /* Availability of CMOV or equivalent */ @@ -1206,7 +1209,14 @@ typedef struct osal_mmap { #elif defined(__ANDROID_API__) #if __ANDROID_API__ < 24 +/* https://android-developers.googleblog.com/2017/09/introducing-android-native-development.html + * https://android.googlesource.com/platform/bionic/+/master/docs/32-bit-abi.md */ #define MDBX_HAVE_PWRITEV 0 +#if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS != MDBX_WORDBITS +#error "_FILE_OFFSET_BITS != MDBX_WORDBITS and __ANDROID_API__ < 24" (_FILE_OFFSET_BITS != MDBX_WORDBITS) +#elif defined(__FILE_OFFSET_BITS) && __FILE_OFFSET_BITS != MDBX_WORDBITS +#error "__FILE_OFFSET_BITS != MDBX_WORDBITS and __ANDROID_API__ < 24" (__FILE_OFFSET_BITS != MDBX_WORDBITS) +#endif #else #define MDBX_HAVE_PWRITEV 1 #endif @@ -1592,12 +1602,6 @@ MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline uint32_t osal_bswap32 #endif } -#if UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul || defined(_WIN64) -#define MDBX_WORDBITS 64 -#else -#define MDBX_WORDBITS 32 -#endif /* MDBX_WORDBITS */ - /******************************************************************************* ******************************************************************************* * diff --git a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx_load.c b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx_load.c index ba4c0c3c94a..0c1ceba53c2 100644 --- 
a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx_load.c +++ b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx_load.c @@ -18,7 +18,7 @@ /// \copyright SPDX-License-Identifier: Apache-2.0 /// \author Леонид Юрьев aka Leonid Yuriev \date 2015-2025 -#define MDBX_BUILD_SOURCERY 4df7f8f177aee7f9f94c4e72f0d732384e9a870d7d79b8142abdeb4633e710cd_v0_13_6_0_ga971c76a +#define MDBX_BUILD_SOURCERY 6b5df6869d2bf5419e3a8189d9cc849cc9911b9c8a951b9750ed0a261ce43724_v0_13_7_0_g566b0f93 #define LIBMDBX_INTERNALS #define MDBX_DEPRECATED @@ -146,6 +146,8 @@ #pragma warning(disable : 6235) /* is always a constant */ #pragma warning(disable : 6237) /* is never evaluated and might \ have side effects */ +#pragma warning(disable : 5286) /* implicit conversion from enum type 'type 1' to enum type 'type 2' */ +#pragma warning(disable : 5287) /* operands are different enum types 'type 1' and 'type 2' */ #endif #pragma warning(disable : 4710) /* 'xyz': function not inlined */ #pragma warning(disable : 4711) /* function 'xyz' selected for automatic \ @@ -455,11 +457,6 @@ __extern_C key_t ftok(const char *, int); #if __ANDROID_API__ >= 21 #include #endif -#if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS != MDBX_WORDBITS -#error "_FILE_OFFSET_BITS != MDBX_WORDBITS" (_FILE_OFFSET_BITS != MDBX_WORDBITS) -#elif defined(__FILE_OFFSET_BITS) && __FILE_OFFSET_BITS != MDBX_WORDBITS -#error "__FILE_OFFSET_BITS != MDBX_WORDBITS" (__FILE_OFFSET_BITS != MDBX_WORDBITS) -#endif #endif /* Android */ #if defined(HAVE_SYS_STAT_H) || __has_include() @@ -544,6 +541,12 @@ __extern_C key_t ftok(const char *, int); #endif #endif /* __BYTE_ORDER__ || __ORDER_LITTLE_ENDIAN__ || __ORDER_BIG_ENDIAN__ */ +#if UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul || defined(_WIN64) +#define MDBX_WORDBITS 64 +#else +#define MDBX_WORDBITS 32 +#endif /* MDBX_WORDBITS */ + /*----------------------------------------------------------------------------*/ /* Availability of CMOV or equivalent */ @@ -1206,7 +1209,14 @@ typedef struct osal_mmap { #elif defined(__ANDROID_API__) #if __ANDROID_API__ < 24 +/* https://android-developers.googleblog.com/2017/09/introducing-android-native-development.html + * https://android.googlesource.com/platform/bionic/+/master/docs/32-bit-abi.md */ #define MDBX_HAVE_PWRITEV 0 +#if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS != MDBX_WORDBITS +#error "_FILE_OFFSET_BITS != MDBX_WORDBITS and __ANDROID_API__ < 24" (_FILE_OFFSET_BITS != MDBX_WORDBITS) +#elif defined(__FILE_OFFSET_BITS) && __FILE_OFFSET_BITS != MDBX_WORDBITS +#error "__FILE_OFFSET_BITS != MDBX_WORDBITS and __ANDROID_API__ < 24" (__FILE_OFFSET_BITS != MDBX_WORDBITS) +#endif #else #define MDBX_HAVE_PWRITEV 1 #endif @@ -1592,12 +1602,6 @@ MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline uint32_t osal_bswap32 #endif } -#if UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul || defined(_WIN64) -#define MDBX_WORDBITS 64 -#else -#define MDBX_WORDBITS 32 -#endif /* MDBX_WORDBITS */ - /******************************************************************************* ******************************************************************************* * diff --git a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx_stat.c b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx_stat.c index 45431178c9d..bd052d70a3c 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx_stat.c +++ b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx_stat.c @@ -18,7 +18,7 @@ /// \copyright SPDX-License-Identifier: Apache-2.0 /// \author Леонид Юрьев aka Leonid Yuriev \date 2015-2025 
-#define MDBX_BUILD_SOURCERY 4df7f8f177aee7f9f94c4e72f0d732384e9a870d7d79b8142abdeb4633e710cd_v0_13_6_0_ga971c76a +#define MDBX_BUILD_SOURCERY 6b5df6869d2bf5419e3a8189d9cc849cc9911b9c8a951b9750ed0a261ce43724_v0_13_7_0_g566b0f93 #define LIBMDBX_INTERNALS #define MDBX_DEPRECATED @@ -146,6 +146,8 @@ #pragma warning(disable : 6235) /* is always a constant */ #pragma warning(disable : 6237) /* is never evaluated and might \ have side effects */ +#pragma warning(disable : 5286) /* implicit conversion from enum type 'type 1' to enum type 'type 2' */ +#pragma warning(disable : 5287) /* operands are different enum types 'type 1' and 'type 2' */ #endif #pragma warning(disable : 4710) /* 'xyz': function not inlined */ #pragma warning(disable : 4711) /* function 'xyz' selected for automatic \ @@ -455,11 +457,6 @@ __extern_C key_t ftok(const char *, int); #if __ANDROID_API__ >= 21 #include #endif -#if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS != MDBX_WORDBITS -#error "_FILE_OFFSET_BITS != MDBX_WORDBITS" (_FILE_OFFSET_BITS != MDBX_WORDBITS) -#elif defined(__FILE_OFFSET_BITS) && __FILE_OFFSET_BITS != MDBX_WORDBITS -#error "__FILE_OFFSET_BITS != MDBX_WORDBITS" (__FILE_OFFSET_BITS != MDBX_WORDBITS) -#endif #endif /* Android */ #if defined(HAVE_SYS_STAT_H) || __has_include() @@ -544,6 +541,12 @@ __extern_C key_t ftok(const char *, int); #endif #endif /* __BYTE_ORDER__ || __ORDER_LITTLE_ENDIAN__ || __ORDER_BIG_ENDIAN__ */ +#if UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul || defined(_WIN64) +#define MDBX_WORDBITS 64 +#else +#define MDBX_WORDBITS 32 +#endif /* MDBX_WORDBITS */ + /*----------------------------------------------------------------------------*/ /* Availability of CMOV or equivalent */ @@ -1206,7 +1209,14 @@ typedef struct osal_mmap { #elif defined(__ANDROID_API__) #if __ANDROID_API__ < 24 +/* https://android-developers.googleblog.com/2017/09/introducing-android-native-development.html + * https://android.googlesource.com/platform/bionic/+/master/docs/32-bit-abi.md */ #define MDBX_HAVE_PWRITEV 0 +#if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS != MDBX_WORDBITS +#error "_FILE_OFFSET_BITS != MDBX_WORDBITS and __ANDROID_API__ < 24" (_FILE_OFFSET_BITS != MDBX_WORDBITS) +#elif defined(__FILE_OFFSET_BITS) && __FILE_OFFSET_BITS != MDBX_WORDBITS +#error "__FILE_OFFSET_BITS != MDBX_WORDBITS and __ANDROID_API__ < 24" (__FILE_OFFSET_BITS != MDBX_WORDBITS) +#endif #else #define MDBX_HAVE_PWRITEV 1 #endif @@ -1592,12 +1602,6 @@ MDBX_MAYBE_UNUSED MDBX_NOTHROW_PURE_FUNCTION static inline uint32_t osal_bswap32 #endif } -#if UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul || defined(_WIN64) -#define MDBX_WORDBITS 64 -#else -#define MDBX_WORDBITS 32 -#endif /* MDBX_WORDBITS */ - /******************************************************************************* ******************************************************************************* * diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index f372d0c0c09..af1b4fe91d5 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -858,7 +858,7 @@ mod tests { .iter() .chain(in_memory_blocks.iter()) .map(|block| block.body().transactions.iter()) - .map(|tx| tx.map(|tx| random_receipt(rng, tx, Some(2))).collect()) + .map(|tx| tx.map(|tx| random_receipt(rng, tx, Some(2), None)).collect()) .collect(); let factory = create_test_provider_factory_with_chain_spec(chain_spec); diff 
--git a/crates/storage/provider/src/traits/full.rs b/crates/storage/provider/src/traits/full.rs index fe5e167bf0b..9fdcd0c4085 100644 --- a/crates/storage/provider/src/traits/full.rs +++ b/crates/storage/provider/src/traits/full.rs @@ -1,8 +1,9 @@ //! Helper provider traits to encapsulate all provider traits for simplicity. use crate::{ - AccountReader, BlockReaderIdExt, ChainSpecProvider, ChangeSetReader, DatabaseProviderFactory, - StageCheckpointReader, StateProviderFactory, StaticFileProviderFactory, + AccountReader, BlockReader, BlockReaderIdExt, ChainSpecProvider, ChangeSetReader, + DatabaseProviderFactory, HashedPostStateProvider, StageCheckpointReader, + StateCommitmentProvider, StateProviderFactory, StateReader, StaticFileProviderFactory, }; use reth_chain_state::{CanonStateSubscriptions, ForkChoiceSubscriptions}; use reth_node_types::{BlockTy, HeaderTy, NodeTypesWithDB, ReceiptTy, TxTy}; @@ -11,7 +12,7 @@ use std::fmt::Debug; /// Helper trait to unify all provider traits for simplicity. pub trait FullProvider: - DatabaseProviderFactory + DatabaseProviderFactory + NodePrimitivesProvider + StaticFileProviderFactory + BlockReaderIdExt< @@ -21,6 +22,9 @@ pub trait FullProvider: Header = HeaderTy, > + AccountReader + StateProviderFactory + + StateReader + + StateCommitmentProvider + + HashedPostStateProvider + ChainSpecProvider + ChangeSetReader + CanonStateSubscriptions @@ -34,7 +38,7 @@ pub trait FullProvider: } impl FullProvider for T where - T: DatabaseProviderFactory + T: DatabaseProviderFactory + NodePrimitivesProvider + StaticFileProviderFactory + BlockReaderIdExt< @@ -44,6 +48,9 @@ impl FullProvider for T where Header = HeaderTy, > + AccountReader + StateProviderFactory + + StateReader + + StateCommitmentProvider + + HashedPostStateProvider + ChainSpecProvider + ChangeSetReader + CanonStateSubscriptions diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index cbdb773c203..bca2a4cdb4c 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -243,7 +243,7 @@ mod tests { use reth_storage_api::{DatabaseProviderFactory, HashedPostStateProvider}; use reth_trie::{ test_utils::{state_root, storage_root_prehashed}, - HashedPostState, HashedStorage, StateRoot, StorageRoot, + HashedPostState, HashedStorage, StateRoot, StorageRoot, StorageRootProgress, }; use reth_trie_db::{DatabaseStateRoot, DatabaseStorageRoot}; use revm_database::{ @@ -1343,8 +1343,14 @@ mod tests { provider_rw.write_hashed_state(&state.clone().into_sorted()).unwrap(); // calculate database storage root and write intermediate storage nodes. 
- let (storage_root, _, storage_updates) = - StorageRoot::from_tx_hashed(tx, hashed_address).calculate(true).unwrap(); + let StorageRootProgress::Complete(storage_root, _, storage_updates) = + StorageRoot::from_tx_hashed(tx, hashed_address) + .with_no_threshold() + .calculate(true) + .unwrap() + else { + panic!("no threshold for root"); + }; assert_eq!(storage_root, storage_root_prehashed(init_storage.storage)); assert!(!storage_updates.is_empty()); provider_rw diff --git a/crates/storage/storage-api/src/noop.rs b/crates/storage/storage-api/src/noop.rs index 0409bfad62b..43421fe683e 100644 --- a/crates/storage/storage-api/src/noop.rs +++ b/crates/storage/storage-api/src/noop.rs @@ -5,12 +5,12 @@ use crate::{ BlockReader, BlockReaderIdExt, BlockSource, BytecodeReader, ChangeSetReader, HashedPostStateProvider, HeaderProvider, NodePrimitivesProvider, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateProofProvider, - StateProvider, StateProviderBox, StateProviderFactory, StateRootProvider, StorageRootProvider, - TransactionVariant, TransactionsProvider, + StateProvider, StateProviderBox, StateProviderFactory, StateReader, StateRootProvider, + StorageRootProvider, TransactionVariant, TransactionsProvider, }; #[cfg(feature = "db-api")] -use crate::{DBProvider, DatabaseProviderFactory}; +use crate::{DBProvider, DatabaseProviderFactory, StateCommitmentProvider}; use alloc::{boxed::Box, string::String, sync::Arc, vec::Vec}; use alloy_consensus::transaction::TransactionMeta; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; @@ -27,6 +27,7 @@ use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec, MAINNET}; use reth_db_api::mock::{DatabaseMock, TxMock}; use reth_db_models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_ethereum_primitives::EthPrimitives; +use reth_execution_types::ExecutionOutcome; use reth_primitives_traits::{Account, Bytecode, NodePrimitives, RecoveredBlock, SealedHeader}; #[cfg(feature = "db-api")] use reth_prune_types::PruneModes; @@ -37,6 +38,8 @@ use reth_trie_common::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, MultiProofTargets, StorageMultiProof, StorageProof, TrieInput, }; +#[cfg(feature = "db-api")] +use reth_trie_db::MerklePatriciaTrie; /// Supports various api interfaces for testing purposes. 
#[derive(Debug)] @@ -477,6 +480,17 @@ impl HashedPostStateProvider for NoopProvider } } +impl StateReader for NoopProvider { + type Receipt = N::Receipt; + + fn get_state( + &self, + _block: BlockNumber, + ) -> ProviderResult>> { + Ok(None) + } +} + impl StateProvider for NoopProvider { fn storage( &self, @@ -612,6 +626,13 @@ impl DBProvider for NoopProvider StateCommitmentProvider + for NoopProvider +{ + type StateCommitment = MerklePatriciaTrie; +} + #[cfg(feature = "db-api")] impl DatabaseProviderFactory for NoopProvider diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index 3e826f34707..637ad907d07 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -47,7 +47,8 @@ thiserror.workspace = true tracing.workspace = true rustc-hash.workspace = true schnellru.workspace = true -serde = { workspace = true, features = ["derive", "rc"], optional = true } +serde = { workspace = true, features = ["derive", "rc"] } +serde_json.workspace = true bitflags.workspace = true auto_impl.workspace = true smallvec.workspace = true @@ -75,7 +76,6 @@ tokio = { workspace = true, features = ["rt-multi-thread"] } [features] serde = [ - "dep:serde", "reth-execution-types/serde", "reth-eth-wire-types/serde", "alloy-consensus/serde", @@ -133,3 +133,8 @@ harness = false name = "priority" required-features = ["arbitrary"] harness = false + +[[bench]] +name = "canonical_state_change" +required-features = ["test-utils", "arbitrary"] +harness = false diff --git a/crates/transaction-pool/benches/canonical_state_change.rs b/crates/transaction-pool/benches/canonical_state_change.rs new file mode 100644 index 00000000000..7f2d5b91f56 --- /dev/null +++ b/crates/transaction-pool/benches/canonical_state_change.rs @@ -0,0 +1,159 @@ +#![allow(missing_docs)] +use alloy_consensus::Transaction; +use alloy_primitives::{Address, B256, U256}; +use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; +use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner}; +use rand::prelude::SliceRandom; +use reth_ethereum_primitives::{Block, BlockBody}; +use reth_execution_types::ChangedAccount; +use reth_primitives_traits::{Header, SealedBlock}; +use reth_transaction_pool::{ + test_utils::{MockTransaction, TestPoolBuilder}, + BlockInfo, CanonicalStateUpdate, PoolConfig, PoolTransaction, PoolUpdateKind, SubPoolLimit, + TransactionOrigin, TransactionPool, TransactionPoolExt, +}; +use std::{collections::HashMap, time::Duration}; +/// Generates a set of transactions for multiple senders +fn generate_transactions(num_senders: usize, txs_per_sender: usize) -> Vec { + let mut runner = TestRunner::deterministic(); + let mut txs = Vec::new(); + + for sender_idx in 0..num_senders { + // Create a unique sender address + let sender_bytes = sender_idx.to_be_bytes(); + let addr_slice = [0u8; 12].into_iter().chain(sender_bytes.into_iter()).collect::>(); + let sender = Address::from_slice(&addr_slice); + + // Generate transactions for this sender + for nonce in 0..txs_per_sender { + let mut tx = any::().new_tree(&mut runner).unwrap().current(); + tx.set_sender(sender); + tx.set_nonce(nonce as u64); + + // Ensure it's not a legacy transaction + if tx.is_legacy() || tx.is_eip2930() { + tx = MockTransaction::eip1559(); + tx.set_priority_fee(any::().new_tree(&mut runner).unwrap().current()); + tx.set_max_fee(any::().new_tree(&mut runner).unwrap().current()); + tx.set_sender(sender); + tx.set_nonce(nonce as u64); + } + + txs.push(tx); + } + } + + txs +} + +/// Fill 
the pool with transactions +async fn fill_pool(pool: &TestPoolBuilder, txs: Vec) -> HashMap { + let mut sender_nonces = HashMap::new(); + + // Add transactions one by one + for tx in txs { + let sender = tx.sender(); + let nonce = tx.nonce(); + + // Track the highest nonce for each sender + sender_nonces.insert(sender, nonce.max(sender_nonces.get(&sender).copied().unwrap_or(0))); + + // Add transaction to the pool + let _ = pool.add_transaction(TransactionOrigin::External, tx).await; + } + + sender_nonces +} + +fn canonical_state_change_bench(c: &mut Criterion) { + let mut group = c.benchmark_group("Transaction Pool Canonical State Change"); + group.measurement_time(Duration::from_secs(10)); + let rt = tokio::runtime::Runtime::new().unwrap(); + // Test different pool sizes + for num_senders in [500, 1000, 2000] { + for txs_per_sender in [1, 5, 10] { + let total_txs = num_senders * txs_per_sender; + + let group_id = format!( + "txpool | canonical_state_change | senders: {num_senders} | txs_per_sender: {txs_per_sender} | total: {total_txs}", + ); + + // Create the update + // Create a mock block - using default Ethereum block + let header = Header::default(); + let body = BlockBody::default(); + let block = Block { header, body }; + let sealed_block = SealedBlock::seal_slow(block); + + let txs = generate_transactions(num_senders, txs_per_sender); + let pool = TestPoolBuilder::default().with_config(PoolConfig { + pending_limit: SubPoolLimit::max(), + basefee_limit: SubPoolLimit::max(), + queued_limit: SubPoolLimit::max(), + blob_limit: SubPoolLimit::max(), + max_account_slots: 50, + ..Default::default() + }); + struct Input { + sealed_block: SealedBlock, + pool: TestPoolBuilder, + } + group.bench_with_input(group_id, &Input { sealed_block, pool }, |b, input| { + b.iter_batched( + || { + // Setup phase - create pool and transactions + let sealed_block = &input.sealed_block; + let pool = &input.pool; + let senders = pool.unique_senders(); + for sender in senders { + pool.remove_transactions_by_sender(sender); + } + // Set initial block info + pool.set_block_info(BlockInfo { + last_seen_block_number: 0, + last_seen_block_hash: B256::ZERO, + pending_basefee: 1_000_000_000, + pending_blob_fee: Some(1_000_000), + block_gas_limit: 30_000_000, + }); + let sender_nonces = rt.block_on(fill_pool(pool, txs.clone())); + let mut changed_accounts: Vec = sender_nonces + .into_iter() + .map(|(address, nonce)| ChangedAccount { + address, + nonce: nonce + 1, // Increment nonce as if transactions were mined + balance: U256::from(9_000_000_000_000_000u64), // Decrease balance + }) + .collect(); + changed_accounts.shuffle(&mut rand::rng()); + let changed_accounts = changed_accounts.drain(..100).collect(); + let update = CanonicalStateUpdate { + new_tip: sealed_block, + pending_block_base_fee: 1_000_000_000, // 1 gwei + pending_block_blob_fee: Some(1_000_000), // 0.001 gwei + changed_accounts, + mined_transactions: vec![], // No transactions mined in this benchmark + update_kind: PoolUpdateKind::Commit, + }; + + (pool, update) + }, + |(pool, update)| { + // The actual operation being benchmarked + pool.on_canonical_state_change(update); + }, + BatchSize::LargeInput, + ); + }); + } + } + + group.finish(); +} + +criterion_group! 
{ + name = canonical_state_change; + config = Criterion::default(); + targets = canonical_state_change_bench +} +criterion_main!(canonical_state_change); diff --git a/crates/transaction-pool/src/config.rs b/crates/transaction-pool/src/config.rs index a58b02bb327..db792a5162f 100644 --- a/crates/transaction-pool/src/config.rs +++ b/crates/transaction-pool/src/config.rs @@ -115,6 +115,11 @@ impl SubPoolLimit { Self { max_txs, max_size } } + /// Creates an unlimited [`SubPoolLimit`] + pub const fn max() -> Self { + Self::new(usize::MAX, usize::MAX) + } + /// Returns whether the size or amount constraint is violated. #[inline] pub const fn is_exceeded(&self, txs: usize, size: usize) -> bool { diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 340ddaae2c2..410fbda20be 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -605,6 +605,10 @@ where self.pool.all_transactions() } + fn all_transaction_hashes(&self) -> Vec { + self.pool.all_transaction_hashes() + } + fn remove_transactions( &self, hashes: Vec, diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index fff01006254..a462e37351a 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -5,12 +5,12 @@ use crate::{ error::PoolError, metrics::MaintainPoolMetrics, traits::{CanonicalStateUpdate, EthPoolTransaction, TransactionPool, TransactionPoolExt}, - BlockInfo, PoolTransaction, PoolUpdateKind, + BlockInfo, PoolTransaction, PoolUpdateKind, TransactionOrigin, }; use alloy_consensus::{BlockHeader, Typed2718}; -use alloy_eips::BlockNumberOrTag; +use alloy_eips::{BlockNumberOrTag, Decodable2718, Encodable2718}; use alloy_primitives::{Address, BlockHash, BlockNumber}; -use alloy_rlp::Encodable; +use alloy_rlp::{Bytes, Encodable}; use futures_util::{ future::{BoxFuture, Fuse, FusedFuture}, FutureExt, Stream, StreamExt, @@ -27,6 +27,7 @@ use reth_storage_api::{ StateProviderFactory, }; use reth_tasks::TaskSpawner; +use serde::{Deserialize, Serialize}; use std::{ borrow::Borrow, collections::HashSet, @@ -626,8 +627,8 @@ where Ok(res) } -/// Loads transactions from a file, decodes them from the RLP format, and inserts them -/// into the transaction pool on node boot up. +/// Loads transactions from a file, decodes them from the JSON or RLP format, and +/// inserts them into the transaction pool on node boot up. /// The file is removed after the transactions have been successfully processed. async fn load_and_reinsert_transactions
( pool: P, @@ -647,21 +648,43 @@ where return Ok(()) } - let txs_signed: Vec<::Consensus> = - alloy_rlp::Decodable::decode(&mut data.as_slice())?; - - let pool_transactions = txs_signed - .into_iter() - .filter_map(|tx| tx.try_clone_into_recovered().ok()) - .filter_map(|tx| { - // Filter out errors - ::try_from_consensus(tx).ok() - }) - .collect(); + let pool_transactions: Vec<(TransactionOrigin,
::Transaction)> = + if let Ok(tx_backups) = serde_json::from_slice::>(&data) { + tx_backups + .into_iter() + .filter_map(|backup| { + let tx_signed = ::Consensus::decode_2718( + &mut backup.rlp.as_ref(), + ) + .ok()?; + let recovered = tx_signed.try_into_recovered().ok()?; + let pool_tx = + ::try_from_consensus(recovered).ok()?; + + Some((backup.origin, pool_tx)) + }) + .collect() + } else { + let txs_signed: Vec<::Consensus> = + alloy_rlp::Decodable::decode(&mut data.as_slice())?; + + txs_signed + .into_iter() + .filter_map(|tx| tx.try_into_recovered().ok()) + .filter_map(|tx| { + ::try_from_consensus(tx) + .ok() + .map(|pool_tx| (TransactionOrigin::Local, pool_tx)) + }) + .collect() + }; - let outcome = pool.add_transactions(crate::TransactionOrigin::Local, pool_transactions).await; + let inserted = futures_util::future::join_all( + pool_transactions.into_iter().map(|(origin, tx)| pool.add_transaction(origin, tx)), + ) + .await; - info!(target: "txpool", txs_file =?file_path, num_txs=%outcome.len(), "Successfully reinserted local transactions from file"); + info!(target: "txpool", txs_file =?file_path, num_txs=%inserted.len(), "Successfully reinserted local transactions from file"); reth_fs_util::remove_file(file_path)?; Ok(()) } @@ -678,16 +701,26 @@ where let local_transactions = local_transactions .into_iter() - .map(|tx| tx.transaction.clone_into_consensus().into_inner()) + .map(|tx| { + let consensus_tx = tx.transaction.clone_into_consensus().into_inner(); + let rlp_data = consensus_tx.encoded_2718(); + + TxBackup { rlp: rlp_data.into(), origin: tx.origin } + }) .collect::>(); - let num_txs = local_transactions.len(); - let mut buf = Vec::new(); - alloy_rlp::encode_list(&local_transactions, &mut buf); - info!(target: "txpool", txs_file =?file_path, num_txs=%num_txs, "Saving current local transactions"); + let json_data = match serde_json::to_string(&local_transactions) { + Ok(data) => data, + Err(err) => { + warn!(target: "txpool", %err, txs_file=?file_path, "failed to serialize local transactions to json"); + return + } + }; + + info!(target: "txpool", txs_file =?file_path, num_txs=%local_transactions.len(), "Saving current local transactions"); let parent_dir = file_path.parent().map(std::fs::create_dir_all).transpose(); - match parent_dir.map(|_| reth_fs_util::write(file_path, buf)) { + match parent_dir.map(|_| reth_fs_util::write(file_path, json_data)) { Ok(_) => { info!(target: "txpool", txs_file=?file_path, "Wrote local transactions to file"); } @@ -697,12 +730,25 @@ where } } +/// A transaction backup that is saved as json to a file for +/// reinsertion into the pool +#[derive(Debug, Deserialize, Serialize)] +pub struct TxBackup { + /// Encoded transaction + pub rlp: Bytes, + /// The origin of the transaction + pub origin: TransactionOrigin, +} + /// Errors possible during txs backup load and decode #[derive(thiserror::Error, Debug)] pub enum TransactionsBackupError { /// Error during RLP decoding of transactions #[error("failed to apply transactions backup. Encountered RLP decode error: {0}")] Decode(#[from] alloy_rlp::Error), + /// Error during json decoding of transactions + #[error("failed to apply transactions backup. Encountered JSON decode error: {0}")] + Json(#[from] serde_json::Error), /// Error during file upload #[error("failed to apply transactions backup. 
Encountered file error: {0}")] FsPath(#[from] FsPathError), @@ -746,7 +792,7 @@ mod tests { }; use alloy_eips::eip2718::Decodable2718; use alloy_primitives::{hex, U256}; - use reth_ethereum_primitives::{PooledTransactionVariant, TransactionSigned}; + use reth_ethereum_primitives::PooledTransactionVariant; use reth_fs_util as fs; use reth_provider::test_utils::{ExtendedAccount, MockEthProvider}; use reth_tasks::TaskManager; @@ -759,7 +805,7 @@ mod tests { assert!(changed_acc.eq(&ChangedAccountEntry(copy))); } - const EXTENSION: &str = "rlp"; + const EXTENSION: &str = "json"; const FILENAME: &str = "test_transactions_backup"; #[tokio::test(flavor = "multi_thread")] @@ -804,8 +850,7 @@ let data = fs::read(transactions_path).unwrap(); - let txs: Vec = - alloy_rlp::Decodable::decode(&mut data.as_slice()).unwrap(); + let txs: Vec = serde_json::from_slice::>(&data).unwrap(); assert_eq!(txs.len(), 1); temp_dir.close().unwrap(); diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index 45851f31f88..a553ea6e87c 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -31,12 +31,12 @@ use tokio::sync::{mpsc, mpsc::Receiver}; /// This type will never hold any transactions and is only useful for wiring components together. #[derive(Debug, Clone)] #[non_exhaustive] -pub struct NoopTransactionPool { +pub struct NoopTransactionPool { /// Type marker _marker: PhantomData, } -impl NoopTransactionPool { +impl NoopTransactionPool { /// Creates a new [`NoopTransactionPool`]. pub fn new() -> Self { Self { _marker: Default::default() } @@ -198,6 +202,10 @@ impl TransactionPool for NoopTransactionPool { AllPoolTransactions::default() } + fn all_transaction_hashes(&self) -> Vec { + vec![] + } + fn remove_transactions( &self, _hashes: Vec, diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index ecf28a519e2..0066a51aaf6 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -2,7 +2,7 @@ use crate::{ error::{Eip4844PoolTransactionError, InvalidPoolTransactionError}, identifier::{SenderId, TransactionId}, pool::pending::PendingTransaction, - PoolTransaction, TransactionOrdering, ValidPoolTransaction, + PoolTransaction, Priority, TransactionOrdering, ValidPoolTransaction, }; use alloy_consensus::Transaction; use alloy_eips::Typed2718; @@ -96,6 +96,10 @@ pub struct BestTransactions { /// These new pending transactions are inserted into this iterator's pool before yielding the /// next value pub(crate) new_transaction_receiver: Option>>, + /// The priority value of the most recently yielded transaction. + /// + /// This is required if new pending transactions are fed in while this iterator yields values. + pub(crate) last_priority: Option>, /// Flag to control whether to skip blob transactions (EIP4844). 
pub(crate) skip_blobs: bool, } @@ -122,7 +126,16 @@ impl BestTransactions { fn try_recv(&mut self) -> Option> { loop { match self.new_transaction_receiver.as_mut()?.try_recv() { - Ok(tx) => return Some(tx), + Ok(tx) => { + if let Some(last_priority) = &self.last_priority { + if &tx.priority > last_priority { + // we skip transactions if we already yielded a transaction with lower + // priority + return None + } + } + return Some(tx) + } // note TryRecvError::Lagged can be returned here, which is an error that attempts // to correct itself on consecutive try_recv() attempts @@ -169,6 +182,7 @@ impl crate::traits::BestTransactions for BestTransaction fn no_updates(&mut self) { self.new_transaction_receiver.take(); + self.last_priority.take(); } fn skip_blobs(&mut self) { @@ -215,6 +229,9 @@ impl Iterator for BestTransactions { ), ) } else { + if self.new_transaction_receiver.is_some() { + self.last_priority = Some(best.priority.clone()) + } return Some(best.transaction) } } @@ -755,7 +772,7 @@ mod tests { // Create a filter that only returns transactions with even nonces let filter = BestTransactionFilter::new(best, |tx: &Arc>| { - tx.nonce() % 2 == 0 + tx.nonce().is_multiple_of(2) }); // Verify that the filter only returns transactions with even nonces @@ -931,5 +948,47 @@ mod tests { assert!(best.new_transaction_receiver.is_none()); } - // TODO: Same nonce test + #[test] + fn test_best_update_transaction_priority() { + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + // Add 5 transactions with increasing nonces to the pool + let num_tx = 5; + let tx = MockTransaction::eip1559(); + for nonce in 0..num_tx { + let tx = tx.clone().rng_hash().with_nonce(nonce); + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + } + + // Create a BestTransactions iterator from the pool + let mut best = pool.best(); + + // Use a broadcast channel for transaction updates + let (tx_sender, tx_receiver) = + tokio::sync::broadcast::channel::>(1000); + best.new_transaction_receiver = Some(tx_receiver); + + // yield one tx, effectively locking in the highest prio + let first = best.next().unwrap(); + + // Create a new higher-priority transaction with nonce 0 and validate it + let new_higher_fee_tx = MockTransaction::eip1559().with_nonce(0); + let valid_new_higher_fee_tx = f.validated(new_higher_fee_tx); + + // Send the new transaction through the broadcast channel + let pending_tx = PendingTransaction { + submission_id: 10, + transaction: Arc::new(valid_new_higher_fee_tx.clone()), + priority: Priority::Value(U256::from(u64::MAX)), + }; + tx_sender.send(pending_tx).unwrap(); + + // ensure that the higher prio tx is skipped since we yielded a lower one + for tx in best { + assert_eq!(tx.sender_id(), first.sender_id()); + assert_ne!(tx.sender_id(), valid_new_higher_fee_tx.sender_id()); + } + } } diff --git a/crates/transaction-pool/src/pool/events.rs b/crates/transaction-pool/src/pool/events.rs index 0dc07e7ee96..89cfc95bdfe 100644 --- a/crates/transaction-pool/src/pool/events.rs +++ b/crates/transaction-pool/src/pool/events.rs @@ -93,6 +93,13 @@ pub struct NewTransactionEvent { pub transaction: Arc>, } +impl NewTransactionEvent { + /// Creates a new event for a pending transaction. 
+ pub const fn pending(transaction: Arc>) -> Self { + Self { subpool: SubPool::Pending, transaction } + } +} + impl Clone for NewTransactionEvent { fn clone(&self) -> Self { Self { subpool: self.subpool, transaction: self.transaction.clone() } diff --git a/crates/transaction-pool/src/pool/listener.rs b/crates/transaction-pool/src/pool/listener.rs index 2b5111b73be..280fb4ad10c 100644 --- a/crates/transaction-pool/src/pool/listener.rs +++ b/crates/transaction-pool/src/pool/listener.rs @@ -110,6 +110,12 @@ impl PoolEventBroadcast { self.all_events_broadcaster.broadcast(pool_event); } + /// Returns true if no listeners are installed + #[inline] + pub(crate) fn is_empty(&self) -> bool { + self.all_events_broadcaster.is_empty() && self.broadcasters_by_hash.is_empty() + } + /// Create a new subscription for the given transaction hash. pub(crate) fn subscribe(&mut self, tx_hash: TxHash) -> TransactionEvents { let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); @@ -167,6 +173,17 @@ impl PoolEventBroadcast { ); } + /// Notify listeners about all discarded transactions. + #[inline] + pub(crate) fn discarded_many(&mut self, discarded: &[Arc>]) { + if self.is_empty() { + return + } + for tx in discarded { + self.discarded(tx.hash()); + } + } + /// Notify listeners about a transaction that was discarded. pub(crate) fn discarded(&mut self, tx: &TxHash) { self.broadcast_event(tx, TransactionEvent::Discarded, FullTransactionEvent::Discarded(*tx)); @@ -210,6 +227,12 @@ impl AllPoolEventsBroadcaster { Err(TrySendError::Closed(_)) => false, }) } + + /// Returns true if there are no listeners installed. + #[inline] + const fn is_empty(&self) -> bool { + self.senders.is_empty() + } } /// All Sender half(s) of the event channels for a specific transaction. @@ -223,7 +246,7 @@ struct PoolEventBroadcaster { impl PoolEventBroadcaster { /// Returns `true` if there are no more listeners remaining. - fn is_empty(&self) -> bool { + const fn is_empty(&self) -> bool { self.senders.is_empty() } diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 008003c3c20..53df964a472 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -283,7 +283,7 @@ where self.pool.read() } - /// Returns hashes of _all_ transactions in the pool. + /// Returns hashes of transactions in the pool that can be propagated. pub fn pooled_transactions_hashes(&self) -> Vec { self.get_pool_data() .all() @@ -293,12 +293,12 @@ where .collect() } - /// Returns _all_ transactions in the pool. + /// Returns transactions in the pool that can be propagated pub fn pooled_transactions(&self) -> Vec>> { self.get_pool_data().all().transactions_iter().filter(|tx| tx.propagate).cloned().collect() } - /// Returns only the first `max` transactions in the pool. + /// Returns only the first `max` transactions in the pool that can be propagated. pub fn pooled_transactions_max( &self, max: usize, @@ -341,7 +341,8 @@ where } } - /// Returns pooled transactions for the given transaction hashes. + /// Returns pooled transactions for the given transaction hashes that are allowed to be + /// propagated. 
pub fn get_pooled_transaction_elements( &self, tx_hashes: Vec, ) -> Vec where ::Transaction: EthPoolTransaction, { - let transactions = self.get_all(tx_hashes); + let transactions = self.get_all_propagatable(tx_hashes); let mut elements = Vec::with_capacity(transactions.len()); let mut size = 0; for transaction in transactions { @@ -414,6 +415,8 @@ where /// Performs account updates on the pool. /// /// This will either promote or discard transactions based on the new account state. + /// + /// This should be invoked when the pool has drifted and accounts are updated manually. pub fn update_accounts(&self, accounts: Vec) { let changed_senders = self.changed_senders(accounts.into_iter()); let UpdateOutcome { promoted, discarded } = @@ -431,16 +434,29 @@ where }); listener.send_all(promoted_hashes) }); + + // in this case we should also emit promoted transactions in full + self.transaction_listener.lock().retain_mut(|listener| { + let promoted_txs = promoted.iter().filter_map(|tx| { + if listener.kind.is_propagate_only() && !tx.propagate { + None + } else { + Some(NewTransactionEvent::pending(tx.clone())) + } + }); + listener.send_all(promoted_txs) + }); } { let mut listener = self.event_listener.write(); - - for tx in &promoted { - listener.pending(tx.hash(), None); - } - for tx in &discarded { - listener.discarded(tx.hash()); + if !listener.is_empty() { + for tx in &promoted { + listener.pending(tx.hash(), None); + } + for tx in &discarded { + listener.discarded(tx.hash()); + } } } @@ -589,17 +605,11 @@ where if !discarded.is_empty() { // Delete any blobs associated with discarded blob transactions self.delete_discarded_blobs(discarded.iter()); + self.event_listener.write().discarded_many(&discarded); let discarded_hashes = discarded.into_iter().map(|tx| *tx.hash()).collect::>(); - { - let mut listener = self.event_listener.write(); - for hash in &discarded_hashes { - listener.discarded(hash); - } - } - // A newly added transaction may be immediately discarded, so we need to // adjust the result here for res in &mut added { @@ -692,20 +702,26 @@ where // broadcast specific transaction events let mut listener = self.event_listener.write(); - for tx in &mined { - listener.mined(tx, block_hash); - } - for tx in &promoted { - listener.pending(tx.hash(), None); - } - for tx in &discarded { - listener.discarded(tx.hash()); + if !listener.is_empty() { + for tx in &mined { + listener.mined(tx, block_hash); + } + for tx in &promoted { + listener.pending(tx.hash(), None); + } + for tx in &discarded { + listener.discarded(tx.hash()); + } } } /// Fire events for the newly added transaction if there are any. fn notify_event_listeners(&self, tx: &AddedTransaction) { let mut listener = self.event_listener.write(); + if listener.is_empty() { + // nothing to notify + return + } match tx { AddedTransaction::Pending(tx) => { @@ -770,6 +786,11 @@ where } } + /// Returns the hashes of _all_ transactions in the pool + pub fn all_transaction_hashes(&self) -> Vec { + self.get_pool_data().all().transactions_iter().map(|tx| *tx.hash()).collect() + } + /// Removes and returns all matching transactions from the pool. 
/// /// This behaves as if the transactions got discarded (_not_ mined), effectively introducing a @@ -783,11 +804,7 @@ where } let removed = self.pool.write().remove_transactions(hashes); - let mut listener = self.event_listener.write(); - - for tx in &removed { - listener.discarded(tx.hash()); - } + self.event_listener.write().discarded_many(&removed); removed } @@ -820,11 +837,7 @@ where let sender_id = self.get_sender_id(sender); let removed = self.pool.write().remove_transactions_by_sender(sender_id); - let mut listener = self.event_listener.write(); - - for tx in &removed { - listener.discarded(tx.hash()); - } + self.event_listener.write().discarded_many(&removed); removed } @@ -941,6 +954,19 @@ where self.get_pool_data().get_all(txs).collect() } + /// Returns all the transactions belonging to the hashes that are propagatable. + /// + /// If no transaction exists, it is skipped. + fn get_all_propagatable( + &self, + txs: Vec, + ) -> Vec>> { + if txs.is_empty() { + return Vec::new() + } + self.get_pool_data().get_all(txs).filter(|tx| tx.propagate).collect() + } + /// Notify about propagated transactions. pub fn on_propagated(&self, txs: PropagatedTransactions) { if txs.0.is_empty() { @@ -948,7 +974,9 @@ where } let mut listener = self.event_listener.write(); - txs.0.into_iter().for_each(|(hash, peers)| listener.propagated(&hash, peers)) + if !listener.is_empty() { + txs.0.into_iter().for_each(|(hash, peers)| listener.propagated(&hash, peers)); + } } /// Number of transactions in the entire pool diff --git a/crates/transaction-pool/src/pool/parked.rs b/crates/transaction-pool/src/pool/parked.rs index d3e90b6e3c1..b7736f98516 100644 --- a/crates/transaction-pool/src/pool/parked.rs +++ b/crates/transaction-pool/src/pool/parked.rs @@ -16,9 +16,6 @@ use std::{ /// basefee, ancestor transactions, balance) that eventually move the transaction into the pending /// pool. /// -/// This pool is a bijection: at all times each set (`best`, `by_id`) contains the same -/// transactions. -/// /// Note: This type is generic over [`ParkedPool`] which enforces that the underlying transaction /// type is [`ValidPoolTransaction`] wrapped in an [Arc]. #[derive(Debug, Clone)] @@ -29,10 +26,6 @@ pub struct ParkedPool { submission_id: u64, /// _All_ Transactions that are currently inside the pool grouped by their identifier. by_id: BTreeMap>, - /// All transactions sorted by their order function. - /// - /// The higher, the better. - best: BTreeSet>, /// Keeps track of last submission id for each sender. /// /// This are sorted in reverse order, so the last (highest) submission id is first, and the @@ -71,8 +64,7 @@ impl ParkedPool { self.add_sender_count(tx.sender_id(), submission_id); let transaction = ParkedPoolTransaction { submission_id, transaction: tx.into() }; - self.by_id.insert(id, transaction.clone()); - self.best.insert(transaction); + self.by_id.insert(id, transaction); } /// Increments the count of transactions for the given sender and updates the tracked submission @@ -142,7 +134,6 @@ impl ParkedPool { ) -> Option>> { // remove from queues let tx = self.by_id.remove(id)?; - self.best.remove(&tx); self.remove_sender_count(tx.transaction.sender_id()); // keep track of size @@ -254,11 +245,9 @@ impl ParkedPool { self.by_id.get(id) } - /// Asserts that the bijection between `by_id` and `best` is valid. 
+ /// Asserts that all subpool invariants hold. #[cfg(any(test, feature = "test-utils"))] pub(crate) fn assert_invariants(&self) { - assert_eq!(self.by_id.len(), self.best.len(), "by_id.len() != best.len()"); - assert_eq!( self.last_sender_submission.len(), self.sender_transaction_count.len(), @@ -275,7 +264,7 @@ impl ParkedPool> { &self, basefee: u64, ) -> Vec>> { - let ids = self.satisfy_base_fee_ids(basefee); + let ids = self.satisfy_base_fee_ids(basefee as u128); let mut txs = Vec::with_capacity(ids.len()); for id in ids { txs.push(self.get(&id).expect("transaction exists").transaction.clone().into()); @@ -284,13 +273,13 @@ impl ParkedPool> { } /// Returns all transactions that satisfy the given basefee. - fn satisfy_base_fee_ids(&self, basefee: u64) -> Vec { + fn satisfy_base_fee_ids(&self, basefee: u128) -> Vec { let mut transactions = Vec::new(); { let mut iter = self.by_id.iter().peekable(); while let Some((id, tx)) = iter.next() { - if tx.transaction.transaction.max_fee_per_gas() < basefee as u128 { + if tx.transaction.transaction.max_fee_per_gas() < basefee { // still parked -> skip descendant transactions 'this: while let Some((peek, _)) = iter.peek() { if peek.sender != id.sender { @@ -311,7 +300,7 @@ impl ParkedPool> { /// /// Note: the transactions are not returned in a particular order. pub(crate) fn enforce_basefee(&mut self, basefee: u64) -> Vec>> { - let to_remove = self.satisfy_base_fee_ids(basefee); + let to_remove = self.satisfy_base_fee_ids(basefee as u128); let mut removed = Vec::with_capacity(to_remove.len()); for id in to_remove { @@ -327,7 +316,6 @@ impl Default for ParkedPool { Self { submission_id: 0, by_id: Default::default(), - best: Default::default(), last_sender_submission: Default::default(), sender_transaction_count: Default::default(), size_of: Default::default(), @@ -1051,50 +1039,4 @@ mod tests { assert!(removed.is_some()); assert!(!pool.contains(&tx_id)); } - - #[test] - fn test_parkpool_ord() { - let mut f = MockTransactionFactory::default(); - let mut pool = ParkedPool::>::default(); - - let tx1 = MockTransaction::eip1559().with_max_fee(100); - let tx1_v = f.validated_arc(tx1.clone()); - - let tx2 = MockTransaction::eip1559().with_max_fee(101); - let tx2_v = f.validated_arc(tx2.clone()); - - let tx3 = MockTransaction::eip1559().with_max_fee(101); - let tx3_v = f.validated_arc(tx3.clone()); - - let tx4 = MockTransaction::eip1559().with_max_fee(101); - let mut tx4_v = f.validated(tx4.clone()); - tx4_v.timestamp = tx3_v.timestamp; - - let ord_1 = QueuedOrd(tx1_v.clone()); - let ord_2 = QueuedOrd(tx2_v.clone()); - let ord_3 = QueuedOrd(tx3_v.clone()); - assert!(ord_1 < ord_2); - // lower timestamp is better - assert!(ord_2 > ord_3); - assert!(ord_1 < ord_3); - - pool.add_transaction(tx1_v); - pool.add_transaction(tx2_v); - pool.add_transaction(tx3_v); - pool.add_transaction(Arc::new(tx4_v)); - - // from worst to best - let mut iter = pool.best.iter(); - let tx = iter.next().unwrap(); - assert_eq!(tx.transaction.transaction, tx1); - - let tx = iter.next().unwrap(); - assert_eq!(tx.transaction.transaction, tx4); - - let tx = iter.next().unwrap(); - assert_eq!(tx.transaction.transaction, tx3); - - let tx = iter.next().unwrap(); - assert_eq!(tx.transaction.transaction, tx2); - } } diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index 162e3aa1979..f2cd6a6fde5 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -109,6 +109,7 @@ impl PendingPool { 
independent: self.independent_transactions.values().cloned().collect(), invalid: Default::default(), new_transaction_receiver: Some(self.new_transaction_notifier.subscribe()), + last_priority: None, skip_blobs: false, } } diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 1763e19cf0f..c3f2233f442 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -864,8 +864,6 @@ impl TxPool { } } - self.update_size_metrics(); - outcome } @@ -1582,7 +1580,6 @@ impl AllTransactions { self.remove_auths(&internal); // decrement the counter for the sender. self.tx_decr(tx.sender_id()); - self.update_size_metrics(); Some((tx, internal.subpool)) } @@ -1648,8 +1645,6 @@ impl AllTransactions { self.remove_auths(&internal); - self.update_size_metrics(); - result } @@ -3782,4 +3777,64 @@ mod tests { assert_eq!(pool.pending_pool.independent().len(), 1); } + + #[test] + fn test_insertion_disorder() { + let mut f = MockTransactionFactory::default(); + let mut pool = TxPool::new(MockOrdering::default(), Default::default()); + + let sender = address!("1234567890123456789012345678901234567890"); + let tx0 = f.validated_arc( + MockTransaction::legacy().with_sender(sender).with_nonce(0).with_gas_price(10), + ); + let tx1 = f.validated_arc( + MockTransaction::eip1559() + .with_sender(sender) + .with_nonce(1) + .with_gas_limit(1000) + .with_gas_price(10), + ); + let tx2 = f.validated_arc( + MockTransaction::legacy().with_sender(sender).with_nonce(2).with_gas_price(10), + ); + let tx3 = f.validated_arc( + MockTransaction::legacy().with_sender(sender).with_nonce(3).with_gas_price(10), + ); + + // tx0 should be put in the pending subpool + pool.add_transaction((*tx0).clone(), U256::from(1000), 0, None).unwrap(); + let mut best = pool.best_transactions(); + let t0 = best.next().expect("tx0 should be put in the pending subpool"); + assert_eq!(t0.id(), tx0.id()); + // tx1 should be put in the queued subpool due to insufficient sender balance + pool.add_transaction((*tx1).clone(), U256::from(1000), 0, None).unwrap(); + let mut best = pool.best_transactions(); + let t0 = best.next().expect("tx0 should be put in the pending subpool"); + assert_eq!(t0.id(), tx0.id()); + assert!(best.next().is_none()); + + // tx2 should be put in the pending subpool, and tx1 should be promoted to pending + pool.add_transaction((*tx2).clone(), U256::MAX, 0, None).unwrap(); + + let mut best = pool.best_transactions(); + + let t0 = best.next().expect("tx0 should be put in the pending subpool"); + let t1 = best.next().expect("tx1 should be put in the pending subpool"); + let t2 = best.next().expect("tx2 should be put in the pending subpool"); + assert_eq!(t0.id(), tx0.id()); + assert_eq!(t1.id(), tx1.id()); + assert_eq!(t2.id(), tx2.id()); + + // tx3 should be put in the pending subpool, + pool.add_transaction((*tx3).clone(), U256::MAX, 0, None).unwrap(); + let mut best = pool.best_transactions(); + let t0 = best.next().expect("tx0 should be put in the pending subpool"); + let t1 = best.next().expect("tx1 should be put in the pending subpool"); + let t2 = best.next().expect("tx2 should be put in the pending subpool"); + let t3 = best.next().expect("tx3 should be put in the pending subpool"); + assert_eq!(t0.id(), tx0.id()); + assert_eq!(t1.id(), tx1.id()); + assert_eq!(t2.id(), tx2.id()); + assert_eq!(t3.id(), tx3.id()); + } } diff --git a/crates/transaction-pool/src/test_utils/okvalidator.rs 
b/crates/transaction-pool/src/test_utils/okvalidator.rs index 369839760c3..fc15dce74ec 100644 --- a/crates/transaction-pool/src/test_utils/okvalidator.rs +++ b/crates/transaction-pool/src/test_utils/okvalidator.rs @@ -10,11 +10,21 @@ use crate::{ #[non_exhaustive] pub struct OkValidator { _phantom: PhantomData, + /// Whether to mark transactions as propagatable. + propagate: bool, +} + +impl OkValidator { + /// Determines whether transactions should be allowed to be propagated. + pub const fn set_propagate_transactions(mut self, propagate: bool) -> Self { + self.propagate = propagate; + self + } } impl Default for OkValidator { fn default() -> Self { - Self { _phantom: Default::default() } + Self { _phantom: Default::default(), propagate: false } } } @@ -38,7 +48,7 @@ where state_nonce: transaction.nonce(), bytecode_hash: None, transaction: ValidTransaction::Valid(transaction), - propagate: false, + propagate: self.propagate, authorities, } } diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 090f59169b0..490b41b9c78 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -76,7 +76,6 @@ use reth_eth_wire_types::HandleMempoolData; use reth_ethereum_primitives::{PooledTransactionVariant, TransactionSigned}; use reth_execution_types::ChangedAccount; use reth_primitives_traits::{Block, InMemorySize, Recovered, SealedBlock, SignedTransaction}; -#[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use std::{ collections::{HashMap, HashSet}, @@ -282,7 +281,9 @@ pub trait TransactionPool: Clone + Debug + Send + Sync { NewSubpoolTransactionStream::new(self.new_transactions_listener(), SubPool::Queued) } - /// Returns the _hashes_ of all transactions in the pool. + /// Returns the _hashes_ of all transactions in the pool that are allowed to be propagated. + /// + /// This excludes hashes that aren't allowed to be propagated. /// /// Note: This returns a `Vec` but should guarantee that all hashes are unique. /// @@ -294,7 +295,8 @@ /// Consumer: P2P fn pooled_transaction_hashes_max(&self, max: usize) -> Vec; - /// Returns the _full_ transaction objects all transactions in the pool. + /// Returns the _full_ transaction objects for all transactions in the pool that are allowed to + /// be propagated. /// /// This is intended to be used by the network for the initial exchange of pooled transaction /// _hashes_ @@ -314,7 +316,8 @@ max: usize, ) -> Vec>>; - /// Returns converted [`PooledTransactionVariant`] for the given transaction hashes. + /// Returns converted [`PooledTransactionVariant`] for the given transaction hashes that are + /// allowed to be propagated. /// /// This adheres to the expected behavior of /// [`GetPooledTransactions`](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#getpooledtransactions-0x09): @@ -403,6 +406,17 @@ /// Consumer: RPC fn all_transactions(&self) -> AllPoolTransactions; + /// Returns the _hashes_ of all transactions regardless of whether they can be propagated or + /// not. + /// + /// Unlike [`Self::pooled_transaction_hashes`] this doesn't consider whether the transaction can + /// be propagated or not. + /// + /// Note: This returns a `Vec` but should guarantee that all hashes are unique. 
+ /// + /// Consumer: Utility + fn all_transaction_hashes(&self) -> Vec; + /// Removes all transactions corresponding to the given hashes. /// /// Note: This removes the transactions as if they got discarded (_not_ mined). @@ -652,6 +666,11 @@ pub struct AllPoolTransactions { // === impl AllPoolTransactions === impl AllPoolTransactions { + /// Returns the combined number of all transactions. + pub const fn count(&self) -> usize { + self.pending.len() + self.queued.len() + } + /// Returns an iterator over all pending [`Recovered`] transactions. pub fn pending_recovered(&self) -> impl Iterator> + '_ { self.pending.iter().map(|tx| tx.transaction.clone().into_consensus()) @@ -737,7 +756,7 @@ pub struct NewBlobSidecar { /// /// Depending on where the transaction was picked up, it affects how the transaction is handled /// internally, e.g. limits for simultaneous transaction of one sender. -#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, Default, Deserialize, Serialize)] pub enum TransactionOrigin { /// Transaction is coming from a local source. #[default] diff --git a/crates/trie/common/src/input.rs b/crates/trie/common/src/input.rs index ecf9bab7eca..fff50fbb7b0 100644 --- a/crates/trie/common/src/input.rs +++ b/crates/trie/common/src/input.rs @@ -109,4 +109,17 @@ impl TrieInput { self.nodes.extend_ref(nodes); self.state.extend_ref(state); } + + /// This method clears the trie input nodes, state, and prefix sets. + pub fn clear(&mut self) { + self.nodes.clear(); + self.state.clear(); + self.prefix_sets.clear(); + } + + /// This method returns a cleared version of this trie input. + pub fn cleared(mut self) -> Self { + self.clear(); + self + } } diff --git a/crates/trie/common/src/prefix_set.rs b/crates/trie/common/src/prefix_set.rs index e1f4150dd25..c8d3ac74547 100644 --- a/crates/trie/common/src/prefix_set.rs +++ b/crates/trie/common/src/prefix_set.rs @@ -45,6 +45,13 @@ impl TriePrefixSetsMut { destroyed_accounts: self.destroyed_accounts, } } + + /// Clears the prefix sets and destroyed accounts map. + pub fn clear(&mut self) { + self.destroyed_accounts.clear(); + self.storage_prefix_sets.clear(); + self.account_prefix_set.clear(); + } } /// Collection of trie prefix sets. @@ -134,12 +141,12 @@ impl PrefixSetMut { } /// Returns the number of elements in the set. - pub fn len(&self) -> usize { + pub const fn len(&self) -> usize { self.keys.len() } /// Returns `true` if the set is empty. - pub fn is_empty(&self) -> bool { + pub const fn is_empty(&self) -> bool { self.keys.is_empty() } diff --git a/crates/trie/parallel/src/lib.rs b/crates/trie/parallel/src/lib.rs index e3138d19a30..c04af264d18 100644 --- a/crates/trie/parallel/src/lib.rs +++ b/crates/trie/parallel/src/lib.rs @@ -25,3 +25,7 @@ pub mod proof_task; /// Parallel state root metrics. #[cfg(feature = "metrics")] pub mod metrics; + +/// Proof task manager metrics. 
+#[cfg(feature = "metrics")] +pub mod proof_task_metrics; diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index 2e5813d55b0..d884a7b71a2 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -39,6 +39,9 @@ use std::{ use tokio::runtime::Handle; use tracing::debug; +#[cfg(feature = "metrics")] +use crate::proof_task_metrics::ProofTaskMetrics; + type StorageProofResult = Result; type TrieNodeProviderResult = Result, SparseTrieError>; @@ -70,6 +73,9 @@ pub struct ProofTaskManager { /// Incremented in [`ProofTaskManagerHandle::new`] and decremented in /// [`ProofTaskManagerHandle::drop`]. active_handles: Arc, + /// Metrics tracking blinded node fetches. + #[cfg(feature = "metrics")] + metrics: ProofTaskMetrics, } impl ProofTaskManager { @@ -95,6 +101,8 @@ impl ProofTaskManager { proof_task_rx, tx_sender, active_handles: Arc::new(AtomicUsize::new(0)), + #[cfg(feature = "metrics")] + metrics: ProofTaskMetrics::default(), } } @@ -167,6 +175,17 @@ where match self.proof_task_rx.recv() { Ok(message) => match message { ProofTaskMessage::QueueTask(task) => { + // Track metrics for blinded node requests + #[cfg(feature = "metrics")] + match &task { + ProofTaskKind::BlindedAccountNode(_, _) => { + self.metrics.account_nodes += 1; + } + ProofTaskKind::BlindedStorageNode(_, _, _) => { + self.metrics.storage_nodes += 1; + } + _ => {} + } // queue the task self.queue_proof_task(task) } @@ -174,7 +193,12 @@ where // return the transaction to the pool self.proof_task_txs.push(tx); } - ProofTaskMessage::Terminate => return Ok(()), + ProofTaskMessage::Terminate => { + // Record metrics before terminating + #[cfg(feature = "metrics")] + self.metrics.record(); + return Ok(()) + } }, // All senders are disconnected, so we can terminate // However this should never happen, as this struct stores a sender diff --git a/crates/trie/parallel/src/proof_task_metrics.rs b/crates/trie/parallel/src/proof_task_metrics.rs new file mode 100644 index 00000000000..cdb59d078d8 --- /dev/null +++ b/crates/trie/parallel/src/proof_task_metrics.rs @@ -0,0 +1,42 @@ +use reth_metrics::{metrics::Histogram, Metrics}; + +/// Metrics for blinded node fetching for the duration of the proof task manager. +#[derive(Clone, Debug, Default)] +pub struct ProofTaskMetrics { + /// The actual metrics for blinded nodes. + pub task_metrics: ProofTaskTrieMetrics, + /// Count of blinded account node requests. + pub account_nodes: usize, + /// Count of blinded storage node requests. + pub storage_nodes: usize, +} + +impl ProofTaskMetrics { + /// Record the blinded node counts into the histograms. + pub fn record(&self) { + self.task_metrics.record_account_nodes(self.account_nodes); + self.task_metrics.record_storage_nodes(self.storage_nodes); + } +} + +/// Metrics for the proof task. +#[derive(Clone, Metrics)] +#[metrics(scope = "trie.proof_task")] +pub struct ProofTaskTrieMetrics { + /// A histogram for the number of blinded account nodes fetched. + blinded_account_nodes: Histogram, + /// A histogram for the number of blinded storage nodes fetched. + blinded_storage_nodes: Histogram, +} + +impl ProofTaskTrieMetrics { + /// Record account nodes fetched. + pub fn record_account_nodes(&self, count: usize) { + self.blinded_account_nodes.record(count as f64); + } + + /// Record storage nodes fetched. 
+ pub fn record_storage_nodes(&self, count: usize) { + self.blinded_storage_nodes.record(count as f64); + } +} diff --git a/crates/trie/parallel/src/root.rs b/crates/trie/parallel/src/root.rs index 408635a1f42..e48ea0503a2 100644 --- a/crates/trie/parallel/src/root.rs +++ b/crates/trie/parallel/src/root.rs @@ -163,7 +163,7 @@ where hash_builder.add_branch(node.key, node.value, node.children_are_in_trie); } TrieElement::Leaf(hashed_address, account) => { - let (storage_root, _, updates) = match storage_roots.remove(&hashed_address) { + let storage_root_result = match storage_roots.remove(&hashed_address) { Some(rx) => rx.recv().map_err(|_| { ParallelStateRootError::StorageRoot(StorageRootError::Database( DatabaseError::Other(format!( @@ -187,6 +187,17 @@ where } }; + let (storage_root, _, updates) = match storage_root_result { + reth_trie::StorageRootProgress::Complete(root, _, updates) => (root, (), updates), + reth_trie::StorageRootProgress::Progress(..) => { + return Err(ParallelStateRootError::StorageRoot( + StorageRootError::Database(DatabaseError::Other( + "StorageRoot returned Progress variant in parallel trie calculation".to_string() + )) + )) + } + }; + if retain_updates { trie_updates.insert_storage_updates(hashed_address, updates); } diff --git a/crates/trie/sparse-parallel/Cargo.toml b/crates/trie/sparse-parallel/Cargo.toml index 41f9ab9ab1f..9c62aabaddf 100644 --- a/crates/trie/sparse-parallel/Cargo.toml +++ b/crates/trie/sparse-parallel/Cargo.toml @@ -23,6 +23,10 @@ alloy-trie.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true +# metrics +reth-metrics = { workspace = true, optional = true } +metrics = { workspace = true, optional = true } + # misc smallvec.workspace = true rayon = { workspace = true, optional = true } @@ -47,7 +51,7 @@ rand.workspace = true rand_08.workspace = true [features] -default = ["std"] +default = ["std", "metrics"] std = [ "dep:rayon", "alloy-primitives/std", @@ -59,3 +63,8 @@ std = [ "reth-trie-sparse/std", "tracing/std", ] +metrics = [ + "dep:reth-metrics", + "dep:metrics", + "std", +] diff --git a/crates/trie/sparse-parallel/src/lib.rs b/crates/trie/sparse-parallel/src/lib.rs index c4b7b10ea51..f37b274e41c 100644 --- a/crates/trie/sparse-parallel/src/lib.rs +++ b/crates/trie/sparse-parallel/src/lib.rs @@ -9,3 +9,6 @@ pub use trie::*; mod lower; use lower::*; + +#[cfg(feature = "metrics")] +mod metrics; diff --git a/crates/trie/sparse-parallel/src/metrics.rs b/crates/trie/sparse-parallel/src/metrics.rs new file mode 100644 index 00000000000..892c8fbe2ae --- /dev/null +++ b/crates/trie/sparse-parallel/src/metrics.rs @@ -0,0 +1,23 @@ +//! Metrics for the parallel sparse trie +use reth_metrics::{metrics::Histogram, Metrics}; + +/// Metrics for the parallel sparse trie +#[derive(Metrics, Clone)] +#[metrics(scope = "parallel_sparse_trie")] +pub(crate) struct ParallelSparseTrieMetrics { + /// A histogram for the number of subtries updated when calculating hashes. + pub(crate) subtries_updated: Histogram, + /// A histogram for the time it took to update lower subtrie hashes. + pub(crate) subtrie_hash_update_latency: Histogram, + /// A histogram for the time it took to update the upper subtrie hashes. 
+ pub(crate) subtrie_upper_hash_latency: Histogram, +} + +impl PartialEq for ParallelSparseTrieMetrics { + fn eq(&self, _other: &Self) -> bool { + // It does not make sense to compare metrics, so return true; all instances are equal + true + } +} + +impl Eq for ParallelSparseTrieMetrics {} diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index ffc40ded86b..7c1f8a02bc9 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -32,6 +32,60 @@ pub const NUM_LOWER_SUBTRIES: usize = 16usize.pow(UPPER_TRIE_MAX_DEPTH as u32); /// A revealed sparse trie with subtries that can be updated in parallel. /// +/// ## Structure +/// +/// The trie is divided into two tiers for efficient parallel processing: +/// - **Upper subtrie**: Contains nodes with paths shorter than [`UPPER_TRIE_MAX_DEPTH`] +/// - **Lower subtries**: An array of [`NUM_LOWER_SUBTRIES`] subtries, each handling nodes with +/// paths of at least [`UPPER_TRIE_MAX_DEPTH`] nibbles +/// +/// Node placement is determined by path depth: +/// - Paths with < [`UPPER_TRIE_MAX_DEPTH`] nibbles go to the upper subtrie +/// - Paths with >= [`UPPER_TRIE_MAX_DEPTH`] nibbles go to lower subtries, indexed by their first +/// [`UPPER_TRIE_MAX_DEPTH`] nibbles. +/// +/// Each lower subtrie tracks its root via the `path` field, which represents the shortest path +/// in that subtrie. This path will have at least [`UPPER_TRIE_MAX_DEPTH`] nibbles, but may be +/// longer when an extension node in the upper trie "reaches into" the lower subtrie. For example, +/// if the upper trie has an extension from `0x1` to `0x12345`, then the lower subtrie for prefix +/// `0x12` will have its root at path `0x12345` rather than at `0x12`. +/// +/// ## Node Revealing +/// +/// The trie uses lazy loading to efficiently handle large state tries. Nodes can be: +/// - **Blind nodes**: Stored as hashes ([`SparseNode::Hash`]), representing unloaded trie parts +/// - **Revealed nodes**: Fully loaded nodes (Branch, Extension, Leaf) with complete structure +/// +/// Note: An empty trie contains an `EmptyRoot` node at the root path, rather than no nodes at all. +/// A trie with no nodes is blinded; its root may be `EmptyRoot` or some other node type. +/// +/// Revealing is generally done using pre-loaded node data provided via `reveal_nodes`. In +/// certain cases, such as edge-cases when updating/removing leaves, nodes are revealed on-demand. +/// +/// ## Leaf Operations +/// +/// **Update**: When updating a leaf, the new value is stored in the appropriate subtrie's values +/// map. If the leaf is new, the trie structure is updated by walking to the leaf from the root, +/// creating necessary intermediate branch nodes. +/// +/// **Removal**: Leaf removal may require parent node modifications. The algorithm walks up the +/// trie, removing nodes that become empty and converting single-child branches to extensions. +/// +/// During leaf operations the overall structure of the trie may change, causing nodes to be moved +/// from the upper to lower trie or vice-versa. +/// +/// The `prefix_set` is modified during both leaf updates and removals to track changed leaf paths. +/// +/// ## Root Hash Calculation +/// +/// Root hash computation follows a bottom-up approach: +/// 1. Update hashes for all modified lower subtries (can be done in parallel) +/// 2. Update hashes for the upper subtrie (which may reference lower subtrie hashes) +/// 3. 
Calculate the final root hash from the upper subtrie's root node +/// +/// The `prefix_set` tracks which paths have been modified, enabling incremental updates instead of +/// recalculating the entire trie. +/// /// ## Invariants /// /// - Each leaf entry in the `subtries` and `upper_trie` collection must have a corresponding entry @@ -55,6 +109,9 @@ pub struct ParallelSparseTrie { /// Reusable buffer pool used for collecting [`SparseTrieUpdatesAction`]s during hash /// computations. update_actions_buffers: Vec>, + /// Metrics for the parallel sparse trie. + #[cfg(feature = "metrics")] + metrics: crate::metrics::ParallelSparseTrieMetrics, } impl Default for ParallelSparseTrie { @@ -70,6 +127,8 @@ impl Default for ParallelSparseTrie { branch_node_tree_masks: HashMap::default(), branch_node_hash_masks: HashMap::default(), update_actions_buffers: Vec::default(), + #[cfg(feature = "metrics")] + metrics: Default::default(), } } } @@ -664,6 +723,10 @@ impl SparseTrieInterface for ParallelSparseTrie { let mut prefix_set = core::mem::take(&mut self.prefix_set).freeze(); let (subtries, unchanged_prefix_set) = self.take_changed_lower_subtries(&mut prefix_set); + // update metrics + #[cfg(feature = "metrics")] + self.metrics.subtries_updated.record(subtries.len() as f64); + // Update the prefix set with the keys that didn't have matching subtries self.prefix_set = unchanged_prefix_set; @@ -698,12 +761,16 @@ impl SparseTrieInterface for ParallelSparseTrie { mut prefix_set, mut update_actions_buf, }| { + #[cfg(feature = "metrics")] + let start = std::time::Instant::now(); subtrie.update_hashes( &mut prefix_set, &mut update_actions_buf, branch_node_tree_masks, branch_node_hash_masks, ); + #[cfg(feature = "metrics")] + self.metrics.subtrie_hash_update_latency.record(start.elapsed()); (index, subtrie, update_actions_buf) }, ) @@ -1164,6 +1231,9 @@ impl ParallelSparseTrie { is_in_prefix_set: None, }); + #[cfg(feature = "metrics")] + let start = std::time::Instant::now(); + let mut update_actions_buf = self.updates_enabled().then(|| self.update_actions_buffers.pop().unwrap_or_default()); @@ -1209,6 +1279,9 @@ impl ParallelSparseTrie { self.update_actions_buffers.push(update_actions_buf); } + #[cfg(feature = "metrics")] + self.metrics.subtrie_upper_hash_latency.record(start.elapsed()); + debug_assert_eq!(self.upper_subtrie.inner.buffers.rlp_node_stack.len(), 1); self.upper_subtrie.inner.buffers.rlp_node_stack.pop().unwrap().rlp_node } @@ -2381,7 +2454,7 @@ fn path_subtrie_index_unchecked(path: &Nibbles) -> usize { path.get_byte_unchecked(0) as usize } -/// Used by lower subtries to communicate updates to the the top-level [`SparseTrieUpdates`] set. +/// Used by lower subtries to communicate updates to the top-level [`SparseTrieUpdates`] set. #[derive(Clone, Debug, Eq, PartialEq)] enum SparseTrieUpdatesAction { /// Remove the path from the `updated_nodes`, if it was present, and add it to `removed_nodes`. 
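The two-tier layout documented above reduces subtrie selection to a tiny computation. The following standalone sketch (not part of the patch) illustrates it, assuming `UPPER_TRIE_MAX_DEPTH` is 2 nibbles as the constants in the hunk suggest; `subtrie_index` is a hypothetical stand-in for the crate's `path_subtrie_index_unchecked`, operating on a plain nibble slice instead of `Nibbles`:

```rust
// Hypothetical illustration only; names and types are simplified stand-ins.
const UPPER_TRIE_MAX_DEPTH: usize = 2;
const NUM_LOWER_SUBTRIES: usize = 16usize.pow(UPPER_TRIE_MAX_DEPTH as u32);

/// Selects a lower subtrie by packing the path's first two nibbles into a
/// byte, mirroring `path.get_byte_unchecked(0)` in the hunk above.
fn subtrie_index(nibbles: &[u8]) -> usize {
    debug_assert!(nibbles.len() >= UPPER_TRIE_MAX_DEPTH);
    ((nibbles[0] as usize) << 4) | nibbles[1] as usize
}

fn main() {
    assert_eq!(NUM_LOWER_SUBTRIES, 256);
    // Path 0x12345 belongs to the subtrie for prefix 0x12 (index 18). Per the
    // docs above, that subtrie's root may still sit deeper than 0x12 when an
    // upper-trie extension reaches past the two-nibble boundary.
    assert_eq!(subtrie_index(&[0x1, 0x2, 0x3, 0x4, 0x5]), 0x12);
}
```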
diff --git a/crates/trie/sparse/Cargo.toml b/crates/trie/sparse/Cargo.toml index 8b40a72da2a..6fac7c5faad 100644 --- a/crates/trie/sparse/Cargo.toml +++ b/crates/trie/sparse/Cargo.toml @@ -25,6 +25,7 @@ alloy-rlp.workspace = true # misc auto_impl.workspace = true +rayon = { workspace = true, optional = true } smallvec = { workspace = true, features = ["const_new"] } # metrics @@ -54,6 +55,7 @@ rand_08.workspace = true [features] default = ["std", "metrics"] std = [ + "dep:rayon", "alloy-primitives/std", "alloy-rlp/std", "alloy-trie/std", diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 0739d6946a3..c7c214a894c 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -37,9 +37,8 @@ where /// [`SparseStateTrie`] and then storing that instance for later re-use. pub fn from_state_trie(mut trie: SparseStateTrie) -> Self { trie.state = trie.state.clear(); - trie.cleared_storages.extend(trie.storages.drain().map(|(_, trie)| trie.clear())); trie.revealed_account_paths.clear(); - trie.revealed_storage_paths.clear(); + trie.storage.clear(); trie.account_rlp_buf.clear(); Self(trie) } @@ -58,14 +57,10 @@ pub struct SparseStateTrie< > { /// Sparse account trie. state: SparseTrie, - /// Sparse storage tries. - storages: B256Map>, - /// Cleared storage tries, kept for re-use - cleared_storages: Vec>, /// Collection of revealed account trie paths. revealed_account_paths: HashSet, - /// Collection of revealed storage trie paths, per account. - revealed_storage_paths: B256Map>, + /// State related to storage tries. + storage: StorageTries, /// Flag indicating whether trie updates should be retained. retain_updates: bool, /// Reusable buffer for RLP encoding of trie accounts. @@ -83,10 +78,8 @@ where fn default() -> Self { Self { state: Default::default(), - storages: Default::default(), - cleared_storages: Default::default(), revealed_account_paths: Default::default(), - revealed_storage_paths: Default::default(), + storage: Default::default(), retain_updates: false, account_rlp_buf: Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE), #[cfg(feature = "metrics")] @@ -156,7 +149,8 @@ where /// Returns `true` if storage slot for account was already revealed. pub fn is_storage_slot_revealed(&self, account: B256, slot: B256) -> bool { - self.revealed_storage_paths + self.storage + .revealed_paths .get(&account) .is_some_and(|slots| slots.contains(&Nibbles::unpack(slot))) } @@ -168,7 +162,7 @@ where /// Returns reference to bytes representing leaf value for the target account and storage slot. pub fn get_storage_slot_value(&self, account: &B256, slot: &B256) -> Option<&Vec> { - self.storages.get(account)?.as_revealed_ref()?.get_leaf_value(&Nibbles::unpack(slot)) + self.storage.tries.get(account)?.as_revealed_ref()?.get_leaf_value(&Nibbles::unpack(slot)) } /// Returns reference to state trie if it was revealed. @@ -178,32 +172,22 @@ where /// Returns reference to storage trie if it was revealed. pub fn storage_trie_ref(&self, address: &B256) -> Option<&S> { - self.storages.get(address).and_then(|e| e.as_revealed_ref()) + self.storage.tries.get(address).and_then(|e| e.as_revealed_ref()) } /// Returns mutable reference to storage sparse trie if it was revealed. pub fn storage_trie_mut(&mut self, address: &B256) -> Option<&mut S> { - self.storages.get_mut(address).and_then(|e| e.as_revealed_mut()) + self.storage.tries.get_mut(address).and_then(|e| e.as_revealed_mut()) } /// Takes the storage trie for the provided address. 
pub fn take_storage_trie(&mut self, address: &B256) -> Option<SparseTrie<S>> { - self.storages.remove(address) + self.storage.tries.remove(address) } /// Inserts storage trie for the provided address. pub fn insert_storage_trie(&mut self, address: B256, storage_trie: SparseTrie<S>) { - self.storages.insert(address, storage_trie); - } - - /// Retrieves the storage trie for the given address, creating a new one if it doesn't exist. - /// - /// This method should always be used to create a storage trie, as it will re-use previously - /// allocated and cleared storage tries when possible. - fn get_or_create_storage_trie(&mut self, address: B256) -> &mut SparseTrie<S> { - self.storages - .entry(address) - .or_insert_with(|| self.cleared_storages.pop().unwrap_or_default()) + self.storage.tries.insert(address, storage_trie); } /// Reveal unknown trie paths from multiproof. @@ -236,12 +220,71 @@ where branch_node_tree_masks, )?; - // then reveal storage proof nodes for each storage trie - for (account, storage_subtree) in storages { - self.reveal_decoded_storage_multiproof(account, storage_subtree)?; + #[cfg(not(feature = "std"))] + // If no_std, serially reveal storage proof nodes for each storage trie + { + for (account, storage_subtree) in storages { + self.reveal_decoded_storage_multiproof(account, storage_subtree)?; + } + + Ok(()) } - Ok(()) + #[cfg(feature = "std")] + // If std, reveal storage proofs in parallel + { + use rayon::iter::{ParallelBridge, ParallelIterator}; + + let (tx, rx) = std::sync::mpsc::channel(); + let retain_updates = self.retain_updates; + + // Process all storage trie reveals in parallel, having first removed the + // `revealed_nodes` sets and `SparseTrie`s for each account from their HashMaps. + // These will be returned after processing. + storages + .into_iter() + .map(|(account, storage_subtree)| { + let revealed_nodes = self.storage.take_or_create_revealed_paths(&account); + let trie = self.storage.take_or_create_trie(&account); + (account, storage_subtree, revealed_nodes, trie) + }) + .par_bridge() + .map(|(account, storage_subtree, mut revealed_nodes, mut trie)| { + let result = Self::reveal_decoded_storage_multiproof_inner( + account, + storage_subtree, + &mut revealed_nodes, + &mut trie, + retain_updates, + ); + + (account, revealed_nodes, trie, result) + }) + .for_each_init(|| tx.clone(), |tx, result| tx.send(result).unwrap()); + + drop(tx); + + // Return `revealed_nodes` and `SparseTrie` for each account, incrementing metrics and + // returning the last error seen if any. + let mut any_err = Ok(()); + for (account, revealed_nodes, trie, result) in rx { + self.storage.revealed_paths.insert(account, revealed_nodes); + self.storage.tries.insert(account, trie); + if let Ok(_metric_values) = result { + #[cfg(feature = "metrics")] + { + self.metrics + .increment_total_storage_nodes(_metric_values.total_nodes as u64); + self.metrics + .increment_skipped_storage_nodes(_metric_values.skipped_nodes as u64); + } + } else { + any_err = result.map(|_| ()); + } + } + + any_err + } } /// Reveals an account multiproof. 
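The std branch above fans the per-account reveals out onto the rayon thread pool via `par_bridge` and funnels the results back through an `std::sync::mpsc` channel, cloning the sender once per worker with `for_each_init`. Here is a minimal, self-contained sketch of that fan-out/fan-in pattern, with a hypothetical `process` function standing in for `reveal_decoded_storage_multiproof_inner` (requires the `rayon` crate):

use rayon::iter::{ParallelBridge, ParallelIterator};
use std::sync::mpsc;

// Hypothetical stand-in for the per-account reveal work; not reth's API.
fn process(account: u64) -> Result<u64, String> {
    Ok(account * 2)
}

fn main() {
    let accounts: Vec<u64> = (0..100).collect();
    let (tx, rx) = mpsc::channel();

    accounts
        .into_iter()
        .par_bridge() // fan the serial iterator out onto the rayon pool
        .map(|account| (account, process(account)))
        // one Sender clone per worker; results are funneled back to `rx`
        .for_each_init(|| tx.clone(), |tx, item| tx.send(item).unwrap());

    // Drop the original sender so the receive loop below terminates once
    // all worker-held clones are dropped.
    drop(tx);

    // Fan-in: consume results serially, keeping the last error seen (the
    // same policy as `any_err` in the hunk above).
    let mut any_err: Result<(), String> = Ok(());
    for (account, result) in rx {
        match result {
            Ok(value) => println!("account {account} -> {value}"),
            Err(err) => any_err = Err(err),
        }
    }
    any_err.expect("all reveals should succeed");
}

Dropping the original `tx` before draining `rx` matters: the receiver only disconnects once every sender clone is gone, which is why the hunk above also calls `drop(tx)` before its collection loop.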
@@ -267,22 +310,17 @@ where branch_node_hash_masks: HashMap<Nibbles, TrieMask>, branch_node_tree_masks: HashMap<Nibbles, TrieMask>, ) -> SparseStateTrieResult<()> { - let FilterMappedProofNodes { - root_node, - nodes, - new_nodes, - total_nodes: _total_nodes, - skipped_nodes: _skipped_nodes, - } = filter_map_revealed_nodes( - account_subtree, - &mut self.revealed_account_paths, - &branch_node_hash_masks, - &branch_node_tree_masks, - )?; + let FilterMappedProofNodes { root_node, nodes, new_nodes, metric_values: _metric_values } = + filter_map_revealed_nodes( + account_subtree, + &mut self.revealed_account_paths, + &branch_node_hash_masks, + &branch_node_tree_masks, + )?; #[cfg(feature = "metrics")] { - self.metrics.increment_total_account_nodes(_total_nodes as u64); - self.metrics.increment_skipped_account_nodes(_skipped_nodes as u64); + self.metrics.increment_total_account_nodes(_metric_values.total_nodes as u64); + self.metrics.increment_skipped_account_nodes(_metric_values.skipped_nodes as u64); } if let Some(root_node) = root_node { @@ -319,35 +357,45 @@ where account: B256, storage_subtree: DecodedStorageMultiProof, ) -> SparseStateTrieResult<()> { - let revealed_nodes = self.revealed_storage_paths.entry(account).or_default(); - - let FilterMappedProofNodes { - root_node, - nodes, - new_nodes, - total_nodes: _total_nodes, - skipped_nodes: _skipped_nodes, - } = filter_map_revealed_nodes( - storage_subtree.subtree, - revealed_nodes, - &storage_subtree.branch_node_hash_masks, - &storage_subtree.branch_node_tree_masks, + let (trie, revealed_paths) = self.storage.get_trie_and_revealed_paths_mut(account); + let _metric_values = Self::reveal_decoded_storage_multiproof_inner( + account, + storage_subtree, + revealed_paths, + trie, + self.retain_updates, )?; + #[cfg(feature = "metrics")] { - self.metrics.increment_total_storage_nodes(_total_nodes as u64); - self.metrics.increment_skipped_storage_nodes(_skipped_nodes as u64); + self.metrics.increment_total_storage_nodes(_metric_values.total_nodes as u64); + self.metrics.increment_skipped_storage_nodes(_metric_values.skipped_nodes as u64); } + Ok(()) + } + + /// Reveals a decoded storage multiproof for the given address. This internal static function + /// is designed to be shared by a variety of associated public functions. + fn reveal_decoded_storage_multiproof_inner( + account: B256, + storage_subtree: DecodedStorageMultiProof, + revealed_nodes: &mut HashSet<Nibbles>, + trie: &mut SparseTrie<S>, + retain_updates: bool, + ) -> SparseStateTrieResult<ProofNodesMetricValues> { + let FilterMappedProofNodes { root_node, nodes, new_nodes, metric_values } = + filter_map_revealed_nodes( + storage_subtree.subtree, + revealed_nodes, + &storage_subtree.branch_node_hash_masks, + &storage_subtree.branch_node_tree_masks, + )?; + if let Some(root_node) = root_node { // Reveal root node if it wasn't already. trace!(target: "trie::sparse", ?account, ?root_node, "Revealing root storage node"); - let retain_updates = self.retain_updates; - let trie = self.get_or_create_storage_trie(account).reveal_root( - root_node.node, - root_node.masks, - retain_updates, - )?; + let trie = trie.reveal_root(root_node.node, root_node.masks, retain_updates)?; // Reserve the capacity for new nodes ahead of time, if the trie implementation // supports doing so. @@ -357,7 +405,7 @@ where trie.reveal_nodes(nodes)?; } - Ok(()) + Ok(metric_values) } /// Reveal state witness with the given state root. @@ -417,12 +465,15 @@ where if let Some(account) = maybe_account { // Check that the path was not already revealed. 
if self - .revealed_storage_paths + .storage + .revealed_paths .get(&account) .is_none_or(|paths| !paths.contains(&path)) { let retain_updates = self.retain_updates; - let storage_trie_entry = self.get_or_create_storage_trie(account); + let (storage_trie_entry, revealed_storage_paths) = + self.storage.get_trie_and_revealed_paths_mut(account); + if path.is_empty() { // Handle special storage state root node case. storage_trie_entry.reveal_root( @@ -439,7 +490,7 @@ where } // Track the revealed path. - self.revealed_storage_paths.entry(account).or_default().insert(path); + revealed_storage_paths.insert(path); } } // Check that the path was not already revealed. @@ -466,7 +517,7 @@ where /// Wipe the storage trie at the provided address. pub fn wipe_storage(&mut self, address: B256) -> SparseStateTrieResult<()> { - if let Some(trie) = self.storages.get_mut(&address) { + if let Some(trie) = self.storage.tries.get_mut(&address) { trie.wipe()?; } Ok(()) @@ -483,7 +534,7 @@ where /// Returns storage sparse trie root if the trie has been revealed. pub fn storage_root(&mut self, account: B256) -> Option { - self.storages.get_mut(&account).and_then(|trie| trie.root()) + self.storage.tries.get_mut(&account).and_then(|trie| trie.root()) } /// Returns mutable reference to the revealed account sparse trie. @@ -551,7 +602,8 @@ where /// /// Panics if any of the storage tries are not revealed. pub fn storage_trie_updates(&mut self) -> B256Map { - self.storages + self.storage + .tries .iter_mut() .map(|(address, trie)| { let trie = trie.as_revealed_mut().unwrap(); @@ -598,7 +650,7 @@ where Ok(()) } - /// Update the leaf node of a storage trie at the provided address. + /// Update the leaf node of a revealed storage trie at the provided address. pub fn update_storage_leaf( &mut self, address: B256, @@ -606,14 +658,13 @@ where value: Vec, provider_factory: impl TrieNodeProviderFactory, ) -> SparseStateTrieResult<()> { - if !self.revealed_storage_paths.get(&address).is_some_and(|slots| slots.contains(&slot)) { - self.revealed_storage_paths.entry(address).or_default().insert(slot); - } - - let storage_trie = self.storages.get_mut(&address).ok_or(SparseTrieErrorKind::Blind)?; - let provider = provider_factory.storage_node_provider(address); - storage_trie.update_leaf(slot, value, provider)?; + self.storage + .tries + .get_mut(&address) + .ok_or(SparseTrieErrorKind::Blind)? + .update_leaf(slot, value, provider)?; + self.storage.get_revealed_paths_mut(address).insert(slot); Ok(()) } @@ -629,7 +680,7 @@ where ) -> SparseStateTrieResult<()> { let nibbles = Nibbles::unpack(address); - let storage_root = if let Some(storage_trie) = self.storages.get_mut(&address) { + let storage_root = if let Some(storage_trie) = self.storage.tries.get_mut(&address) { trace!(target: "trie::sparse", ?address, "Calculating storage root to update account"); storage_trie.root().ok_or(SparseTrieErrorKind::Blind)? } else if self.is_account_revealed(address) { @@ -684,7 +735,7 @@ where // Calculate the new storage root. If the storage trie doesn't exist, the storage root will // be empty. - let storage_root = if let Some(storage_trie) = self.storages.get_mut(&address) { + let storage_root = if let Some(storage_trie) = self.storage.tries.get_mut(&address) { trace!(target: "trie::sparse", ?address, "Calculating storage root to update account"); storage_trie.root().ok_or(SparseTrieErrorKind::Blind)? 
} else { @@ -728,7 +779,8 @@ where slot: &Nibbles, provider_factory: impl TrieNodeProviderFactory, ) -> SparseStateTrieResult<()> { - let storage_trie = self.storages.get_mut(&address).ok_or(SparseTrieErrorKind::Blind)?; + let storage_trie = + self.storage.tries.get_mut(&address).ok_or(SparseTrieErrorKind::Blind)?; let provider = provider_factory.storage_node_provider(address); storage_trie.remove_leaf(slot, provider)?; @@ -736,6 +788,84 @@ where } } +/// The fields of [`SparseStateTrie`] related to storage tries. This is kept separate from the rest +/// of [`SparseStateTrie`] both to help enforce allocation re-use and to allow us to implement +/// methods like `get_trie_and_revealed_paths` which return multiple mutable borrows. +#[derive(Debug, Default)] +struct StorageTries { + /// Sparse storage tries. + tries: B256Map>, + /// Cleared storage tries, kept for re-use. + cleared_tries: Vec>, + /// Collection of revealed storage trie paths, per account. + revealed_paths: B256Map>, + /// Cleared revealed storage trie path collections, kept for re-use. + cleared_revealed_paths: Vec>, +} + +impl StorageTries { + /// Returns all fields to a cleared state, equivalent to the default state, keeping cleared + /// collections for re-use later when possible. + fn clear(&mut self) { + self.cleared_tries.extend(self.tries.drain().map(|(_, trie)| trie.clear())); + self.cleared_revealed_paths.extend(self.revealed_paths.drain().map(|(_, mut set)| { + set.clear(); + set + })); + } + + /// Returns the set of already revealed trie node paths for an account's storage, creating the + /// set if it didn't previously exist. + fn get_revealed_paths_mut(&mut self, account: B256) -> &mut HashSet { + self.revealed_paths + .entry(account) + .or_insert_with(|| self.cleared_revealed_paths.pop().unwrap_or_default()) + } + + /// Returns the `SparseTrie` and the set of already revealed trie node paths for an account's + /// storage, creating them if they didn't previously exist. + fn get_trie_and_revealed_paths_mut( + &mut self, + account: B256, + ) -> (&mut SparseTrie, &mut HashSet) { + let trie = self + .tries + .entry(account) + .or_insert_with(|| self.cleared_tries.pop().unwrap_or_default()); + + let revealed_paths = self + .revealed_paths + .entry(account) + .or_insert_with(|| self.cleared_revealed_paths.pop().unwrap_or_default()); + + (trie, revealed_paths) + } + + /// Takes the storage trie for the account from the internal `HashMap`, creating it if it + /// doesn't already exist. + #[cfg(feature = "std")] + fn take_or_create_trie(&mut self, account: &B256) -> SparseTrie { + self.tries.remove(account).unwrap_or_else(|| self.cleared_tries.pop().unwrap_or_default()) + } + + /// Takes the revealed paths set from the account from the internal `HashMap`, creating one if + /// it doesn't exist. + #[cfg(feature = "std")] + fn take_or_create_revealed_paths(&mut self, account: &B256) -> HashSet { + self.revealed_paths + .remove(account) + .unwrap_or_else(|| self.cleared_revealed_paths.pop().unwrap_or_default()) + } +} + +#[derive(Debug, PartialEq, Eq, Default)] +struct ProofNodesMetricValues { + /// Number of nodes in the proof. + total_nodes: usize, + /// Number of nodes that were skipped because they were already revealed. + skipped_nodes: usize, +} + /// Result of [`filter_map_revealed_nodes`]. #[derive(Debug, PartialEq, Eq)] struct FilterMappedProofNodes { @@ -743,13 +873,11 @@ struct FilterMappedProofNodes { root_node: Option, /// Filtered, decoded and unsorted proof nodes. Root node is removed. 
nodes: Vec, - /// Number of nodes in the proof. - total_nodes: usize, - /// Number of nodes that were skipped because they were already revealed. - skipped_nodes: usize, /// Number of new nodes that will be revealed. This includes all children of branch nodes, even /// if they are not in the proof. new_nodes: usize, + /// Values which are being returned so they can be incremented into metrics. + metric_values: ProofNodesMetricValues, } /// Filters the decoded nodes that are already revealed, maps them to `RevealedSparseNodes`, @@ -764,21 +892,20 @@ fn filter_map_revealed_nodes( let mut result = FilterMappedProofNodes { root_node: None, nodes: Vec::with_capacity(proof_nodes.len()), - total_nodes: 0, - skipped_nodes: 0, new_nodes: 0, + metric_values: Default::default(), }; let proof_nodes_len = proof_nodes.len(); for (path, proof_node) in proof_nodes.into_inner() { - result.total_nodes += 1; + result.metric_values.total_nodes += 1; let is_root = path.is_empty(); // If the node is already revealed, skip it. We don't ever skip the root node, nor do we add // it to `revealed_nodes`. if !is_root && !revealed_nodes.insert(path) { - result.skipped_nodes += 1; + result.metric_values.skipped_nodes += 1; continue } @@ -1190,12 +1317,15 @@ mod tests { node: leaf, masks: TrieMasks::none(), }], - // Branch, leaf, leaf - total_nodes: 3, - // Revealed leaf node with path 0x1 - skipped_nodes: 1, // Branch, two of its children, one leaf - new_nodes: 4 + new_nodes: 4, + // Metric values + metric_values: ProofNodesMetricValues { + // Branch, leaf, leaf + total_nodes: 3, + // Revealed leaf node with path 0x1 + skipped_nodes: 1, + }, } ); } diff --git a/crates/trie/trie/src/lib.rs b/crates/trie/trie/src/lib.rs index 356f3dac93f..8accd447105 100644 --- a/crates/trie/trie/src/lib.rs +++ b/crates/trie/trie/src/lib.rs @@ -41,7 +41,10 @@ pub use trie::{StateRoot, StorageRoot, TrieType}; /// Utilities for state root checkpoint progress. mod progress; -pub use progress::{IntermediateStateRootState, StateRootProgress}; +pub use progress::{ + IntermediateStateRootState, IntermediateStorageRootState, StateRootProgress, + StorageRootProgress, +}; /// Trie calculation stats. pub mod stats; diff --git a/crates/trie/trie/src/progress.rs b/crates/trie/trie/src/progress.rs index 25195b48adb..1eab18318f2 100644 --- a/crates/trie/trie/src/progress.rs +++ b/crates/trie/trie/src/progress.rs @@ -1,34 +1,90 @@ -use crate::{hash_builder::HashBuilder, trie_cursor::CursorSubNode, updates::TrieUpdates}; +use crate::{ + hash_builder::HashBuilder, + trie_cursor::CursorSubNode, + updates::{StorageTrieUpdates, TrieUpdates}, +}; use alloy_primitives::B256; +use reth_primitives_traits::Account; use reth_stages_types::MerkleCheckpoint; /// The progress of the state root computation. #[derive(Debug)] pub enum StateRootProgress { - /// The complete state root computation with updates and computed root. + /// The complete state root computation with updates, the total number of entries walked, and + /// the computed root. Complete(B256, usize, TrieUpdates), /// The intermediate progress of state root computation. - /// Contains the walker stack, the hash builder and the trie updates. + /// Contains the walker stack, the hash builder, and the trie updates. + /// + /// Also contains any progress in an inner storage root computation. Progress(Box, usize, TrieUpdates), } /// The intermediate state of the state root computation. #[derive(Debug)] pub struct IntermediateStateRootState { - /// Previously constructed hash builder. 
- pub hash_builder: HashBuilder, - /// Previously recorded walker stack. - pub walker_stack: Vec<CursorSubNode>, - /// The last hashed account key processed. - pub last_account_key: B256, + /// The intermediate account root state. + pub account_root_state: IntermediateRootState, + /// The intermediate storage root state with account data. + pub storage_root_state: Option<IntermediateStorageRootState>, +} + +/// The intermediate state of a storage root computation along with the account. +#[derive(Debug)] +pub struct IntermediateStorageRootState { + /// The intermediate storage trie state. + pub state: IntermediateRootState, + /// The account for which the storage root is being computed. + pub account: Account, } impl From<MerkleCheckpoint> for IntermediateStateRootState { fn from(value: MerkleCheckpoint) -> Self { Self { - hash_builder: HashBuilder::from(value.state), - walker_stack: value.walker_stack.into_iter().map(CursorSubNode::from).collect(), - last_account_key: value.last_account_key, + account_root_state: IntermediateRootState { + hash_builder: HashBuilder::from(value.state), + walker_stack: value.walker_stack.into_iter().map(CursorSubNode::from).collect(), + last_hashed_key: value.last_account_key, + }, + storage_root_state: value.storage_root_checkpoint.map(|checkpoint| { + IntermediateStorageRootState { + state: IntermediateRootState { + hash_builder: HashBuilder::from(checkpoint.state), + walker_stack: checkpoint + .walker_stack + .into_iter() + .map(CursorSubNode::from) + .collect(), + last_hashed_key: checkpoint.last_storage_key, + }, + account: Account { + nonce: checkpoint.account_nonce, + balance: checkpoint.account_balance, + bytecode_hash: Some(checkpoint.account_bytecode_hash), + }, + } + }), } } } + +/// The intermediate state of a root computation, whether for the account trie or a storage trie. +#[derive(Debug)] +pub struct IntermediateRootState { + /// Previously constructed hash builder. + pub hash_builder: HashBuilder, + /// Previously recorded walker stack. + pub walker_stack: Vec<CursorSubNode>, + /// The last hashed key processed. + pub last_hashed_key: B256, +} + +/// The progress of a storage root calculation. +#[derive(Debug)] +pub enum StorageRootProgress { + /// The complete storage root computation with updates and computed root. + Complete(B256, usize, StorageTrieUpdates), + /// The intermediate progress of storage root computation. + /// Contains the walker stack, the hash builder, and the trie updates. 
+ Progress(Box<IntermediateRootState>, usize, StorageTrieUpdates), +} diff --git a/crates/trie/trie/src/trie.rs b/crates/trie/trie/src/trie.rs index c4e3dfcb477..f0ce3aac7cf 100644 --- a/crates/trie/trie/src/trie.rs +++ b/crates/trie/trie/src/trie.rs @@ -1,10 +1,13 @@ use crate::{ - hashed_cursor::{HashedCursorFactory, HashedStorageCursor}, + hashed_cursor::{HashedCursor, HashedCursorFactory, HashedStorageCursor}, node_iter::{TrieElement, TrieNodeIter}, prefix_set::{PrefixSet, TriePrefixSets}, - progress::{IntermediateStateRootState, StateRootProgress}, + progress::{ + IntermediateRootState, IntermediateStateRootState, IntermediateStorageRootState, + StateRootProgress, StorageRootProgress, + }, stats::TrieTracker, - trie_cursor::TrieCursorFactory, + trie_cursor::{TrieCursor, TrieCursorFactory}, updates::{StorageTrieUpdates, TrieUpdates}, walker::TrieWalker, HashBuilder, Nibbles, TRIE_ACCOUNT_RLP_MAX_SIZE, @@ -13,7 +16,12 @@ use alloy_consensus::EMPTY_ROOT_HASH; use alloy_primitives::{keccak256, Address, B256}; use alloy_rlp::{BufMut, Encodable}; use reth_execution_errors::{StateRootError, StorageRootError}; -use tracing::{trace, trace_span}; +use reth_primitives_traits::Account; +use tracing::{debug, trace, trace_span}; + +/// The default number of updates after which root algorithms should return intermediate progress +/// rather than finishing the computation. +const DEFAULT_INTERMEDIATE_THRESHOLD: u64 = 100_000; #[cfg(feature = "metrics")] use crate::metrics::{StateRootMetrics, TrieRootMetrics}; @@ -48,7 +56,7 @@ impl StateRoot { hashed_cursor_factory, prefix_sets: TriePrefixSets::default(), previous_state: None, - threshold: 100_000, + threshold: DEFAULT_INTERMEDIATE_THRESHOLD, #[cfg(feature = "metrics")] metrics: StateRootMetrics::default(), } @@ -117,7 +125,7 @@ where /// /// # Returns /// - /// The intermediate progress of state root computation and the trie updates. + /// The state root and the trie updates. pub fn root_with_updates(self) -> Result<(B256, TrieUpdates), StateRootError> { match self.with_no_threshold().calculate(true)? 
{ StateRootProgress::Complete(root, _, updates) => Ok((root, updates)), @@ -151,37 +159,90 @@ where fn calculate(self, retain_updates: bool) -> Result { trace!(target: "trie::state_root", "calculating state root"); let mut tracker = TrieTracker::default(); - let mut trie_updates = TrieUpdates::default(); let trie_cursor = self.trie_cursor_factory.account_trie_cursor()?; - let hashed_account_cursor = self.hashed_cursor_factory.hashed_account_cursor()?; - let (mut hash_builder, mut account_node_iter) = match self.previous_state { - Some(state) => { - let hash_builder = state.hash_builder.with_updates(retain_updates); - let walker = TrieWalker::state_trie_from_stack( - trie_cursor, - state.walker_stack, - self.prefix_sets.account_prefix_set, + + // create state root context once for reuse + let mut storage_ctx = StateRootContext::new(); + + // first handle any in-progress storage root calculation + let (mut hash_builder, mut account_node_iter) = if let Some(state) = self.previous_state { + let IntermediateStateRootState { account_root_state, storage_root_state } = state; + + // resume account trie iteration + let mut hash_builder = account_root_state.hash_builder.with_updates(retain_updates); + let walker = TrieWalker::state_trie_from_stack( + trie_cursor, + account_root_state.walker_stack, + self.prefix_sets.account_prefix_set, + ) + .with_deletions_retained(retain_updates); + let account_node_iter = TrieNodeIter::state_trie(walker, hashed_account_cursor) + .with_last_hashed_key(account_root_state.last_hashed_key); + + // if we have an in-progress storage root, complete it first + if let Some(storage_state) = storage_root_state { + let hashed_address = account_root_state.last_hashed_key; + let account = storage_state.account; + + debug!( + target: "trie::state_root", + account_nonce = account.nonce, + account_balance = ?account.balance, + last_hashed_key = ?account_root_state.last_hashed_key, + "Resuming storage root calculation" + ); + + // resume the storage root calculation + let remaining_threshold = self.threshold.saturating_sub( + storage_ctx.total_updates_len(&account_node_iter, &hash_builder), + ); + + let storage_root_calculator = StorageRoot::new_hashed( + self.trie_cursor_factory.clone(), + self.hashed_cursor_factory.clone(), + hashed_address, + self.prefix_sets + .storage_prefix_sets + .get(&hashed_address) + .cloned() + .unwrap_or_default(), + #[cfg(feature = "metrics")] + self.metrics.storage_trie.clone(), ) - .with_deletions_retained(retain_updates); - let node_iter = TrieNodeIter::state_trie(walker, hashed_account_cursor) - .with_last_hashed_key(state.last_account_key); - (hash_builder, node_iter) - } - None => { - let hash_builder = HashBuilder::default().with_updates(retain_updates); - let walker = - TrieWalker::state_trie(trie_cursor, self.prefix_sets.account_prefix_set) - .with_deletions_retained(retain_updates); - let node_iter = TrieNodeIter::state_trie(walker, hashed_account_cursor); - (hash_builder, node_iter) + .with_intermediate_state(Some(storage_state.state)) + .with_threshold(remaining_threshold); + + let storage_result = storage_root_calculator.calculate(retain_updates)?; + if let Some(storage_state) = storage_ctx.process_storage_root_result( + storage_result, + hashed_address, + account, + &mut hash_builder, + retain_updates, + )? 
{ + // still in progress, need to pause again + return Ok(storage_ctx.create_progress_state( + account_node_iter, + hash_builder, + account_root_state.last_hashed_key, + Some(storage_state), + )) + } } + + (hash_builder, account_node_iter) + } else { + // no intermediate state, create new hash builder and node iter for state root + // calculation + let hash_builder = HashBuilder::default().with_updates(retain_updates); + let walker = TrieWalker::state_trie(trie_cursor, self.prefix_sets.account_prefix_set) + .with_deletions_retained(retain_updates); + let node_iter = TrieNodeIter::state_trie(walker, hashed_account_cursor); + (hash_builder, node_iter) }; - let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); - let mut hashed_entries_walked = 0; - let mut updated_storage_nodes = 0; while let Some(node) = account_node_iter.try_next()? { match node { TrieElement::Branch(node) => { @@ -190,15 +251,14 @@ where } TrieElement::Leaf(hashed_address, account) => { tracker.inc_leaf(); - hashed_entries_walked += 1; + storage_ctx.hashed_entries_walked += 1; + + // calculate storage root, calculating the remaining threshold so we have + // bounded memory usage even while in the middle of storage root calculation + let remaining_threshold = self.threshold.saturating_sub( + storage_ctx.total_updates_len(&account_node_iter, &hash_builder), + ); - // We assume we can always calculate a storage root without - // OOMing. This opens us up to a potential DOS vector if - // a contract had too many storage entries and they were - // all buffered w/o us returning and committing our intermediate - // progress. - // TODO: We can consider introducing the TrieProgress::Progress/Complete - // abstraction inside StorageRoot, but let's give it a try as-is for now. let storage_root_calculator = StorageRoot::new_hashed( self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone(), @@ -210,45 +270,35 @@ where .unwrap_or_default(), #[cfg(feature = "metrics")] self.metrics.storage_trie.clone(), - ); + ) + .with_threshold(remaining_threshold); - let storage_root = if retain_updates { - let (root, storage_slots_walked, updates) = - storage_root_calculator.root_with_updates()?; - hashed_entries_walked += storage_slots_walked; - // We only walk over hashed address once, so it's safe to insert. - updated_storage_nodes += updates.len(); - trie_updates.insert_storage_updates(hashed_address, updates); - root - } else { - storage_root_calculator.root()? - }; - - account_rlp.clear(); - let account = account.into_trie_account(storage_root); - account.encode(&mut account_rlp as &mut dyn BufMut); - hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp); - - // Decide if we need to return intermediate progress. - let total_updates_len = updated_storage_nodes + - account_node_iter.walker.removed_keys_len() + - hash_builder.updates_len(); - if retain_updates && total_updates_len as u64 >= self.threshold { - let (walker_stack, walker_deleted_keys) = account_node_iter.walker.split(); - trie_updates.removed_nodes.extend(walker_deleted_keys); - let (hash_builder, hash_builder_updates) = hash_builder.split(); - trie_updates.account_nodes.extend(hash_builder_updates); - - let state = IntermediateStateRootState { + let storage_result = storage_root_calculator.calculate(retain_updates)?; + if let Some(storage_state) = storage_ctx.process_storage_root_result( + storage_result, + hashed_address, + account, + &mut hash_builder, + retain_updates, + )? 
{ + // storage root hit threshold, need to pause + return Ok(storage_ctx.create_progress_state( + account_node_iter, hash_builder, - walker_stack, - last_account_key: hashed_address, - }; + hashed_address, + Some(storage_state), + )) + } - return Ok(StateRootProgress::Progress( - Box::new(state), - hashed_entries_walked, - trie_updates, + // decide if we need to return intermediate progress + let total_updates_len = + storage_ctx.total_updates_len(&account_node_iter, &hash_builder); + if retain_updates && total_updates_len >= self.threshold { + return Ok(storage_ctx.create_progress_state( + account_node_iter, + hash_builder, + hashed_address, + None, )) } } @@ -258,6 +308,7 @@ where let root = hash_builder.root(); let removed_keys = account_node_iter.walker.take_removed_keys(); + let StateRootContext { mut trie_updates, hashed_entries_walked, .. } = storage_ctx; trie_updates.finalize(hash_builder, removed_keys, self.prefix_sets.destroyed_accounts); let stats = tracker.finish(); @@ -278,6 +329,128 @@ where } } +/// Contains state mutated during state root calculation and storage root result handling. +#[derive(Debug)] +pub(crate) struct StateRootContext { + /// Reusable buffer for encoding account data. + account_rlp: Vec, + /// Accumulates updates from account and storage root calculation. + trie_updates: TrieUpdates, + /// Tracks total hashed entries walked. + hashed_entries_walked: usize, + /// Counts storage trie nodes updated. + updated_storage_nodes: usize, +} + +impl StateRootContext { + /// Creates a new state root context. + fn new() -> Self { + Self { + account_rlp: Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE), + trie_updates: TrieUpdates::default(), + hashed_entries_walked: 0, + updated_storage_nodes: 0, + } + } + + /// Creates a [`StateRootProgress`] when the threshold is hit, from the state of the current + /// [`TrieNodeIter`], [`HashBuilder`], last hashed key and any storage root intermediate state. + fn create_progress_state( + mut self, + account_node_iter: TrieNodeIter, + hash_builder: HashBuilder, + last_hashed_key: B256, + storage_state: Option, + ) -> StateRootProgress + where + C: TrieCursor, + H: HashedCursor, + { + let (walker_stack, walker_deleted_keys) = account_node_iter.walker.split(); + self.trie_updates.removed_nodes.extend(walker_deleted_keys); + let (hash_builder, hash_builder_updates) = hash_builder.split(); + self.trie_updates.account_nodes.extend(hash_builder_updates); + + let account_state = IntermediateRootState { hash_builder, walker_stack, last_hashed_key }; + + let state = IntermediateStateRootState { + account_root_state: account_state, + storage_root_state: storage_state, + }; + + StateRootProgress::Progress(Box::new(state), self.hashed_entries_walked, self.trie_updates) + } + + /// Calculates the total number of updated nodes. + fn total_updates_len( + &self, + account_node_iter: &TrieNodeIter, + hash_builder: &HashBuilder, + ) -> u64 + where + C: TrieCursor, + H: HashedCursor, + { + (self.updated_storage_nodes + + account_node_iter.walker.removed_keys_len() + + hash_builder.updates_len()) as u64 + } + + /// Processes the result of a storage root calculation. + /// + /// Handles both completed and in-progress storage root calculations: + /// - For completed roots: encodes the account with the storage root, updates the hash builder + /// with the new account, and updates metrics. 
+ /// - For in-progress roots: returns the intermediate state for later resumption + /// + /// Returns an [`IntermediateStorageRootState`] if the calculation needs to be resumed later, or + /// `None` if the storage root was successfully computed and added to the trie. + fn process_storage_root_result( + &mut self, + storage_result: StorageRootProgress, + hashed_address: B256, + account: Account, + hash_builder: &mut HashBuilder, + retain_updates: bool, + ) -> Result, StateRootError> { + match storage_result { + StorageRootProgress::Complete(storage_root, storage_slots_walked, updates) => { + // Storage root completed + self.hashed_entries_walked += storage_slots_walked; + if retain_updates { + self.updated_storage_nodes += updates.len(); + self.trie_updates.insert_storage_updates(hashed_address, updates); + } + + // Encode the account with the computed storage root + self.account_rlp.clear(); + let trie_account = account.into_trie_account(storage_root); + trie_account.encode(&mut self.account_rlp as &mut dyn BufMut); + hash_builder.add_leaf(Nibbles::unpack(hashed_address), &self.account_rlp); + Ok(None) + } + StorageRootProgress::Progress(state, storage_slots_walked, updates) => { + // Storage root hit threshold or resumed calculation hit threshold + debug!( + target: "trie::state_root", + ?hashed_address, + storage_slots_walked, + last_storage_key = ?state.last_hashed_key, + ?account, + "Pausing storage root calculation" + ); + + self.hashed_entries_walked += storage_slots_walked; + if retain_updates { + self.trie_updates.insert_storage_updates(hashed_address, updates); + } + + Ok(Some(IntermediateStorageRootState { state: *state, account })) + } + } + } +} + /// `StorageRoot` is used to compute the root node of an account storage trie. #[derive(Debug)] pub struct StorageRoot { @@ -289,6 +462,10 @@ pub struct StorageRoot { pub hashed_address: B256, /// The set of storage slot prefixes that have changed. pub prefix_set: PrefixSet, + /// Previous intermediate state. + previous_state: Option, + /// The number of updates after which the intermediate progress should be returned. + threshold: u64, /// Storage root metrics. #[cfg(feature = "metrics")] metrics: TrieRootMetrics, @@ -326,6 +503,8 @@ impl StorageRoot { hashed_cursor_factory, hashed_address, prefix_set, + previous_state: None, + threshold: DEFAULT_INTERMEDIATE_THRESHOLD, #[cfg(feature = "metrics")] metrics, } @@ -337,6 +516,24 @@ impl StorageRoot { self } + /// Set the threshold. + pub const fn with_threshold(mut self, threshold: u64) -> Self { + self.threshold = threshold; + self + } + + /// Set the threshold to maximum value so that intermediate progress is not returned. + pub const fn with_no_threshold(mut self) -> Self { + self.threshold = u64::MAX; + self + } + + /// Set the previously recorded intermediate state. + pub fn with_intermediate_state(mut self, state: Option) -> Self { + self.previous_state = state; + self + } + /// Set the hashed cursor factory. 
pub fn with_hashed_cursor_factory<HF>(self, hashed_cursor_factory: HF) -> StorageRoot<T, HF> { StorageRoot { @@ -344,6 +541,8 @@ hashed_cursor_factory, hashed_address: self.hashed_address, prefix_set: self.prefix_set, + previous_state: self.previous_state, + threshold: self.threshold, #[cfg(feature = "metrics")] metrics: self.metrics, } @@ -356,6 +555,8 @@ hashed_cursor_factory: self.hashed_cursor_factory, hashed_address: self.hashed_address, prefix_set: self.prefix_set, + previous_state: self.previous_state, + threshold: self.threshold, #[cfg(feature = "metrics")] metrics: self.metrics, } @@ -367,13 +568,26 @@ where T: TrieCursorFactory, H: HashedCursorFactory, { + /// Walks the intermediate nodes of the existing storage trie (if any) and hashed entries. + /// Feeds the nodes into the hash builder. Collects the updates in the process. + /// + /// # Returns + /// + /// The intermediate progress of the storage root computation. + pub fn root_with_progress(self) -> Result<StorageRootProgress, StorageRootError> { + self.calculate(true) + } + /// Walks the hashed storage table entries for a given address and calculates the storage root. /// /// # Returns /// /// The storage root and storage trie updates for a given address. pub fn root_with_updates(self) -> Result<(B256, usize, StorageTrieUpdates), StorageRootError> { - self.calculate(true) + match self.with_no_threshold().calculate(true)? { + StorageRootProgress::Complete(root, walked, updates) => Ok((root, walked, updates)), + StorageRootProgress::Progress(..) => unreachable!(), // unreachable: threshold disabled } } /// Walks the hashed storage table entries for a given address and calculates the storage root. @@ -382,8 +596,10 @@ where /// /// The storage root. pub fn root(self) -> Result<B256, StorageRootError> { - let (root, _, _) = self.calculate(false)?; - Ok(root) + match self.calculate(false)? { + StorageRootProgress::Complete(root, _, _) => Ok(root), + StorageRootProgress::Progress(..) => unreachable!(), // update retention is disabled + } } /// Walks the hashed storage table entries for a given address and calculates the storage root. @@ -392,10 +608,7 @@ where /// /// The storage root, number of walked entries and trie updates /// for a given address if requested. - pub fn calculate( - self, - retain_updates: bool, - ) -> Result<(B256, usize, StorageTrieUpdates), StorageRootError> { + pub fn calculate(self, retain_updates: bool) -> Result<StorageRootProgress, StorageRootError> { let span = trace_span!(target: "trie::storage_root", "Storage trie", hashed_address = ?self.hashed_address); let _enter = span.enter(); @@ -406,17 +619,41 @@ where // short circuit on empty storage if hashed_storage_cursor.is_storage_empty()? 
{ - return Ok((EMPTY_ROOT_HASH, 0, StorageTrieUpdates::deleted())) + return Ok(StorageRootProgress::Complete( + EMPTY_ROOT_HASH, + 0, + StorageTrieUpdates::deleted(), + )) } let mut tracker = TrieTracker::default(); + let mut trie_updates = StorageTrieUpdates::default(); + let trie_cursor = self.trie_cursor_factory.storage_trie_cursor(self.hashed_address)?; - let walker = TrieWalker::storage_trie(trie_cursor, self.prefix_set) - .with_deletions_retained(retain_updates); - let mut hash_builder = HashBuilder::default().with_updates(retain_updates); + let (mut hash_builder, mut storage_node_iter) = match self.previous_state { + Some(state) => { + let hash_builder = state.hash_builder.with_updates(retain_updates); + let walker = TrieWalker::storage_trie_from_stack( + trie_cursor, + state.walker_stack, + self.prefix_set, + ) + .with_deletions_retained(retain_updates); + let node_iter = TrieNodeIter::storage_trie(walker, hashed_storage_cursor) + .with_last_hashed_key(state.last_hashed_key); + (hash_builder, node_iter) + } + None => { + let hash_builder = HashBuilder::default().with_updates(retain_updates); + let walker = TrieWalker::storage_trie(trie_cursor, self.prefix_set) + .with_deletions_retained(retain_updates); + let node_iter = TrieNodeIter::storage_trie(walker, hashed_storage_cursor); + (hash_builder, node_iter) + } + }; - let mut storage_node_iter = TrieNodeIter::storage_trie(walker, hashed_storage_cursor); + let mut hashed_entries_walked = 0; while let Some(node) = storage_node_iter.try_next()? { match node { TrieElement::Branch(node) => { @@ -425,17 +662,39 @@ where } TrieElement::Leaf(hashed_slot, value) => { tracker.inc_leaf(); + hashed_entries_walked += 1; hash_builder.add_leaf( Nibbles::unpack(hashed_slot), alloy_rlp::encode_fixed_size(&value).as_ref(), ); + + // Check if we need to return intermediate progress + let total_updates_len = + storage_node_iter.walker.removed_keys_len() + hash_builder.updates_len(); + if retain_updates && total_updates_len as u64 >= self.threshold { + let (walker_stack, walker_deleted_keys) = storage_node_iter.walker.split(); + trie_updates.removed_nodes.extend(walker_deleted_keys); + let (hash_builder, hash_builder_updates) = hash_builder.split(); + trie_updates.storage_nodes.extend(hash_builder_updates); + + let state = IntermediateRootState { + hash_builder, + walker_stack, + last_hashed_key: hashed_slot, + }; + + return Ok(StorageRootProgress::Progress( + Box::new(state), + hashed_entries_walked, + trie_updates, + )) + } } } } let root = hash_builder.root(); - let mut trie_updates = StorageTrieUpdates::default(); let removed_keys = storage_node_iter.walker.take_removed_keys(); trie_updates.finalize(hash_builder, removed_keys); @@ -455,7 +714,7 @@ where ); let storage_slots_walked = stats.leaves_added() as usize; - Ok((root, storage_slots_walked, trie_updates)) + Ok(StorageRootProgress::Complete(root, storage_slots_walked, trie_updates)) } } diff --git a/docs/vocs/docs/pages/cli/reth.mdx b/docs/vocs/docs/pages/cli/reth.mdx index 0d2a4355c84..cae42444a7a 100644 --- a/docs/vocs/docs/pages/cli/reth.mdx +++ b/docs/vocs/docs/pages/cli/reth.mdx @@ -37,13 +37,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -52,13 +52,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -90,13 +90,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/config.mdx b/docs/vocs/docs/pages/cli/reth/config.mdx index 86384a169a1..b5067952f89 100644 --- a/docs/vocs/docs/pages/cli/reth/config.mdx +++ b/docs/vocs/docs/pages/cli/reth/config.mdx @@ -22,13 +22,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -37,13 +37,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -75,13 +75,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/db.mdx b/docs/vocs/docs/pages/cli/reth/db.mdx index e0079bf2616..7c98b981f2e 100644 --- a/docs/vocs/docs/pages/cli/reth/db.mdx +++ b/docs/vocs/docs/pages/cli/reth/db.mdx @@ -79,17 +79,20 @@ Database: --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout + --db.max-readers + Maximum number of readers allowed to access the database concurrently + Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. 
This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -98,13 +101,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -136,13 +139,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/db/checksum.mdx b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx index c914aaed98b..65bd2246ded 100644 --- a/docs/vocs/docs/pages/cli/reth/db/checksum.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx @@ -39,13 +39,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -54,13 +54,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -92,13 +92,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/db/clear.mdx b/docs/vocs/docs/pages/cli/reth/db/clear.mdx index 87dae39c51e..809d464b517 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear.mdx @@ -31,13 +31,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. 
This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -46,13 +46,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -84,13 +84,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx b/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx index 20bfd3d5b1c..ddf915c18da 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx @@ -30,13 +30,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -45,13 +45,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -83,13 +83,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx index 739557bd071..8092fc3442b 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx @@ -34,13 +34,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. 
This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -49,13 +49,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -87,13 +87,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/db/diff.mdx b/docs/vocs/docs/pages/cli/reth/db/diff.mdx index cf726a03b75..bcf7c641e68 100644 --- a/docs/vocs/docs/pages/cli/reth/db/diff.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/diff.mdx @@ -43,6 +43,9 @@ Database: --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout + --db.max-readers + Maximum number of readers allowed to access the database concurrently + --table The table name to diff. If not specified, all tables are diffed. @@ -63,13 +66,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -78,13 +81,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -116,13 +119,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. 
diff --git a/docs/vocs/docs/pages/cli/reth/db/drop.mdx b/docs/vocs/docs/pages/cli/reth/db/drop.mdx index 2988f658d20..db52366c4fb 100644 --- a/docs/vocs/docs/pages/cli/reth/db/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/drop.mdx @@ -29,13 +29,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -44,13 +44,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -82,13 +82,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/db/get.mdx b/docs/vocs/docs/pages/cli/reth/db/get.mdx index d1b9251ca06..7437801a902 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get.mdx @@ -31,13 +31,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -46,13 +46,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -84,13 +84,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. 
diff --git a/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx b/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx index 0ac2e31e208..6ba85a2d861 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx @@ -39,13 +39,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -54,13 +54,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -92,13 +92,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx index d274c5b4760..5e1dbedcb02 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx @@ -40,13 +40,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -55,13 +55,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -93,13 +93,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. 
diff --git a/docs/vocs/docs/pages/cli/reth/db/list.mdx b/docs/vocs/docs/pages/cli/reth/db/list.mdx index b9b667323b7..b5bbfc3ec78 100644 --- a/docs/vocs/docs/pages/cli/reth/db/list.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/list.mdx @@ -72,13 +72,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -87,13 +87,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -125,13 +125,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/db/path.mdx b/docs/vocs/docs/pages/cli/reth/db/path.mdx index 2929c47ed74..dd1f384c5ec 100644 --- a/docs/vocs/docs/pages/cli/reth/db/path.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/path.mdx @@ -26,13 +26,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -41,13 +41,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -79,13 +79,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. 
diff --git a/docs/vocs/docs/pages/cli/reth/db/stats.mdx b/docs/vocs/docs/pages/cli/reth/db/stats.mdx index 2bc28cb490d..0aa7637aa66 100644 --- a/docs/vocs/docs/pages/cli/reth/db/stats.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/stats.mdx @@ -39,13 +39,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -54,13 +54,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -92,13 +92,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/db/version.mdx b/docs/vocs/docs/pages/cli/reth/db/version.mdx index a59992da6f3..98be9145128 100644 --- a/docs/vocs/docs/pages/cli/reth/db/version.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/version.mdx @@ -26,13 +26,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -41,13 +41,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -79,13 +79,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. 
diff --git a/docs/vocs/docs/pages/cli/reth/download.mdx b/docs/vocs/docs/pages/cli/reth/download.mdx index e170a321a4f..b185275ffaa 100644 --- a/docs/vocs/docs/pages/cli/reth/download.mdx +++ b/docs/vocs/docs/pages/cli/reth/download.mdx @@ -67,6 +67,9 @@ Database: --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout + --db.max-readers + Maximum number of readers allowed to access the database concurrently + -u, --url Specify a snapshot URL or let the command propose a default one. @@ -81,13 +84,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -96,13 +99,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -134,13 +137,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx b/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx index 30e7ea76afa..de1a401b051 100644 --- a/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx +++ b/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx @@ -25,13 +25,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -40,13 +40,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -78,13 +78,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/export-era.mdx b/docs/vocs/docs/pages/cli/reth/export-era.mdx index 165970638ba..9498fec19e0 100644 --- a/docs/vocs/docs/pages/cli/reth/export-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/export-era.mdx @@ -67,6 +67,9 @@ Database: --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout + --db.max-readers + Maximum number of readers allowed to access the database concurrently + --first-block-number Optional first block number to export from the db. It is by default 0. @@ -87,13 +90,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -102,13 +105,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -140,13 +143,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/import-era.mdx b/docs/vocs/docs/pages/cli/reth/import-era.mdx index 9dc2cb7ad46..4566fcb7af0 100644 --- a/docs/vocs/docs/pages/cli/reth/import-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/import-era.mdx @@ -67,6 +67,9 @@ Database: --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout + --db.max-readers + Maximum number of readers allowed to access the database concurrently + --path The path to a directory for import. @@ -82,13 +85,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -97,13 +100,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -135,13 +138,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/import.mdx b/docs/vocs/docs/pages/cli/reth/import.mdx index 958fedf38d3..b9ce3c54430 100644 --- a/docs/vocs/docs/pages/cli/reth/import.mdx +++ b/docs/vocs/docs/pages/cli/reth/import.mdx @@ -67,6 +67,9 @@ Database: --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout + --db.max-readers + Maximum number of readers allowed to access the database concurrently + --no-state Disables stages that require state. @@ -83,13 +86,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -98,13 +101,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -136,13 +139,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. 
diff --git a/docs/vocs/docs/pages/cli/reth/init-state.mdx b/docs/vocs/docs/pages/cli/reth/init-state.mdx index 8154798828e..9c2e072680c 100644 --- a/docs/vocs/docs/pages/cli/reth/init-state.mdx +++ b/docs/vocs/docs/pages/cli/reth/init-state.mdx @@ -67,6 +67,9 @@ Database: --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout + --db.max-readers + Maximum number of readers allowed to access the database concurrently + --without-evm Specifies whether to initialize the state without relying on EVM historical data. @@ -106,13 +109,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -121,13 +124,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -159,13 +162,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/init.mdx b/docs/vocs/docs/pages/cli/reth/init.mdx index 80e1558ffa0..33630fa5529 100644 --- a/docs/vocs/docs/pages/cli/reth/init.mdx +++ b/docs/vocs/docs/pages/cli/reth/init.mdx @@ -67,17 +67,20 @@ Database: --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout + --db.max-readers + Maximum number of readers allowed to access the database concurrently + Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -86,13 +89,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -124,13 +127,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index 2f4c06ba055..caad09bd5fb 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -233,6 +233,11 @@ Networking: [default: All] + --disable-tx-gossip + Disable transaction pool gossip + + Disables gossiping of transactions in the mempool to peers. This can be omitted for personal nodes, though providers should always opt to enable this flag. + RPC: --http Enable the HTTP-RPC server @@ -671,6 +676,9 @@ Database: --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout + --db.max-readers + Maximum number of readers allowed to access the database concurrently + Dev testnet: --dev Start the node in dev mode @@ -855,13 +863,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -870,13 +878,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -908,13 +916,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/p2p.mdx b/docs/vocs/docs/pages/cli/reth/p2p.mdx index 151c386ef48..efd9851d1aa 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p.mdx @@ -23,13 +23,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -38,13 +38,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -76,13 +76,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx index 223dec04d25..c576c58c157 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx @@ -191,6 +191,11 @@ Networking: [default: All] + --disable-tx-gossip + Disable transaction pool gossip + + Disables gossiping of transactions in the mempool to peers. This can be omitted for personal nodes, though providers should always opt to enable this flag. + Datadir: --datadir The path to the data dir for all reth files and subdirectories. @@ -225,13 +230,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -240,13 +245,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -278,13 +283,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. 
diff --git a/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx index a7edd5b9a53..5875d6f317a 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx @@ -39,13 +39,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -54,13 +54,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -92,13 +92,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx index 1fbaa1b1989..ebdc3fcacaa 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx @@ -191,6 +191,11 @@ Networking: [default: All] + --disable-tx-gossip + Disable transaction pool gossip + + Disables gossiping of transactions in the mempool to peers. This can be omitted for personal nodes, though providers should always opt to enable this flag. + Datadir: --datadir The path to the data dir for all reth files and subdirectories. @@ -225,13 +230,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -240,13 +245,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -278,13 +283,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx index 484a8005cbd..e97fec44773 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx @@ -20,13 +20,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -35,13 +35,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -73,13 +73,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx b/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx index 5bedf145f3a..716e9038592 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx @@ -20,13 +20,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -35,13 +35,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -73,13 +73,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/prune.mdx b/docs/vocs/docs/pages/cli/reth/prune.mdx index 5b604fa6ce7..ec902167295 100644 --- a/docs/vocs/docs/pages/cli/reth/prune.mdx +++ b/docs/vocs/docs/pages/cli/reth/prune.mdx @@ -67,17 +67,20 @@ Database: --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout + --db.max-readers + Maximum number of readers allowed to access the database concurrently + Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -86,13 +89,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -124,13 +127,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/re-execute.mdx b/docs/vocs/docs/pages/cli/reth/re-execute.mdx index 22883e9d610..42bb54c0192 100644 --- a/docs/vocs/docs/pages/cli/reth/re-execute.mdx +++ b/docs/vocs/docs/pages/cli/reth/re-execute.mdx @@ -67,6 +67,9 @@ Database: --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout + --db.max-readers + Maximum number of readers allowed to access the database concurrently + --from The height to start at @@ -84,13 +87,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -99,13 +102,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -137,13 +140,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/recover.mdx b/docs/vocs/docs/pages/cli/reth/recover.mdx index af5b685ab7c..ddf9bf77d88 100644 --- a/docs/vocs/docs/pages/cli/reth/recover.mdx +++ b/docs/vocs/docs/pages/cli/reth/recover.mdx @@ -20,13 +20,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -35,13 +35,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -73,13 +73,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/recover/storage-tries.mdx b/docs/vocs/docs/pages/cli/reth/recover/storage-tries.mdx index aafce289076..c4afa9d6e37 100644 --- a/docs/vocs/docs/pages/cli/reth/recover/storage-tries.mdx +++ b/docs/vocs/docs/pages/cli/reth/recover/storage-tries.mdx @@ -67,17 +67,20 @@ Database: --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout + --db.max-readers + Maximum number of readers allowed to access the database concurrently + Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. 
This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -86,13 +89,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -124,13 +127,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/stage.mdx b/docs/vocs/docs/pages/cli/reth/stage.mdx index f2fa612b097..b35470ba9a7 100644 --- a/docs/vocs/docs/pages/cli/reth/stage.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage.mdx @@ -23,13 +23,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -38,13 +38,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -76,13 +76,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. 
diff --git a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx index 5d3312b3ea0..e68b1161262 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx @@ -67,6 +67,9 @@ Database: --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout + --db.max-readers + Maximum number of readers allowed to access the database concurrently + Possible values: - headers: The headers stage within the pipeline @@ -85,13 +88,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -100,13 +103,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -138,13 +141,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx index 8af42029fa0..30116a24b0a 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx @@ -74,17 +74,20 @@ Database: --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout + --db.max-readers + Maximum number of readers allowed to access the database concurrently + Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -93,13 +96,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -131,13 +134,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx index 6b5b97250ec..f35089b8201 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx @@ -38,13 +38,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -53,13 +53,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -91,13 +91,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx index 8842d393671..7ed155b06dd 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx @@ -38,13 +38,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -53,13 +53,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -91,13 +91,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx index 1e781ec4f96..0cf46118919 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx @@ -38,13 +38,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -53,13 +53,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -91,13 +91,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx index 7bfb08b94f3..4324b8d49d5 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx @@ -38,13 +38,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -53,13 +53,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -91,13 +91,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/stage/run.mdx b/docs/vocs/docs/pages/cli/reth/stage/run.mdx index 5a7a9ad10cf..80c0f5afa15 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/run.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/run.mdx @@ -67,6 +67,9 @@ Database: --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout + --db.max-readers + Maximum number of readers allowed to access the database concurrently + --metrics Enable Prometheus metrics. @@ -284,17 +287,22 @@ Networking: [default: All] + --disable-tx-gossip + Disable transaction pool gossip + + Disables gossiping of transactions in the mempool to peers. This can be omitted for personal nodes, though providers should always opt to enable this flag. + Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -303,13 +311,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -341,13 +349,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx index d0671040dc4..d9a53bdb3ee 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx @@ -72,6 +72,9 @@ Database: --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout + --db.max-readers + Maximum number of readers allowed to access the database concurrently + --offline If this is enabled, then all stages except headers, bodies, and sender recovery will be unwound @@ -79,13 +82,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. 
This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -94,13 +97,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -132,13 +135,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx index 04d6cfb0114..d1407b887e4 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx @@ -30,13 +30,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -45,13 +45,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -83,13 +83,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx index 591a47258df..596cf06c115 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx @@ -30,13 +30,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. 
This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -45,13 +45,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -83,13 +83,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - auto: Colors on - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index af794a3b1c5..3c2be96dda8 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -2,7 +2,7 @@ "__inputs": [ { "name": "DS_PROMETHEUS", - "label": "Prometheus", + "label": "prometheus", "description": "", "type": "datasource", "pluginId": "prometheus", @@ -46,7 +46,7 @@ "type": "grafana", "id": "grafana", "name": "Grafana", - "version": "11.5.1" + "version": "12.1.0-pre" }, { "type": "panel", @@ -143,7 +143,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 } ] } @@ -164,7 +164,9 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": ["lastNotNull"], + "calcs": [ + "lastNotNull" + ], "fields": "", "values": false }, @@ -175,7 +177,7 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -211,7 +213,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 } ] } @@ -232,7 +234,9 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": ["lastNotNull"], + "calcs": [ + "lastNotNull" + ], "fields": "", "values": false }, @@ -243,7 +247,7 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -279,7 +283,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 } ] } @@ -300,7 +304,9 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": ["lastNotNull"], + "calcs": [ + "lastNotNull" + ], "fields": "", "values": false }, @@ -311,7 +317,7 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -347,7 +353,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 } ] } @@ -368,7 +374,9 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": ["lastNotNull"], + "calcs": [ + "lastNotNull" + ], "fields": "", "values": false }, @@ -379,7 +387,7 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": 
{ @@ -415,7 +423,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 } ] } @@ -436,7 +444,9 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": ["lastNotNull"], + "calcs": [ + "lastNotNull" + ], "fields": "", "values": false }, @@ -447,7 +457,7 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -483,7 +493,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 } ] } @@ -504,7 +514,9 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": ["lastNotNull"], + "calcs": [ + "lastNotNull" + ], "fields": "", "values": false }, @@ -515,7 +527,7 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -553,7 +565,7 @@ "steps": [ { "color": "dark-red", - "value": null + "value": 0 }, { "color": "semi-dark-orange", @@ -584,7 +596,9 @@ "minVizWidth": 75, "orientation": "auto", "reduceOptions": { - "calcs": ["lastNotNull"], + "calcs": [ + "lastNotNull" + ], "fields": "", "values": false }, @@ -592,7 +606,7 @@ "showThresholdMarkers": true, "sizing": "auto" }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -630,7 +644,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 } ] } @@ -658,7 +672,9 @@ "namePlacement": "auto", "orientation": "horizontal", "reduceOptions": { - "calcs": ["lastNotNull"], + "calcs": [ + "lastNotNull" + ], "fields": "", "values": false }, @@ -666,7 +682,7 @@ "sizing": "auto", "valueMode": "color" }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -736,7 +752,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 } ] }, @@ -758,7 +774,9 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": ["lastNotNull"], + "calcs": [ + "lastNotNull" + ], "fields": "", "values": false }, @@ -766,7 +784,7 @@ "textMode": "auto", "wideLayout": true }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -874,7 +892,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 } ] }, @@ -902,7 +920,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -968,7 +986,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -999,7 +1017,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -1066,7 +1084,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -1119,7 +1137,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -1414,7 +1432,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -1467,7 +1485,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -1842,7 +1860,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 } ] }, @@ -1870,7 +1888,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -1973,7 +1991,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 } ] }, @@ -2001,7 +2019,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": 
[ { "datasource": { @@ -2104,7 +2122,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 } ] }, @@ -2132,7 +2150,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -2294,7 +2312,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -2326,7 +2344,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -2429,7 +2447,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -2460,7 +2478,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -2539,7 +2557,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -2571,7 +2589,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -2649,7 +2667,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -2681,7 +2699,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -2748,7 +2766,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -2780,7 +2798,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -2995,7 +3013,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 } ] }, @@ -3023,7 +3041,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -3101,7 +3119,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -3133,7 +3151,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -3269,7 +3287,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -3300,7 +3318,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -3367,7 +3385,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -3398,7 +3416,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -3478,7 +3496,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -3510,7 +3528,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -3598,7 +3616,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -3630,7 +3648,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -3737,7 +3755,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -3769,7 +3787,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -3809,6 +3827,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -3841,7 +3860,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -3851,7 +3870,32 @@ }, "unit": "percentunit" }, - "overrides": [] + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", 
+ "names": [ + "Precompile cache hits" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] + } + ] }, "gridPos": { "h": 8, @@ -3873,7 +3917,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -3958,7 +4002,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -3989,7 +4033,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -4056,7 +4100,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -4088,7 +4132,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -4155,7 +4199,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -4187,7 +4231,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -4254,7 +4298,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -4286,7 +4330,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -4353,7 +4397,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -4385,7 +4429,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -4452,7 +4496,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -4484,7 +4528,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -4551,7 +4595,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -4583,7 +4627,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -4651,7 +4695,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -4683,7 +4727,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -4751,7 +4795,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -4766,7 +4810,7 @@ "gridPos": { "h": 8, "w": 12, - "x": 12, + "x": 0, "y": 128 }, "id": 263, @@ -4783,7 +4827,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -4802,6 +4846,135 @@ "title": "Proof fetching total duration", "type": "timeseries" }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "Histogram for state root latency, the duration between finishing execution and receiving the state root", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", 
+ "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "State Root Duration p0.95" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 128 + }, + "id": 1006, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.0-pre", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_sync_block_validation_state_root_histogram{\"$instance_label\"=\"$instance\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "State Root Duration p{{quantile}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "State root latency", + "type": "timeseries" + }, { "collapsed": false, "gridPos": { @@ -4867,7 +5040,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 } ] }, @@ -4895,7 +5068,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -4985,7 +5158,7 @@ "unit": "percentunit" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -5056,7 +5229,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 } ] }, @@ -5084,7 +5257,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -5154,7 +5327,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 } ] }, @@ -5182,7 +5355,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -5255,7 +5428,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -5279,7 +5452,10 @@ { "id": "custom.lineStyle", "value": { - "dash": [0, 10], + "dash": [ + 0, + 10 + ], "fill": "dot" } }, @@ -5311,7 +5487,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -5412,7 +5588,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -5436,7 +5612,10 @@ { "id": "custom.lineStyle", "value": { - "dash": [0, 10], + "dash": [ + 0, + 10 + ], "fill": "dot" } }, @@ -5468,7 +5647,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -5551,16 +5730,22 @@ }, "id": 48, "options": { - "displayLabels": ["name"], + "displayLabels": [ + "name" + ], "legend": { "displayMode": "table", "placement": "right", "showLegend": true, - "values": ["value"] + "values": [ + "value" + ] }, "pieType": "pie", "reduceOptions": { - "calcs": ["lastNotNull"], + "calcs": [ + "lastNotNull" + ], "fields": "", "values": false }, @@ -5570,7 +5755,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -5638,7 +5823,7 @@ 
"steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -5670,7 +5855,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -5725,11 +5910,15 @@ "displayMode": "table", "placement": "right", "showLegend": true, - "values": ["value"] + "values": [ + "value" + ] }, "pieType": "pie", "reduceOptions": { - "calcs": ["lastNotNull"], + "calcs": [ + "lastNotNull" + ], "fields": "", "values": false }, @@ -5739,7 +5928,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -5807,7 +5996,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -5839,7 +6028,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -5906,7 +6095,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -5938,7 +6127,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -5978,7 +6167,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -6090,12 +6279,14 @@ "footer": { "countRows": false, "fields": "", - "reducer": ["sum"], + "reducer": [ + "sum" + ], "show": false }, "showHeader": true }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -6159,16 +6350,22 @@ }, "id": 202, "options": { - "displayLabels": ["name"], + "displayLabels": [ + "name" + ], "legend": { "displayMode": "table", "placement": "right", "showLegend": true, - "values": ["value"] + "values": [ + "value" + ] }, "pieType": "pie", "reduceOptions": { - "calcs": ["lastNotNull"], + "calcs": [ + "lastNotNull" + ], "fields": "", "values": false }, @@ -6178,7 +6375,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -6219,7 +6416,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -6319,12 +6516,14 @@ "footer": { "countRows": false, "fields": "", - "reducer": ["sum"], + "reducer": [ + "sum" + ], "show": false }, "showHeader": true }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -6367,7 +6566,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -6467,12 +6666,14 @@ "footer": { "countRows": false, "fields": "", - "reducer": ["sum"], + "reducer": [ + "sum" + ], "show": false }, "showHeader": true }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -6542,7 +6743,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -6574,7 +6775,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -6641,7 +6842,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -6673,7 +6874,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -6753,8 +6954,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -6852,8 +7052,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -6950,8 +7149,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ 
-7049,8 +7247,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -7163,7 +7360,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -7220,7 +7417,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -7312,7 +7509,7 @@ "unit": "percentunit" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -7381,7 +7578,7 @@ "steps": [ { "color": "green", - "value": null + "value": 0 }, { "color": "red", @@ -7413,7 +7610,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -7501,7 +7698,7 @@ "unit": "percentunit" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -7569,7 +7766,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -7637,7 +7835,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -7826,7 +8024,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -7858,7 +8057,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -8311,6 +8510,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -8341,7 +8541,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -8401,11 +8602,12 @@ "showLegend": true }, "tooltip": { + "hideZeros": false, "mode": "multi", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -8512,6 +8714,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -8543,7 +8746,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 } ] }, @@ -8566,11 +8770,12 @@ "showLegend": true }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -8629,6 +8834,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -8659,7 +8865,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -8685,11 +8892,12 @@ "showLegend": true }, "tooltip": { + "hideZeros": false, "mode": "multi", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -8736,6 +8944,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -8766,7 +8975,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -8810,11 +9020,12 @@ "showLegend": true }, "tooltip": { + "hideZeros": false, "mode": "multi", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -8862,6 +9073,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, 
"drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -8892,7 +9104,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -8936,11 +9149,12 @@ "showLegend": true }, "tooltip": { + "hideZeros": false, "mode": "multi", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -9014,6 +9228,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -9044,7 +9259,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -9096,12 +9312,13 @@ "showLegend": true }, "tooltip": { + "hideZeros": false, "maxHeight": 600, "mode": "multi", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -9141,6 +9358,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -9171,7 +9389,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -9223,12 +9442,13 @@ "showLegend": true }, "tooltip": { + "hideZeros": false, "maxHeight": 600, "mode": "multi", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -9268,6 +9488,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -9298,7 +9519,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -9350,12 +9572,13 @@ "showLegend": true }, "tooltip": { + "hideZeros": false, "maxHeight": 600, "mode": "multi", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -9395,6 +9618,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -9425,7 +9649,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -9477,12 +9702,13 @@ "showLegend": true }, "tooltip": { + "hideZeros": false, "maxHeight": 600, "mode": "multi", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -9536,6 +9762,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -9566,7 +9793,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -9592,11 +9820,12 @@ "showLegend": true }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -9631,6 +9860,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -9661,7 +9891,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -9687,11 +9918,12 @@ "showLegend": true }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -9726,6 +9958,7 
@@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -9756,7 +9989,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -9782,11 +10016,12 @@ "showLegend": true }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -9833,6 +10068,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -9864,7 +10100,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -9891,11 +10128,12 @@ "showLegend": false }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -9930,6 +10168,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -9961,7 +10200,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -9988,11 +10228,12 @@ "showLegend": true }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -10027,6 +10268,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -10057,7 +10299,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -10084,11 +10327,12 @@ "showLegend": true }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -10136,6 +10380,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -10166,7 +10411,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -10206,11 +10452,12 @@ "showLegend": true }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -10311,6 +10558,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -10341,7 +10589,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -10368,11 +10617,12 @@ "showLegend": true }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -10408,6 +10658,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -10438,7 +10689,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -10465,11 +10717,12 @@ "showLegend": true }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": 
"12.1.0-pre", "targets": [ { "datasource": { @@ -10505,6 +10758,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -10535,7 +10789,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -10562,11 +10817,12 @@ "showLegend": true }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "11.4.0", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -10602,6 +10858,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -10632,7 +10889,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "semi-dark-red", @@ -10664,7 +10922,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.3", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -10701,6 +10959,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -10731,7 +10990,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "semi-dark-red", @@ -10776,7 +11036,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.3", + "pluginVersion": "12.1.0-pre", "targets": [ { "datasource": { @@ -11261,7 +11521,9 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": ["lastNotNull"], + "calcs": [ + "lastNotNull" + ], "fields": "", "values": false }, @@ -11628,7 +11890,7 @@ } ], "refresh": "5s", - "schemaVersion": 40, + "schemaVersion": 41, "tags": [], "templating": { "list": [ @@ -11694,6 +11956,6 @@ "timezone": "", "title": "Reth", "uid": "2k8BXz24x", - "version": 10, + "version": 2, "weekStart": "" -} +} \ No newline at end of file diff --git a/examples/custom-beacon-withdrawals/src/main.rs b/examples/custom-beacon-withdrawals/src/main.rs index c106651df38..3aeaaa71769 100644 --- a/examples/custom-beacon-withdrawals/src/main.rs +++ b/examples/custom-beacon-withdrawals/src/main.rs @@ -18,7 +18,8 @@ use reth_ethereum::{ evm::{ primitives::{ execute::{BlockExecutionError, BlockExecutor, InternalBlockExecutionError}, - Database, Evm, EvmEnv, InspectorFor, NextBlockEnvAttributes, OnStateHook, + Database, Evm, EvmEnv, EvmEnvFor, ExecutionCtxFor, InspectorFor, + NextBlockEnvAttributes, OnStateHook, }, revm::{ context::{result::ExecutionResult, TxEnv}, @@ -29,13 +30,14 @@ use reth_ethereum::{ EthBlockAssembler, EthEvmConfig, RethReceiptBuilder, }, node::{ - api::{ConfigureEvm, FullNodeTypes, NodeTypes}, + api::{ConfigureEngineEvm, ConfigureEvm, ExecutableTxIterator, FullNodeTypes, NodeTypes}, builder::{components::ExecutorBuilder, BuilderContext}, node::EthereumAddOns, EthereumNode, }, primitives::{Header, SealedBlock, SealedHeader}, provider::BlockExecutionResult, + rpc::types::engine::ExecutionData, Block, EthPrimitives, Receipt, TransactionSigned, }; use std::{fmt::Display, sync::Arc}; @@ -157,6 +159,20 @@ impl ConfigureEvm for CustomEvmConfig { } } +impl ConfigureEngineEvm for CustomEvmConfig { + fn evm_env_for_payload(&self, payload: &ExecutionData) -> EvmEnvFor { + self.inner.evm_env_for_payload(payload) + } + + fn context_for_payload<'a>(&self, payload: &'a ExecutionData) -> ExecutionCtxFor<'a, Self> { + self.inner.context_for_payload(payload) + } + + fn tx_iterator_for_payload(&self, payload: 
&ExecutionData) -> impl ExecutableTxIterator { + self.inner.tx_iterator_for_payload(payload) + } +} + pub struct CustomBlockExecutor<'a, Evm> { /// Inner Ethereum execution strategy. inner: EthBlockExecutor<'a, Evm, &'a Arc, &'a RethReceiptBuilder>, diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index 06da2f3263e..059899a76da 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -34,13 +34,13 @@ use reth_ethereum::{ node::{ api::{ payload::{EngineApiMessageVersion, EngineObjectValidationError, PayloadOrAttributes}, - validate_version_specific_fields, AddOnsContext, EngineTypes, EngineValidator, + validate_version_specific_fields, AddOnsContext, EngineApiValidator, EngineTypes, FullNodeComponents, FullNodeTypes, InvalidPayloadAttributesError, NewPayloadError, NodeTypes, PayloadAttributes, PayloadBuilderAttributes, PayloadTypes, PayloadValidator, }, builder::{ components::{BasicPayloadServiceBuilder, ComponentsBuilder, PayloadBuilderBuilder}, - rpc::{EngineValidatorBuilder, RpcAddOns}, + rpc::{PayloadValidatorBuilder, RpcAddOns}, BuilderContext, Node, NodeAdapter, NodeBuilder, }, core::{args::RpcServerArgs, node_config::NodeConfig}, @@ -212,7 +212,7 @@ impl PayloadValidator for CustomEngineValidator { } } -impl EngineValidator for CustomEngineValidator { +impl EngineApiValidator for CustomEngineValidator { fn validate_version_specific_fields( &self, version: EngineApiMessageVersion, @@ -250,15 +250,9 @@ impl EngineValidator for CustomEngineValidator { #[non_exhaustive] pub struct CustomEngineValidatorBuilder; -impl EngineValidatorBuilder for CustomEngineValidatorBuilder +impl PayloadValidatorBuilder for CustomEngineValidatorBuilder where - N: FullNodeComponents< - Types: NodeTypes< - Payload = CustomEngineTypes, - ChainSpec = ChainSpec, - Primitives = EthPrimitives, - >, - >, + N: FullNodeComponents, { type Validator = CustomEngineValidator; diff --git a/examples/custom-node/Cargo.toml b/examples/custom-node/Cargo.toml index 54a68d98abe..3cac360fd48 100644 --- a/examples/custom-node/Cargo.toml +++ b/examples/custom-node/Cargo.toml @@ -19,7 +19,7 @@ reth-primitives-traits.workspace = true reth-rpc-api.workspace = true reth-engine-primitives.workspace = true reth-rpc-engine-api.workspace = true -reth-ethereum = { workspace = true, features = ["node-api", "network", "evm", "pool", "trie", "storage-api"] } +reth-ethereum = { workspace = true, features = ["node-api", "network", "evm", "pool", "trie", "storage-api", "provider"] } # revm revm.workspace = true diff --git a/examples/custom-node/src/engine.rs b/examples/custom-node/src/engine.rs index 4c8bff3a1fd..87afb85edc9 100644 --- a/examples/custom-node/src/engine.rs +++ b/examples/custom-node/src/engine.rs @@ -1,11 +1,12 @@ use crate::{ chainspec::CustomChainSpec, + evm::CustomEvmConfig, primitives::{CustomHeader, CustomNodePrimitives, CustomTransaction}, CustomNode, }; use op_alloy_rpc_types_engine::{OpExecutionData, OpExecutionPayload}; use reth_chain_state::ExecutedBlockWithTrieUpdates; -use reth_engine_primitives::EngineValidator; +use reth_engine_primitives::EngineApiValidator; use reth_ethereum::{ node::api::{ validate_version_specific_fields, AddOnsContext, BuiltPayload, EngineApiMessageVersion, @@ -17,7 +18,7 @@ use reth_ethereum::{ storage::StateProviderFactory, trie::{KeccakKeyHasher, KeyHasher}, }; -use reth_node_builder::{rpc::EngineValidatorBuilder, InvalidPayloadAttributesError}; +use 
reth_node_builder::{rpc::PayloadValidatorBuilder, InvalidPayloadAttributesError}; use reth_op::{ node::{ engine::OpEngineValidator, OpBuiltPayload, OpEngineTypes, OpPayloadAttributes, @@ -35,8 +36,8 @@ pub struct CustomPayloadTypes; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct CustomExecutionData { - inner: OpExecutionData, - extension: u64, + pub inner: OpExecutionData, + pub extension: u64, } impl ExecutionPayload for CustomExecutionData { @@ -250,7 +251,7 @@ where } } -impl
<P> EngineValidator<CustomPayloadTypes> for CustomEngineValidator<P> +impl<P> EngineApiValidator<CustomPayloadTypes> for CustomEngineValidator<P>
where P: StateProviderFactory + Send + Sync + Unpin + 'static, { @@ -296,9 +297,9 @@ pub enum CustomError { #[non_exhaustive] pub struct CustomEngineValidatorBuilder; -impl EngineValidatorBuilder for CustomEngineValidatorBuilder +impl PayloadValidatorBuilder for CustomEngineValidatorBuilder where - N: FullNodeComponents, + N: FullNodeComponents, { type Validator = CustomEngineValidator; diff --git a/examples/custom-node/src/engine_api.rs b/examples/custom-node/src/engine_api.rs index 7e5d1455f0e..7129d0fd30d 100644 --- a/examples/custom-node/src/engine_api.rs +++ b/examples/custom-node/src/engine_api.rs @@ -125,7 +125,7 @@ where } } -#[derive(Debug, Default)] +#[derive(Debug, Default, Clone)] pub struct CustomEngineApiBuilder {} impl EngineApiBuilder for CustomEngineApiBuilder diff --git a/examples/custom-node/src/evm/config.rs b/examples/custom-node/src/evm/config.rs index a4ac040b103..03542c8bd63 100644 --- a/examples/custom-node/src/evm/config.rs +++ b/examples/custom-node/src/evm/config.rs @@ -1,17 +1,26 @@ use crate::{ chainspec::CustomChainSpec, + engine::CustomExecutionData, evm::{alloy::CustomEvmFactory, CustomBlockAssembler}, - primitives::{Block, CustomHeader, CustomNodePrimitives}, + primitives::{Block, CustomHeader, CustomNodePrimitives, CustomTransaction}, }; use alloy_consensus::BlockHeader; +use alloy_eips::{eip2718::WithEncoded, Decodable2718}; use alloy_evm::EvmEnv; use alloy_op_evm::OpBlockExecutionCtx; +use alloy_rpc_types_engine::PayloadError; use op_revm::OpSpecId; +use reth_engine_primitives::ExecutableTxIterator; use reth_ethereum::{ node::api::ConfigureEvm, primitives::{SealedBlock, SealedHeader}, }; -use reth_op::node::{OpEvmConfig, OpNextBlockEnvAttributes, OpRethReceiptBuilder}; +use reth_node_builder::{ConfigureEngineEvm, NewPayloadError}; +use reth_op::{ + evm::primitives::{EvmEnvFor, ExecutionCtxFor}, + node::{OpEvmConfig, OpNextBlockEnvAttributes, OpRethReceiptBuilder}, + primitives::SignedTransaction, +}; use std::sync::Arc; #[derive(Debug, Clone)] @@ -81,3 +90,29 @@ impl ConfigureEvm for CustomEvmConfig { } } } + +impl ConfigureEngineEvm for CustomEvmConfig { + fn evm_env_for_payload(&self, payload: &CustomExecutionData) -> EvmEnvFor { + self.inner.evm_env_for_payload(&payload.inner) + } + + fn context_for_payload<'a>( + &self, + payload: &'a CustomExecutionData, + ) -> ExecutionCtxFor<'a, Self> { + self.inner.context_for_payload(&payload.inner) + } + + fn tx_iterator_for_payload( + &self, + payload: &CustomExecutionData, + ) -> impl ExecutableTxIterator { + payload.inner.payload.transactions().clone().into_iter().map(|encoded| { + let tx = CustomTransaction::decode_2718_exact(encoded.as_ref()) + .map_err(Into::into) + .map_err(PayloadError::Decode)?; + let signer = tx.try_recover().map_err(NewPayloadError::other)?; + Ok::<_, NewPayloadError>(WithEncoded::new(encoded, tx.with_signer(signer))) + }) + } +} diff --git a/examples/engine-api-access/src/main.rs b/examples/engine-api-access/src/main.rs index 492074a7b8e..5f43d94bf6e 100644 --- a/examples/engine-api-access/src/main.rs +++ b/examples/engine-api-access/src/main.rs @@ -10,9 +10,7 @@ use reth_db::test_utils::create_test_rw_db; use reth_node_builder::{EngineApiExt, FullNodeComponents, NodeBuilder, NodeConfig}; use reth_optimism_chainspec::BASE_MAINNET; use reth_optimism_node::{ - args::RollupArgs, - node::{OpAddOns, OpEngineValidatorBuilder}, - OpEngineApiBuilder, OpNode, + args::RollupArgs, node::OpEngineValidatorBuilder, OpAddOns, OpEngineApiBuilder, OpNode, }; use tokio::sync::oneshot; diff --git 
a/examples/full-contract-state/Cargo.toml b/examples/full-contract-state/Cargo.toml new file mode 100644 index 00000000000..f4f61244a29 --- /dev/null +++ b/examples/full-contract-state/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "example-full-contract-state" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +exclude.workspace = true + +[dependencies] +reth-ethereum = { workspace = true, features = ["node"] } +eyre.workspace = true + +[lints] +workspace = true diff --git a/examples/full-contract-state/README.md b/examples/full-contract-state/README.md new file mode 100644 index 00000000000..c0cd4a0def4 --- /dev/null +++ b/examples/full-contract-state/README.md @@ -0,0 +1,69 @@ +# Full Contract State Example + +This example demonstrates how to extract the complete state of a specific contract from the reth database. + +## What it does + +The example shows how to: + +1. **Connect to a reth database** - Uses the recommended builder pattern to create a read-only provider +2. **Get basic account information** - Retrieves balance, nonce, and code hash for the contract address +3. **Get contract bytecode** - Fetches the actual contract bytecode if the account is a contract +4. **Iterate through all storage slots** - Uses database cursors to efficiently retrieve all storage key-value pairs + +## Prerequisites + +- A reth database with some data (you can sync a node or use a pre-synced database) +- Set the `RETH_DATADIR` environment variable to point to your reth data directory +- Set the `CONTRACT_ADDRESS` environment variable to provide the target contract address + +## Usage + +```bash +# Set your reth data directory +export RETH_DATADIR="/path/to/your/reth/datadir" +# Set target contract address +export CONTRACT_ADDRESS="0x0..." + +# Run the example +cargo run --example full-contract-state +``` + +## Code Structure + +The example consists of: + +- **`ContractState` struct** - Holds all contract state information +- **`extract_contract_state` function** - Main function that extracts contract state +- **`main` function** - Sets up the provider and demonstrates usage + +## Key Concepts + +### Provider Pattern +The example uses reth's provider pattern: +- `ProviderFactory` - Creates database connections +- `DatabaseProvider` - Provides low-level database access +- `StateProvider` - Provides high-level state access + +### Database Cursors +For efficient storage iteration, the example uses database cursors: +- `cursor_dup_read` - Creates a cursor for duplicate key tables +- `seek_exact` - Positions the cursor at a specific key +- `next_dup` - Iterates through duplicate entries + +## Output + +The example will print: +- Contract address +- Account balance +- Account nonce +- Code hash +- Number of storage slots +- All storage key-value pairs + +## Error Handling + +The example includes proper error handling: +- Returns `None` if the contract doesn't exist +- Uses `ProviderResult` for database operation errors +- Gracefully handles missing bytecode or storage diff --git a/examples/full-contract-state/src/main.rs b/examples/full-contract-state/src/main.rs new file mode 100644 index 00000000000..0a0cdf81adb --- /dev/null +++ b/examples/full-contract-state/src/main.rs @@ -0,0 +1,94 @@ +//! Example demonstrating how to extract the full state of a specific contract from the reth +//! database. +//! +//! This example shows how to: +//! 1. Connect to a reth database +//! 2.
Get basic account information (balance, nonce, code hash) +//! 3. Get contract bytecode +//! 4. Iterate through all storage slots for the contract + +use reth_ethereum::{ + chainspec::ChainSpecBuilder, + evm::revm::primitives::{Address, B256, U256}, + node::EthereumNode, + primitives::{Account, Bytecode}, + provider::{ + db::{ + cursor::{DbCursorRO, DbDupCursorRO}, + tables, + transaction::DbTx, + }, + providers::ReadOnlyConfig, + ProviderResult, + }, + storage::{DBProvider, StateProvider}, +}; +use std::{collections::HashMap, str::FromStr}; + +/// Represents the complete state of a contract including account info, bytecode, and storage +#[derive(Debug, Clone)] +pub struct ContractState { + /// The address of the contract + pub address: Address, + /// Basic account information (balance, nonce, code hash) + pub account: Account, + /// Contract bytecode (None if not a contract or doesn't exist) + pub bytecode: Option<Bytecode>, + /// All storage slots for the contract + pub storage: HashMap<B256, U256>, +} + +/// Extract the full state of a specific contract +pub fn extract_contract_state<P: DBProvider>( + provider: &P, + state_provider: &dyn StateProvider, + contract_address: Address, +) -> ProviderResult<Option<ContractState>> { + let account = state_provider.basic_account(&contract_address)?; + let Some(account) = account else { + return Ok(None); + }; + + let bytecode = state_provider.account_code(&contract_address)?; + + let mut storage_cursor = provider.tx_ref().cursor_dup_read::<tables::PlainStorageState>()?; + let mut storage = HashMap::new(); + + if let Some((_, first_entry)) = storage_cursor.seek_exact(contract_address)? { + storage.insert(first_entry.key, first_entry.value); + + while let Some((_, entry)) = storage_cursor.next_dup()? { + storage.insert(entry.key, entry.value); + } + } + + Ok(Some(ContractState { address: contract_address, account, bytecode, storage })) +} + +fn main() -> eyre::Result<()> { + let address = std::env::var("CONTRACT_ADDRESS")?; + let contract_address = Address::from_str(&address)?; + + let datadir = std::env::var("RETH_DATADIR")?; + let spec = ChainSpecBuilder::mainnet().build(); + let factory = EthereumNode::provider_factory_builder() + .open_read_only(spec.into(), ReadOnlyConfig::from_datadir(datadir))?; + + let provider = factory.provider()?; + let state_provider = factory.latest()?; + let contract_state = + extract_contract_state(&provider, state_provider.as_ref(), contract_address)?; + + if let Some(state) = contract_state { + println!("Contract: {}", state.address); + println!("Balance: {}", state.account.balance); + println!("Nonce: {}", state.account.nonce); + println!("Code hash: {:?}", state.account.bytecode_hash); + println!("Storage slots: {}", state.storage.len()); + for (key, value) in &state.storage { + println!("\t{key}: {value}"); + } + } + + Ok(()) +} diff --git a/examples/op-db-access/Cargo.toml b/examples/op-db-access/Cargo.toml new file mode 100644 index 00000000000..ae06e600b9c --- /dev/null +++ b/examples/op-db-access/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "example-op-db-access" +version = "0.0.0" +publish = false +edition.workspace = true +license.workspace = true + +[dependencies] +reth-op = { workspace = true, features = ["node"] } + +eyre.workspace = true diff --git a/examples/op-db-access/src/main.rs b/examples/op-db-access/src/main.rs new file mode 100644 index 00000000000..7a44a62174e --- /dev/null +++ b/examples/op-db-access/src/main.rs @@ -0,0 +1,23 @@ +//!
Shows how to manually access the database + +use reth_op::{chainspec::BASE_MAINNET, node::OpNode, provider::providers::ReadOnlyConfig}; + +// Providers are zero-cost abstractions on top of an opened MDBX transaction, +// exposing a familiar API to query the chain's information without requiring knowledge +// of the inner tables. +// +// These abstractions do not include any caching; the user is responsible for that. +// Caching is instead handled by other parts of the code, such as the `EthApi` abstraction. +fn main() -> eyre::Result<()> { + // The path to the data directory, e.g. "~/.local/share/reth/base" + let datadir = std::env::var("RETH_DATADIR")?; + + // Instantiate a provider factory for Base mainnet using the provided datadir path. + let factory = OpNode::provider_factory_builder() + .open_read_only(BASE_MAINNET.clone(), ReadOnlyConfig::from_datadir(datadir))?; + + // Obtain a provider that has direct access to the database. + let _provider = factory.provider(); + + Ok(()) +} diff --git a/flake.lock b/flake.lock new file mode 100644 index 00000000000..eaae0987d1d --- /dev/null +++ b/flake.lock @@ -0,0 +1,116 @@ +{ + "nodes": { + "crane": { + "locked": { + "lastModified": 1754269165, + "narHash": "sha256-0tcS8FHd4QjbCVoxN9jI+PjHgA4vc/IjkUSp+N3zy0U=", + "owner": "ipetkov", + "repo": "crane", + "rev": "444e81206df3f7d92780680e45858e31d2f07a08", + "type": "github" + }, + "original": { + "owner": "ipetkov", + "repo": "crane", + "type": "github" + } + }, + "fenix": { + "inputs": { + "nixpkgs": [ + "nixpkgs" + ], + "rust-analyzer-src": "rust-analyzer-src" + }, + "locked": { + "lastModified": 1754549159, + "narHash": "sha256-47e1Ar09kZlv2HvZilaNRFzRybIiJYNQ2MSvofbiw5o=", + "owner": "nix-community", + "repo": "fenix", + "rev": "5fe110751342a023d8c7ddce7fbf8311dca9f58d", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "fenix", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1731603435, + "narHash": "sha256-CqCX4JG7UiHvkrBTpYC3wcEurvbtTADLbo3Ns2CEoL8=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "8b27c1239e5c421a2bbc2c65d52e4a6fbf2ff296", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "24.11", + "repo": "nixpkgs", + "type": "github" + } + }, + "root": { + "inputs": { + "crane": "crane", + "fenix": "fenix", + "nixpkgs": "nixpkgs", + "utils": "utils" + } + }, + "rust-analyzer-src": { + "flake": false, + "locked": { + "lastModified": 1754496778, + "narHash": "sha256-fPDLP3z9XaYQBfSCemEdloEONz/uPyr35RHPRy9Vx8M=", + "owner": "rust-lang", + "repo": "rust-analyzer", + "rev": "529d3b935d68bdf9120fe4d7f8eded7b56271697", + "type": "github" + }, + "original": { + "owner": "rust-lang", + "ref": "nightly", + "repo": "rust-analyzer", + "type": "github" + } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + }, + "utils": { + "inputs": { + "systems": "systems" + }, + "locked": { + "lastModified": 1731533236, + "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +}
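Aside on the op-db-access example above: it opens the factory but stops short of issuing a query. As a minimal sketch of what a first query might look like, assuming `BlockNumReader` is re-exported at `reth_op::provider` (re-export paths vary between reth versions); this is an editor's illustration, not part of the patch:

```rust
use reth_op::{
    chainspec::BASE_MAINNET,
    node::OpNode,
    provider::{providers::ReadOnlyConfig, BlockNumReader},
};

fn main() -> eyre::Result<()> {
    let datadir = std::env::var("RETH_DATADIR")?;

    // Same read-only factory as in the example above.
    let factory = OpNode::provider_factory_builder()
        .open_read_only(BASE_MAINNET.clone(), ReadOnlyConfig::from_datadir(datadir))?;

    // Each `provider()` call opens a fresh MDBX read transaction.
    let provider = factory.provider()?;

    // `last_block_number` (assumed here to come from the `BlockNumReader` trait)
    // returns the highest block number currently stored in the database.
    let tip = provider.last_block_number()?;
    println!("latest stored block: {tip}");

    Ok(())
}
```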
diff --git a/flake.nix b/flake.nix new file mode 100644 index 00000000000..f29f5b53ee9 --- /dev/null +++ b/flake.nix @@ -0,0 +1,127 @@ +{ + inputs = { + nixpkgs.url = "github:NixOS/nixpkgs/24.11"; + utils.url = "github:numtide/flake-utils"; + crane.url = "github:ipetkov/crane"; + + fenix = { + url = "github:nix-community/fenix"; + inputs.nixpkgs.follows = "nixpkgs"; + }; + }; + + outputs = + { + nixpkgs, + utils, + crane, + fenix, + ... + }: + utils.lib.eachDefaultSystem ( + system: + let + pkgs = import nixpkgs { inherit system; }; + + # A useful helper for folding a list of `prevSet -> newSet` functions + # into an attribute set. + composeAttrOverrides = defaultAttrs: overrides: builtins.foldl' + (acc: f: acc // (f acc)) defaultAttrs overrides; + + cargoTarget = pkgs.stdenv.hostPlatform.rust.rustcTargetSpec; + cargoTargetEnvVar = builtins.replaceStrings ["-"] ["_"] + (pkgs.lib.toUpper cargoTarget); + + cargoTOML = builtins.fromTOML (builtins.readFile ./Cargo.toml); + packageVersion = cargoTOML.workspace.package.version; + rustVersion = cargoTOML.workspace.package."rust-version"; + + rustStable = fenix.packages.${system}.stable.withComponents [ + "cargo" "rustc" "rust-src" + ]; + + rustNightly = fenix.packages.${system}.latest; + + craneLib = (crane.mkLib pkgs).overrideToolchain rustStable; + + nativeBuildInputs = [ + pkgs.pkg-config + pkgs.libgit2 + pkgs.perl + ]; + + withClang = prev: { + buildInputs = prev.buildInputs or [] ++ [ + pkgs.clang + ]; + LIBCLANG_PATH = "${pkgs.libclang.lib}/lib"; + }; + + withMaxPerf = prev: { + cargoBuildCommand = "cargo build --profile=maxperf"; + cargoExtraArgs = prev.cargoExtraArgs or "" + " --features=jemalloc,asm-keccak"; + RUSTFLAGS = prev.RUSTFLAGS or [] ++ [ + "-Ctarget-cpu=native" + ]; + }; + + withMold = prev: { + buildInputs = prev.buildInputs or [] ++ [ + pkgs.mold + ]; + "CARGO_TARGET_${cargoTargetEnvVar}_LINKER" = "${pkgs.llvmPackages.clangUseLLVM}/bin/clang"; + RUSTFLAGS = prev.RUSTFLAGS or [] ++ [ + "-Clink-arg=-fuse-ld=${pkgs.mold}/bin/mold" + ]; + }; + + withOp = prev: { + cargoExtraArgs = prev.cargoExtraArgs or "" + " -p op-reth --bin=op-reth"; + }; + + mkReth = overrides: craneLib.buildPackage (composeAttrOverrides { + pname = "reth"; + version = packageVersion; + src = ./.; + inherit nativeBuildInputs; + doCheck = false; + } overrides); + + in + { + packages = rec { + + reth = mkReth ([ + withClang + withMaxPerf + ] ++ pkgs.lib.optionals pkgs.stdenv.isLinux [ + withMold + ]); + + op-reth = mkReth ([ + withClang + withMaxPerf + withOp + ] ++ pkgs.lib.optionals pkgs.stdenv.isLinux [ + withMold + ]); + + default = reth; + }; + + devShell = let + overrides = [ + withClang + ] ++ pkgs.lib.optionals pkgs.stdenv.isLinux [ + withMold + ]; + in craneLib.devShell (composeAttrOverrides { + packages = nativeBuildInputs ++ [ + rustNightly.rust-analyzer + rustNightly.clippy + rustNightly.rustfmt + ]; + } overrides); + } + ); +} diff --git a/fork.yaml b/fork.yaml index 1b5a96c4178..a13287f09df 100644 --- a/fork.yaml +++ b/fork.yaml @@ -4,7 +4,7 @@ footer: | base: name: reth url: https://github.com/paradigmxyz/reth - hash: 9d1af5a09cc7794a767858eb3219a24b7e52fc16 + hash: bcbd2d64cedbde0c8f4f044a74da1bfb9324e2ce fork: name: scroll-reth url: https://github.com/scroll-tech/reth diff --git a/testing/ef-tests/src/cases/blockchain_test.rs b/testing/ef-tests/src/cases/blockchain_test.rs index b4cbbf89fce..933ff6613cd 100644 --- a/testing/ef-tests/src/cases/blockchain_test.rs +++ b/testing/ef-tests/src/cases/blockchain_test.rs @@ -396,7 +396,7 @@ pub fn 
should_skip(path: &Path) -> bool { | "typeTwoBerlin.json" // Test checks if nonce overflows. We are handling this correctly but we are not parsing - // exception in testsuite There are more nonce overflow tests that are in internal + // exception in testsuite There are more nonce overflow tests that are internal // call/create, and those tests are passing and are enabled. | "CreateTransactionHighNonce.json" diff --git a/testing/testing-utils/src/generators.rs b/testing/testing-utils/src/generators.rs index 793448cdba9..b35ae13a819 100644 --- a/testing/testing-utils/src/generators.rs +++ b/testing/testing-utils/src/generators.rs @@ -453,6 +453,7 @@ pub fn random_receipt<R: Rng>( rng: &mut R, transaction: &TransactionSigned, logs_count: Option<u8>, + topics_count: Option<u8>, ) -> Receipt { let success = rng.random::<bool>(); let logs_count = logs_count.unwrap_or_else(|| rng.random::<u8>()); @@ -462,7 +463,7 @@ pub fn random_receipt<R: Rng>( success, cumulative_gas_used: rng.random_range(0..=transaction.gas_limit()), logs: if success { - (0..logs_count).map(|_| random_log(rng, None, None)).collect() + (0..logs_count).map(|_| random_log(rng, None, topics_count)).collect() } else { vec![] },
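Since `random_receipt` now takes a `topics_count` argument that is threaded through to `random_log`, existing call sites gain one parameter; passing `None` preserves the old behavior of randomizing the topic count per log. A minimal sketch of an updated call site follows; the `generators::rng()` helper and the `TransactionSigned` import path are assumptions, not taken from the patch:

```rust
use reth_ethereum_primitives::TransactionSigned;
use reth_testing_utils::generators::{self, random_receipt};

fn sample_receipt(tx: &TransactionSigned) {
    // Seedable RNG helper from the same generators module (assumed name).
    let mut rng = generators::rng();

    // New fourth argument: exactly 3 logs, each with exactly 2 topics.
    let receipt = random_receipt(&mut rng, tx, Some(3), Some(2));

    // Per the diff above, logs are only generated for successful receipts.
    assert_eq!(receipt.logs.len(), if receipt.success { 3 } else { 0 });
}
```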