diff --git a/.github/assets/check_rv32imac.sh b/.github/assets/check_rv32imac.sh new file mode 100755 index 000000000000..0556fa31deae --- /dev/null +++ b/.github/assets/check_rv32imac.sh @@ -0,0 +1,62 @@ +#!/usr/bin/env bash +set +e # Disable immediate exit on error + +# Array of crates to check +crates_to_check=( + reth-codecs-derive + reth-ethereum-forks + reth-primitives-traits + reth-optimism-forks + # reth-evm + # reth-primitives + # reth-optimism-chainspec +) + +# Array to hold the results +results=() +# Flag to track if any command fails +any_failed=0 + +for crate in "${crates_to_check[@]}"; do + cmd="cargo +stable build -p $crate --target riscv32imac-unknown-none-elf --no-default-features" + + if [ -n "$CI" ]; then + echo "::group::$cmd" + else + printf "\n%s:\n %s\n" "$crate" "$cmd" + fi + + set +e # Disable immediate exit on error + # Run the command and capture the return code + $cmd + ret_code=$? + set -e # Re-enable immediate exit on error + + # Store the result in the dictionary + if [ $ret_code -eq 0 ]; then + results+=("1:✅:$crate") + else + results+=("2:❌:$crate") + any_failed=1 + fi + + if [ -n "$CI" ]; then + echo "::endgroup::" + fi +done + +# Sort the results by status and then by crate name +IFS=$'\n' sorted_results=($(sort <<<"${results[*]}")) +unset IFS + +# Print summary +echo -e "\nSummary of build results:" +for result in "${sorted_results[@]}"; do + status="${result#*:}" + status="${status%%:*}" + crate="${result##*:}" + echo "$status $crate" +done + +# Exit with a non-zero status if any command fails +exit $any_failed diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh index 11e5b5e00b9e..971327f0cb21 100755 --- a/.github/assets/check_wasm.sh +++ b/.github/assets/check_wasm.sh @@ -70,6 +70,7 @@ exclude_crates=( reth-transaction-pool # c-kzg reth-trie-parallel # tokio reth-testing-utils + reth-network-peers ) # Array to hold the results diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 
7e6b8747fff5..61ba54e95568 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -75,6 +75,22 @@ jobs: - name: Run Wasm checks run: .github/assets/check_wasm.sh + riscv: + runs-on: ubuntu-latest + timeout-minutes: 60 + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + with: + target: riscv32imac-unknown-none-elf + - uses: taiki-e/install-action@cargo-hack + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + - uses: dcarbone/install-jq-action@v3 + - name: Run RISC-V checks + run: .github/assets/check_rv32imac.sh + crate-checks: runs-on: ubuntu-latest timeout-minutes: 30 diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml index 11ef24b5f1be..4c927df8be00 100644 --- a/.github/workflows/unit.yml +++ b/.github/workflows/unit.yml @@ -35,11 +35,11 @@ jobs: partition: 2 total_partitions: 2 - type: optimism - args: --features "asm-keccak optimism" --locked --exclude reth --exclude reth-bench --exclude "example-*" + args: --features "asm-keccak optimism" --locked --exclude reth --exclude reth-bench --exclude "example-*" --exclude "reth-ethereum-*" --exclude "*-ethereum" partition: 1 total_partitions: 2 - type: optimism - args: --features "asm-keccak optimism" --locked --exclude reth --exclude reth-bench --exclude "example-*" + args: --features "asm-keccak optimism" --locked --exclude reth --exclude reth-bench --exclude "example-*" --exclude "reth-ethereum-*" --exclude "*-ethereum" partition: 2 total_partitions: 2 - type: book diff --git a/Cargo.lock b/Cargo.lock index 03782f53a4a2..ea4ffec0dbcc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -91,9 +91,9 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.20" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45862d1c77f2228b9e10bc609d5bc203d86ebc9b87ad8d5d5167a6c9abf739d9" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = 
"alloy-chains" @@ -112,14 +112,15 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "0.6.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae09ffd7c29062431dd86061deefe4e3c6f07fa0d674930095f8dcedb0baf02c" +checksum = "73dd0ab7003dfa3efd252e423873cd3bc241d1456147e752f995cc8aabd1d1f6" dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rlp", "alloy-serde", + "alloy-trie", "arbitrary", "auto_impl", "c-kzg", @@ -129,11 +130,25 @@ dependencies = [ "serde_with", ] +[[package]] +name = "alloy-consensus-any" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d08234c0eece0e08602db5095a16dc942cad91967cccfcfc2c6a42c25563964f" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "serde", +] + [[package]] name = "alloy-contract" -version = "0.6.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66430a72d5bf5edead101c8c2f0a24bada5ec9f3cf9909b3e08b6d6899b4803e" +checksum = "6a01f5593f6878452c6dde102ece391b60cba79801c5f606f8fe898ff57cd5d7" dependencies = [ "alloy-dyn-abi", "alloy-json-abi", @@ -146,14 +161,14 @@ dependencies = [ "alloy-transport", "futures", "futures-util", - "thiserror 1.0.69", + "thiserror 2.0.4", ] [[package]] name = "alloy-dyn-abi" -version = "0.8.13" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cf633ae9a1f0c82fdb9e559ed2be1c8e415c3e48fc47e1feaf32c6078ec0cdd" +checksum = "80759b3f57b3b20fa7cd8fef6479930fc95461b58ff8adea6e87e618449c8a1d" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -198,9 +213,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "0.6.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b6aa3961694b30ba53d41006131a2fca3bdab22e4c344e46db2c639e7c2dfdd" +checksum = 
"50c242de43a1869bcb2fbce3b377130959d10dfd562b87ac7aa2f04d98baac51" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -219,9 +234,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "0.6.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e53f7877ded3921d18a0a9556d55bedf84535567198c9edab2aa23106da91855" +checksum = "9dd39b72f860cb0c542fac925f91d1939c2b14a0970b39d0ae304b5b7574a0ac" dependencies = [ "alloy-primitives", "alloy-serde", @@ -230,9 +245,9 @@ dependencies = [ [[package]] name = "alloy-json-abi" -version = "0.8.13" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a500037938085feed8a20dbfc8fce58c599db68c948cfae711147175dee392c" +checksum = "ac4b22b3e51cac09fd2adfcc73b55f447b4df669f983c13f7894ec82b607c63f" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -242,29 +257,31 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "0.6.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3694b7e480728c0b3e228384f223937f14c10caef5a4c766021190fc8f283d35" +checksum = "6c15c11661571a19a06896663c93e804ccf013159275a89a98e892014df514d8" dependencies = [ "alloy-primitives", "alloy-sol-types", "serde", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.4", "tracing", ] [[package]] name = "alloy-network" -version = "0.6.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea94b8ceb5c75d7df0a93ba0acc53b55a22b47b532b600a800a87ef04eb5b0b4" +checksum = "60dd0b99eaa5e715dd90d42021f7f08a0a70976ea84f41a0ad233770e0c1962b" dependencies = [ "alloy-consensus", + "alloy-consensus-any", "alloy-eips", "alloy-json-rpc", "alloy-network-primitives", "alloy-primitives", + "alloy-rpc-types-any", "alloy-rpc-types-eth", "alloy-serde", "alloy-signer", @@ -274,14 +291,14 @@ dependencies = [ "futures-utils-wasm", "serde", "serde_json", - "thiserror 1.0.69", + 
"thiserror 2.0.4", ] [[package]] name = "alloy-network-primitives" -version = "0.6.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df9f3e281005943944d15ee8491534a1c7b3cbf7a7de26f8c433b842b93eb5f9" +checksum = "18abfc73ce48f074c8bc6e05c1f08ef0b1ddc9b04f191a821d0beb9470a42a29" dependencies = [ "alloy-consensus", "alloy-eips", @@ -292,9 +309,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" -version = "0.6.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9805d126f24be459b958973c0569c73e1aadd27d4535eee82b2b6764aa03616" +checksum = "f9a04cf8f3a19b024b2bc71b5774d423cd2edda7f67df6029daa1368c5c02da5" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -302,16 +319,16 @@ dependencies = [ "rand 0.8.5", "serde_json", "tempfile", - "thiserror 1.0.69", + "thiserror 2.0.4", "tracing", "url", ] [[package]] name = "alloy-primitives" -version = "0.8.13" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3aeeb5825c2fc8c2662167058347cd0cafc3cb15bcb5cdb1758a63c2dca0409e" +checksum = "9db948902dfbae96a73c2fbf1f7abec62af034ab883e4c777c3fd29702bd6e2c" dependencies = [ "alloy-rlp", "arbitrary", @@ -324,7 +341,7 @@ dependencies = [ "getrandom 0.2.15", "hashbrown 0.15.2", "hex-literal", - "indexmap 2.6.0", + "indexmap 2.7.0", "itoa", "k256", "keccak-asm", @@ -333,7 +350,7 @@ dependencies = [ "proptest-derive", "rand 0.8.5", "ruint", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "serde", "sha3", "tiny-keccak", @@ -341,9 +358,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "0.6.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40c1f9eede27bf4c13c099e8e64d54efd7ce80ef6ea47478aa75d5d74e2dba3b" +checksum = "4933c761f10e44d5e901804b56efb2ce6e0945e6c57d2fa1e5ace303fae6f74a" dependencies = [ "alloy-chains", "alloy-consensus", @@ -373,7 +390,7 @@ dependencies = [ 
"schnellru", "serde", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.4", "tokio", "tracing", "url", @@ -382,9 +399,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "0.6.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90f1f34232f77341076541c405482e4ae12f0ee7153d8f9969fc1691201b2247" +checksum = "808719714bfb2aa24b0eb2a38411ce8e654ba11c0ebf2a6648fcbe9fabfe696d" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -418,14 +435,14 @@ checksum = "2b09cae092c27b6f1bde952653a22708691802e57bfef4a2973b80bea21efd3f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] name = "alloy-rpc-client" -version = "0.6.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "374dbe0dc3abdc2c964f36b3d3edf9cdb3db29d16bda34aa123f03d810bec1dd" +checksum = "6ce26c25efb8290b6ba559ae6c40bf6630d337e107ae242e5790501420dba7b7" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -448,9 +465,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "0.6.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c74832aa474b670309c20fffc2a869fa141edab7c79ff7963fad0a08de60bae1" +checksum = "41080ce2640928f0df45c41d2af629b88db3cb31af3abbe614964ae10001ddac" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -461,9 +478,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "0.6.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bfd9b2cc3a1985f1f6da5afc41120256f9f9316fcd89e054cea99dbb10172f6" +checksum = "db981579da4d597d9d35f56ad7641b929bf8f551ab696715132f554863c83540" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -473,9 +490,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "0.6.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "5ca97963132f78ddfc60e43a017348e6d52eea983925c23652f5b330e8e02291" +checksum = "252b7433e731e5d24f7eb7a54a368bc813a1086aaf84643ab10e99599a6ff16c" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -483,25 +500,37 @@ dependencies = [ "serde", ] +[[package]] +name = "alloy-rpc-types-any" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abca110e59f760259e26d0c84912121468008aba48dd227af0f306cfd7bce9ae" +dependencies = [ + "alloy-consensus-any", + "alloy-rpc-types-eth", + "alloy-serde", +] + [[package]] name = "alloy-rpc-types-beacon" -version = "0.6.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "922fa76678d2f9f07ea1b19309b5cfbf244c6029dcba3515227b515fdd6ed4a7" +checksum = "45c8db5fb70d2fece7bc1cd5adf42e72fc8a23547adeff8f558d9063f1e7788c" dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", + "alloy-serde", "serde", "serde_with", - "thiserror 1.0.69", + "thiserror 2.0.4", ] [[package]] name = "alloy-rpc-types-debug" -version = "0.6.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba2253bee958658ebd614c07a61c40580e09dd1fad3f017684314442332ab753" +checksum = "ea3a662ced0bfbe582d26ed85d6a0092310787331555c8f7a86f843c7ca272ef" dependencies = [ "alloy-primitives", "serde", @@ -509,9 +538,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "0.6.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f56294dce86af23ad6ee8df46cf8b0d292eb5d1ff67dc88a0886051e32b1faf" +checksum = "d3b000c7f3469e7faa575ba70207294cf07e91dfd6ce4d04d5d5d8069f974a66" dependencies = [ "alloy-consensus", "alloy-eips", @@ -530,11 +559,12 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "0.6.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a8a477281940d82d29315846c7216db45b15e90bcd52309da9f54bcf7ad94a11" +checksum = "3468e7385fbb86b0fde5497d685c02f765ea09d36f7e07c5d1c9a52b077d38e2" dependencies = [ "alloy-consensus", + "alloy-consensus-any", "alloy-eips", "alloy-network-primitives", "alloy-primitives", @@ -551,12 +581,13 @@ dependencies = [ [[package]] name = "alloy-rpc-types-mev" -version = "0.6.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8647f8135ee3d5de1cf196706c905c05728a4e38bb4a5b61a7214bd1ba8f60a6" +checksum = "26988fb56d87414c96b8fd9b69ad6ce3768bc9acc953ed02c18a66f74ab98c66" dependencies = [ "alloy-eips", "alloy-primitives", + "alloy-rpc-types-eth", "alloy-serde", "serde", "serde_json", @@ -564,23 +595,23 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "0.6.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecd8b4877ef520c138af702097477cdd19504a8e1e4675ba37e92ba40f2d3c6f" +checksum = "7a90be1bc8e3659db1c9512191873a268a917efbc62b8bd39a92c12bf613b193" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", "alloy-serde", "serde", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.4", ] [[package]] name = "alloy-rpc-types-txpool" -version = "0.6.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d4ab49acf90a71f7fb894dc5fd485f1f07a1e348966c714c4d1e0b7478850a8" +checksum = "beade2858d292442f5be6fce452c923072a7ac4d3898d333abf42703945444d0" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -590,9 +621,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "0.6.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dfa4a7ccf15b2492bb68088692481fd6b2604ccbee1d0d6c44c21427ae4df83" +checksum = "42de6002e2154b50b3568aea27e26bd9caf7b754658f43065f2e9b6ee0a8c839" dependencies = [ "alloy-primitives", "arbitrary", @@ -602,23 +633,23 @@ dependencies = [ 
[[package]] name = "alloy-signer" -version = "0.6.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e10aec39d60dc27edcac447302c7803d2371946fb737245320a05b78eb2fafd" +checksum = "f288a9a25e2578dab17845fd8d2be1d32de33565783ed185ded161a65f92381b" dependencies = [ "alloy-primitives", "async-trait", "auto_impl", "elliptic-curve", "k256", - "thiserror 1.0.69", + "thiserror 2.0.4", ] [[package]] name = "alloy-signer-local" -version = "0.6.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8396f6dff60700bc1d215ee03d86ff56de268af96e2bf833a14d0bafcab9882" +checksum = "0d8081f589ddc11a959605e30c723d51cad2562d9072305f8e3ef311f077e5eb" dependencies = [ "alloy-consensus", "alloy-network", @@ -629,61 +660,61 @@ dependencies = [ "coins-bip39", "k256", "rand 0.8.5", - "thiserror 1.0.69", + "thiserror 2.0.4", ] [[package]] name = "alloy-sol-macro" -version = "0.8.13" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c0279d09463a4695788a3622fd95443625f7be307422deba4b55dd491a9c7a1" +checksum = "3bfd7853b65a2b4f49629ec975fee274faf6dff15ab8894c620943398ef283c0" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] name = "alloy-sol-macro-expander" -version = "0.8.13" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4feea540fc8233df2ad1156efd744b2075372f43a8f942a68b3b19c8a00e2c12" +checksum = "82ec42f342d9a9261699f8078e57a7a4fda8aaa73c1a212ed3987080e6a9cd13" dependencies = [ "alloy-sol-macro-input", "const-hex", "heck", - "indexmap 2.6.0", + "indexmap 2.7.0", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "syn-solidity", "tiny-keccak", ] [[package]] name = "alloy-sol-macro-input" -version = "0.8.13" +version = "0.8.14" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0ad281f3d1b613af814b66977ee698e443d4644a1510962d0241f26e0e53ae" +checksum = "ed2c50e6a62ee2b4f7ab3c6d0366e5770a21cad426e109c2f40335a1b3aff3df" dependencies = [ "const-hex", "dunce", "heck", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "syn-solidity", ] [[package]] name = "alloy-sol-type-parser" -version = "0.8.13" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96eff16c797438add6c37bb335839d015b186c5421ee5626f5559a7bfeb38ef5" +checksum = "ac17c6e89a50fb4a758012e4b409d9a0ba575228e69b539fe37d7a1bd507ca4a" dependencies = [ "serde", "winnow", @@ -691,9 +722,9 @@ dependencies = [ [[package]] name = "alloy-sol-types" -version = "0.8.13" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff34e0682d6665da243a3e81da96f07a2dd50f7e64073e382b1a141f5a2a2f6" +checksum = "c9dc0fffe397aa17628160e16b89f704098bf3c9d74d5d369ebc239575936de5" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -704,9 +735,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "0.6.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f99acddb34000d104961897dbb0240298e8b775a7efffb9fda2a1a3efedd65b3" +checksum = "90352f4cf78017905c3244f48b38fadc345970bbc9095087c0f985a580550488" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -714,7 +745,7 @@ dependencies = [ "futures-utils-wasm", "serde", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.4", "tokio", "tower 0.5.1", "tracing", @@ -724,9 +755,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "0.6.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dc013132e34eeadaa0add7e74164c1503988bfba8bae885b32e0918ba85a8a6" +checksum = "7d26c94d51fa8b1aee3d15db113dd0773776c02bb36dbaa2590b900dadd7e7d0" dependencies = [ "alloy-json-rpc", 
"alloy-transport", @@ -739,9 +770,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "0.6.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "063edc0660e81260653cc6a95777c29d54c2543a668aa5da2359fb450d25a1ba" +checksum = "14c498fcdec50650be6b6a22ce7928a1b2738086b4f94f31b132e83498d45bbb" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -758,9 +789,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "0.6.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abd170e600801116d5efe64f74a4fc073dbbb35c807013a7d0a388742aeebba0" +checksum = "cd7b21335b55c9f715e2acca0228dc1d6880d961756916c13a9ce70f9f413e70" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -866,9 +897,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.93" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775" +checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7" [[package]] name = "aquamarine" @@ -881,7 +912,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1104,7 +1135,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1115,7 +1146,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1153,7 +1184,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1259,7 +1290,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1354,9 +1385,9 @@ 
dependencies = [ "bitflags 2.6.0", "boa_interner", "boa_macros", - "indexmap 2.6.0", + "indexmap 2.7.0", "num-bigint", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", ] [[package]] @@ -1380,7 +1411,7 @@ dependencies = [ "fast-float", "hashbrown 0.14.5", "icu_normalizer", - "indexmap 2.6.0", + "indexmap 2.7.0", "intrusive-collections", "itertools 0.13.0", "num-bigint", @@ -1392,7 +1423,7 @@ dependencies = [ "portable-atomic", "rand 0.8.5", "regress", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "ryu-js", "serde", "serde_json", @@ -1426,10 +1457,10 @@ dependencies = [ "boa_gc", "boa_macros", "hashbrown 0.14.5", - "indexmap 2.6.0", + "indexmap 2.7.0", "once_cell", "phf", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "static_assertions", ] @@ -1441,7 +1472,7 @@ checksum = "240f4126219a83519bad05c9a40bfc0303921eeb571fc2d7e44c17ffac99d3f1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "synstructure", ] @@ -1461,7 +1492,7 @@ dependencies = [ "num-bigint", "num-traits", "regress", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", ] [[package]] @@ -1478,7 +1509,7 @@ checksum = "ae85205289bab1f2c7c8a30ddf0541cf89ba2ff7dbd144feef50bbfa664288d4" dependencies = [ "fast-float", "paste", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "sptr", "static_assertions", ] @@ -1563,7 +1594,7 @@ checksum = "bcfcc3cd946cb52f0bbfdbbcfa2f4e24f75ebb6c0e1002f7c25904fada18b9ec" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1651,9 +1682,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd9de9f2205d5ef3fd67e685b0df337994ddd4495e2a28d185500d0e1edfea47" +checksum = "f34d93e62b03caf570cccc334cbc6c2fceca82f39211051345108adcba3eebdc" dependencies = [ "jobserver", "libc", @@ -1752,9 +1783,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.21" +version = "4.5.22" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "fb3b4b9e5a7c7514dfa52869339ee98b3156b0bfb4e8a77c4ff4babb64b1604f" +checksum = "69371e34337c4c984bbe322360c2547210bf632eb2814bbe78a6e87a2935bd2b" dependencies = [ "clap_builder", "clap_derive", @@ -1762,9 +1793,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.21" +version = "4.5.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b17a95aa67cc7b5ebd32aa5370189aa0d79069ef1c64ce893bd30fb24bff20ec" +checksum = "6e24c1b4099818523236a8ca881d2b45db98dadfb4625cf6608c12069fcbbde1" dependencies = [ "anstream", "anstyle", @@ -1781,7 +1812,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1915,9 +1946,9 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.13.2" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "487981fa1af147182687064d0a2c336586d337a606595ced9ffb0c685c250c73" +checksum = "4b0485bab839b018a8f1723fc5391819fea5f8f0f32288ef8a735fd096b6160c" dependencies = [ "cfg-if", "cpufeatures", @@ -2125,7 +2156,7 @@ checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" dependencies = [ "bitflags 2.6.0", "crossterm_winapi", - "mio 1.0.2", + "mio 1.0.3", "parking_lot", "rustix", "signal-hook", @@ -2235,7 +2266,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2259,7 +2290,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2270,7 +2301,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2392,7 +2423,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2403,7 +2434,7 
@@ checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2424,7 +2455,7 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "unicode-xid", ] @@ -2538,7 +2569,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2624,7 +2655,7 @@ dependencies = [ "revm", "serde", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.4", "walkdir", ] @@ -2688,7 +2719,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2699,7 +2730,7 @@ checksum = "2f9ed6b3789237c8a0c1c505af1c7eb2c560df6186f01b098c3a1064ea532f38" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2756,7 +2787,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2780,7 +2811,7 @@ dependencies = [ "reth-node-ethereum", "serde", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.4", ] [[package]] @@ -2868,7 +2899,7 @@ dependencies = [ "reth-tracing", "reth-trie-db", "serde", - "thiserror 1.0.69", + "thiserror 2.0.4", "tokio", ] @@ -2956,6 +2987,7 @@ dependencies = [ name = "example-db-access" version = "0.0.0" dependencies = [ + "alloy-consensus", "alloy-primitives", "alloy-rpc-types-eth", "eyre", @@ -3318,7 +3350,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -3507,7 +3539,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.6.0", + "indexmap 2.7.0", "slab", "tokio", "tokio-util", @@ -3668,9 +3700,9 @@ dependencies = [ [[package]] name = "http" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" dependencies = [ "bytes", "fnv", @@ -3844,7 +3876,7 @@ dependencies = [ "quote", "serde", "serde_json", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -3994,7 +4026,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -4061,7 +4093,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -4102,9 +4134,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" +checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f" dependencies = [ "arbitrary", "equivalent", @@ -4131,7 +4163,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "232929e1d75fe899576a3d5c7416ad0d88dbfbb3c3d6aa00873a7408a50ddb88" dependencies = [ "ahash", - "indexmap 2.6.0", + "indexmap 2.7.0", "is-terminal", "itoa", "log", @@ -4183,7 +4215,7 @@ dependencies = [ "pretty_assertions", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -4319,10 +4351,11 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.72" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" +checksum = "a865e038f7f6ed956f788f0d7d60c541fff74c7bd74272c5d4cf15c63743e705" dependencies = [ + "once_cell", "wasm-bindgen", ] @@ -4386,7 +4419,7 @@ dependencies = [ "parking_lot", "pin-project", "rand 0.8.5", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "serde", "serde_json", "thiserror 1.0.69", @@ -4431,7 
+4464,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -4591,15 +4624,15 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.166" +version = "0.2.167" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2ccc108bbc0b1331bd061864e7cd823c0cab660bbe6970e66e2c0614decde36" +checksum = "09d6582e104315a817dff97f75133544b2e094ee22447d2acf4a74e189ba06fc" [[package]] name = "libloading" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" +checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", "windows-targets 0.52.6", @@ -4849,7 +4882,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -4859,7 +4892,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85b6f8152da6d7892ff1b7a1c0fa3f435e92b5918ad67035c3bb432111d9a29b" dependencies = [ "base64 0.22.1", - "indexmap 2.6.0", + "indexmap 2.7.0", "metrics", "metrics-util", "quanta", @@ -4891,7 +4924,7 @@ dependencies = [ "crossbeam-epoch", "crossbeam-utils", "hashbrown 0.15.2", - "indexmap 2.6.0", + "indexmap 2.7.0", "metrics", "ordered-float", "quanta", @@ -4963,11 +4996,10 @@ dependencies = [ [[package]] name = "mio" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" +checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ - "hermit-abi 0.3.9", "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", @@ -4997,7 +5029,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -5245,7 +5277,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 
2.0.89", + "syn 2.0.90", ] [[package]] @@ -5298,9 +5330,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "op-alloy-consensus" -version = "0.6.8" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fce158d886815d419222daa67fcdf949a34f7950653a4498ebeb4963331f70ed" +checksum = "77284451ec70602f148f4f3bc6d1106fdfefd57c11ff459c4b2985e400ed1a18" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5311,14 +5343,14 @@ dependencies = [ "derive_more 1.0.0", "serde", "serde_with", - "thiserror 2.0.3", + "thiserror 2.0.4", ] [[package]] name = "op-alloy-genesis" -version = "0.6.8" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2734e9a65efb90fe4520303f984c124766b7d2f2e5dd51cbe54d6269c85a3c91" +checksum = "c912ec93ec839076e8bbaaf7bd3d80aeedbe38cd5e8e3e76dfc67d217637e651" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5326,14 +5358,14 @@ dependencies = [ "alloy-sol-types", "serde", "serde_repr", - "thiserror 2.0.3", + "thiserror 2.0.4", ] [[package]] name = "op-alloy-network" -version = "0.6.8" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87e4aef8ed017004a176ab1de49df419f59c0fb4a6ce3b693a10fe099fe1afe7" +checksum = "bef4620ba6309ecc18e1aaa339836ca839b001a420ca245add040a3bde1ae9b1" dependencies = [ "alloy-consensus", "alloy-network", @@ -5346,9 +5378,9 @@ dependencies = [ [[package]] name = "op-alloy-protocol" -version = "0.6.8" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c68a3e2770890da3ad2fd20d7fe0c8e15672707577b4168a60e388c8eceaca0" +checksum = "9ab24c1b9c21cedd691938b5667c951b04ae8b89429d7cb7a88f30afb79cbbf1" dependencies = [ "alloc-no-stdlib", "alloy-consensus", @@ -5358,20 +5390,21 @@ dependencies = [ "alloy-serde", "async-trait", "brotli", + "cfg-if", "miniz_oxide", "op-alloy-consensus", "op-alloy-genesis", "serde", 
- "thiserror 2.0.3", + "thiserror 2.0.4", "tracing", "unsigned-varint", ] [[package]] name = "op-alloy-rpc-types" -version = "0.6.8" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "060ebeaea8c772e396215f69bb86d231ec8b7f36aca0dd6ce367ceaa9a8c33e6" +checksum = "2bdc32eba4d43bbd23f1f16dece7afd991d41ab4ffc2494a72b048e9f38db622" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5388,9 +5421,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.6.8" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "864dbd5511ef4ef00b6c2c980739259b25b24048007b7751ca0069b30b1e3fee" +checksum = "b07175fcfd9d03a587ece7ce79fc288331e6d9ae523464eb677c751d5737713b" dependencies = [ "alloy-eips", "alloy-primitives", @@ -5402,7 +5435,7 @@ dependencies = [ "op-alloy-protocol", "serde", "snap", - "thiserror 2.0.3", + "thiserror 2.0.4", ] [[package]] @@ -5482,9 +5515,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.7.0" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8be4817d39f3272f69c59fe05d0535ae6456c2dc2fa1ba02910296c7e0a5c590" +checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" dependencies = [ "arbitrary", "arrayvec", @@ -5493,20 +5526,19 @@ dependencies = [ "bytes", "impl-trait-for-tuples", "parity-scale-codec-derive", - "rustversion", "serde", ] [[package]] name = "parity-scale-codec-derive" -version = "3.7.0" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8781a75c6205af67215f382092b6e0a4ff3734798523e69073d4bcd294ec767b" +checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.89", + "syn 1.0.109", ] [[package]] @@ -5621,7 +5653,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] 
[[package]] @@ -5650,7 +5682,7 @@ checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -5822,7 +5854,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" dependencies = [ "proc-macro2", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -5873,7 +5905,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -5971,7 +6003,7 @@ checksum = "6ff7ff745a347b87471d859a377a9a404361e7efc2a971d73424a6d183c0fc77" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -6023,10 +6055,10 @@ dependencies = [ "pin-project-lite", "quinn-proto", "quinn-udp", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "rustls", "socket2", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", "tracing", ] @@ -6041,11 +6073,11 @@ dependencies = [ "getrandom 0.2.15", "rand 0.8.5", "ring", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "rustls", "rustls-pki-types", "slab", - "thiserror 2.0.3", + "thiserror 2.0.4", "tinyvec", "tracing", "web-time", @@ -6503,7 +6535,7 @@ dependencies = [ "reth-tokio-util", "reth-tracing", "schnellru", - "thiserror 1.0.69", + "thiserror 2.0.4", "tokio", "tokio-stream", "tracing", @@ -6539,7 +6571,7 @@ dependencies = [ "reth-rpc-types-compat", "reth-tracing", "serde", - "thiserror 1.0.69", + "thiserror 2.0.4", "tokio", "tower 0.4.13", "tracing", @@ -6593,7 +6625,7 @@ dependencies = [ "reth-execution-errors", "reth-primitives", "reth-storage-errors", - "thiserror 1.0.69", + "thiserror 2.0.4", ] [[package]] @@ -6749,7 +6781,7 @@ dependencies = [ "reth-fs-util", "secp256k1", "serde", - "thiserror 1.0.69", + "thiserror 2.0.4", "tikv-jemallocator", "tracy-client", ] @@ -6784,7 +6816,7 @@ dependencies = [ "proc-macro2", "quote", "similar-asserts", - "syn 2.0.89", + "syn 2.0.90", ] 
[[package]] @@ -6887,14 +6919,14 @@ dependencies = [ "reth-storage-errors", "reth-tracing", "reth-trie-common", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "serde", "serde_json", "strum", "sysinfo", "tempfile", "test-fuzz", - "thiserror 1.0.69", + "thiserror 2.0.4", ] [[package]] @@ -6921,6 +6953,7 @@ dependencies = [ "reth-stages-types", "reth-storage-errors", "reth-trie-common", + "roaring", "serde", "test-fuzz", ] @@ -6950,7 +6983,7 @@ dependencies = [ "reth-trie-db", "serde", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.4", "tracing", ] @@ -6992,7 +7025,7 @@ dependencies = [ "schnellru", "secp256k1", "serde", - "thiserror 1.0.69", + "thiserror 2.0.4", "tokio", "tokio-stream", "tracing", @@ -7017,7 +7050,7 @@ dependencies = [ "reth-network-peers", "reth-tracing", "secp256k1", - "thiserror 1.0.69", + "thiserror 2.0.4", "tokio", "tracing", ] @@ -7043,7 +7076,7 @@ dependencies = [ "secp256k1", "serde", "serde_with", - "thiserror 1.0.69", + "thiserror 2.0.4", "tokio", "tokio-stream", "tracing", @@ -7082,7 +7115,7 @@ dependencies = [ "reth-testing-utils", "reth-tracing", "tempfile", - "thiserror 1.0.69", + "thiserror 2.0.4", "tokio", "tokio-stream", "tokio-util", @@ -7159,7 +7192,7 @@ dependencies = [ "secp256k1", "sha2 0.10.8", "sha3", - "thiserror 1.0.69", + "thiserror 2.0.4", "tokio", "tokio-stream", "tokio-util", @@ -7171,6 +7204,7 @@ dependencies = [ name = "reth-engine-local" version = "1.1.2" dependencies = [ + "alloy-consensus", "alloy-primitives", "alloy-rpc-types-engine", "eyre", @@ -7184,10 +7218,10 @@ dependencies = [ "reth-engine-tree", "reth-ethereum-engine-primitives", "reth-evm", + "reth-node-types", "reth-payload-builder", "reth-payload-builder-primitives", "reth-payload-primitives", - "reth-payload-validator", "reth-provider", "reth-prune", "reth-rpc-types-compat", @@ -7214,7 +7248,7 @@ dependencies = [ "reth-primitives-traits", "reth-trie", "serde", - "thiserror 1.0.69", + "thiserror 2.0.4", "tokio", ] @@ -7236,13 +7270,12 @@ dependencies = 
[ "reth-network-p2p", "reth-node-types", "reth-payload-builder", - "reth-payload-validator", "reth-primitives", "reth-provider", "reth-prune", "reth-stages-api", "reth-tasks", - "thiserror 1.0.69", + "thiserror 2.0.4", "tokio", "tokio-stream", ] @@ -7280,7 +7313,6 @@ dependencies = [ "reth-payload-builder", "reth-payload-builder-primitives", "reth-payload-primitives", - "reth-payload-validator", "reth-primitives", "reth-provider", "reth-prune", @@ -7298,7 +7330,7 @@ dependencies = [ "reth-trie-parallel", "reth-trie-sparse", "revm-primitives", - "thiserror 1.0.69", + "thiserror 2.0.4", "tokio", "tracing", ] @@ -7315,6 +7347,7 @@ dependencies = [ "futures", "itertools 0.13.0", "pin-project", + "reth-consensus-common", "reth-engine-primitives", "reth-errors", "reth-ethereum-forks", @@ -7343,7 +7376,7 @@ dependencies = [ "reth-execution-errors", "reth-fs-util", "reth-storage-errors", - "thiserror 1.0.69", + "thiserror 2.0.4", ] [[package]] @@ -7376,7 +7409,7 @@ dependencies = [ "serde", "snap", "test-fuzz", - "thiserror 1.0.69", + "thiserror 2.0.4", "tokio", "tokio-stream", "tokio-util", @@ -7401,10 +7434,11 @@ dependencies = [ "rand 0.8.5", "reth-chainspec", "reth-codecs-derive", + "reth-ethereum-forks", "reth-primitives", "reth-primitives-traits", "serde", - "thiserror 1.0.69", + "thiserror 2.0.4", ] [[package]] @@ -7468,9 +7502,9 @@ dependencies = [ "once_cell", "proptest", "proptest-derive", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "serde", - "thiserror-no-std", + "thiserror 2.0.4", ] [[package]] @@ -7494,7 +7528,6 @@ dependencies = [ "reth-provider", "reth-revm", "reth-transaction-pool", - "reth-trie", "revm", "tracing", ] @@ -7567,12 +7600,12 @@ dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rlp", - "derive_more 1.0.0", "nybbles", "reth-consensus", "reth-prune-types", "reth-storage-errors", "revm-primitives", + "thiserror 2.0.4", ] [[package]] @@ -7667,7 +7700,7 @@ dependencies = [ "reth-transaction-pool", "reth-trie-db", "tempfile", - "thiserror 
1.0.69", + "thiserror 2.0.4", "tokio", ] @@ -7694,7 +7727,7 @@ version = "1.1.2" dependencies = [ "serde", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.4", ] [[package]] @@ -7737,7 +7770,7 @@ dependencies = [ "rand 0.8.5", "reth-tracing", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.4", "tokio", "tokio-stream", "tokio-util", @@ -7754,7 +7787,7 @@ dependencies = [ "criterion", "dashmap 6.1.0", "derive_more 1.0.0", - "indexmap 2.6.0", + "indexmap 2.7.0", "parking_lot", "pprof", "rand 0.8.5", @@ -7762,7 +7795,7 @@ dependencies = [ "reth-mdbx-sys", "smallvec", "tempfile", - "thiserror 1.0.69", + "thiserror 2.0.4", "tracing", ] @@ -7801,7 +7834,7 @@ dependencies = [ "reqwest", "reth-tracing", "serde_with", - "thiserror 1.0.69", + "thiserror 2.0.4", "tokio", "tracing", ] @@ -7854,14 +7887,14 @@ dependencies = [ "reth-tokio-util", "reth-tracing", "reth-transaction-pool", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "schnellru", "secp256k1", "serde", "serial_test", "smallvec", "tempfile", - "thiserror 1.0.69", + "thiserror 2.0.4", "tokio", "tokio-stream", "tokio-util", @@ -7886,7 +7919,7 @@ dependencies = [ "reth-network-types", "reth-tokio-util", "serde", - "thiserror 1.0.69", + "thiserror 2.0.4", "tokio", "tokio-stream", ] @@ -7924,7 +7957,7 @@ dependencies = [ "secp256k1", "serde_json", "serde_with", - "thiserror 1.0.69", + "thiserror 2.0.4", "tokio", "url", ] @@ -7955,7 +7988,7 @@ dependencies = [ "reth-fs-util", "serde", "tempfile", - "thiserror 1.0.69", + "thiserror 2.0.4", "tracing", "zstd", ] @@ -7964,7 +7997,6 @@ dependencies = [ name = "reth-node-api" version = "1.1.2" dependencies = [ - "alloy-consensus", "alloy-rpc-types-engine", "eyre", "reth-beacon-consensus", @@ -8070,6 +8102,7 @@ dependencies = [ "reth-db", "reth-discv4", "reth-discv5", + "reth-ethereum-forks", "reth-net-nat", "reth-network", "reth-network-p2p", @@ -8089,7 +8122,7 @@ dependencies = [ "serde", "shellexpand", "strum", - "thiserror 1.0.69", + "thiserror 2.0.4", "tokio", "toml", 
"tracing", @@ -8285,6 +8318,7 @@ dependencies = [ "reth-consensus-common", "reth-optimism-chainspec", "reth-optimism-forks", + "reth-optimism-primitives", "reth-primitives", "reth-trie-common", "tracing", @@ -8302,6 +8336,7 @@ dependencies = [ "op-alloy-consensus", "reth-chainspec", "reth-consensus", + "reth-consensus-common", "reth-ethereum-forks", "reth-evm", "reth-execution-errors", @@ -8413,10 +8448,9 @@ dependencies = [ "reth-revm", "reth-rpc-types-compat", "reth-transaction-pool", - "reth-trie", "revm", "sha2 0.10.8", - "thiserror 1.0.69", + "thiserror 2.0.4", "tracing", ] @@ -8432,10 +8466,15 @@ dependencies = [ "bytes", "derive_more 1.0.0", "op-alloy-consensus", + "proptest", + "proptest-arbitrary-interop", + "rand 0.8.5", "reth-codecs", "reth-primitives", "reth-primitives-traits", + "revm-primitives", "rstest", + "secp256k1", "serde", ] @@ -8448,7 +8487,6 @@ dependencies = [ "alloy-primitives", "alloy-rpc-types-debug", "alloy-rpc-types-eth", - "derive_more 1.0.0", "jsonrpsee-core", "jsonrpsee-types", "op-alloy-consensus", @@ -8479,7 +8517,7 @@ dependencies = [ "reth-transaction-pool", "revm", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.4", "tokio", "tracing", ] @@ -8544,7 +8582,7 @@ dependencies = [ "reth-primitives", "revm-primitives", "serde", - "thiserror 1.0.69", + "thiserror 2.0.4", "tokio", ] @@ -8630,12 +8668,12 @@ dependencies = [ "bytes", "derive_more 1.0.0", "modular-bitfield", + "op-alloy-consensus", "proptest", "proptest-arbitrary-interop", "rand 0.8.5", "reth-codecs", "revm-primitives", - "roaring", "serde", "serde_json", "serde_with", @@ -8696,6 +8734,7 @@ dependencies = [ name = "reth-prune" version = "1.1.2" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "assert_matches", @@ -8717,8 +8756,8 @@ dependencies = [ "reth-testing-utils", "reth-tokio-util", "reth-tracing", - "rustc-hash 2.0.0", - "thiserror 1.0.69", + "rustc-hash 2.1.0", + "thiserror 2.0.4", "tokio", "tracing", ] @@ -8739,7 +8778,7 @@ dependencies 
= [ "serde", "serde_json", "test-fuzz", - "thiserror 1.0.69", + "thiserror 2.0.4", "toml", ] @@ -8820,13 +8859,12 @@ dependencies = [ "reth-tasks", "reth-testing-utils", "reth-transaction-pool", - "reth-trie", "revm", "revm-inspectors", "revm-primitives", "serde", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.4", "tokio", "tokio-stream", "tower 0.4.13", @@ -8920,7 +8958,7 @@ dependencies = [ "reth-transaction-pool", "serde", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.4", "tokio", "tokio-util", "tower 0.4.13", @@ -8961,7 +8999,7 @@ dependencies = [ "reth-tokio-util", "reth-transaction-pool", "serde", - "thiserror 1.0.69", + "thiserror 2.0.4", "tokio", "tracing", ] @@ -8993,6 +9031,7 @@ dependencies = [ "reth-network-api", "reth-node-api", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-revm", "reth-rpc-eth-types", @@ -9000,7 +9039,7 @@ dependencies = [ "reth-rpc-types-compat", "reth-tasks", "reth-transaction-pool", - "reth-trie", + "reth-trie-common", "revm", "revm-inspectors", "revm-primitives", @@ -9027,7 +9066,6 @@ dependencies = [ "reth-chain-state", "reth-chainspec", "reth-errors", - "reth-evm", "reth-execution-types", "reth-metrics", "reth-primitives", @@ -9045,7 +9083,7 @@ dependencies = [ "schnellru", "serde", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.4", "tokio", "tokio-stream", "tracing", @@ -9093,10 +9131,8 @@ dependencies = [ "alloy-rlp", "alloy-rpc-types-engine", "alloy-rpc-types-eth", - "alloy-serde", "jsonrpsee-types", "reth-primitives", - "reth-trie-common", "serde", "serde_json", ] @@ -9147,7 +9183,7 @@ dependencies = [ "reth-trie", "reth-trie-db", "tempfile", - "thiserror 1.0.69", + "thiserror 2.0.4", "tokio", "tracing", ] @@ -9174,7 +9210,7 @@ dependencies = [ "reth-static-file-types", "reth-testing-utils", "reth-tokio-util", - "thiserror 1.0.69", + "thiserror 2.0.4", "tokio", "tokio-stream", "tracing", @@ -9253,6 +9289,7 @@ dependencies = [ "reth-storage-errors", "reth-trie", "reth-trie-db", + 
"revm", ] [[package]] @@ -9264,7 +9301,8 @@ dependencies = [ "alloy-rlp", "derive_more 1.0.0", "reth-fs-util", - "reth-primitives", + "reth-primitives-traits", + "reth-static-file-types", ] [[package]] @@ -9278,7 +9316,7 @@ dependencies = [ "pin-project", "rayon", "reth-metrics", - "thiserror 1.0.69", + "thiserror 2.0.4", "tokio", "tracing", "tracing-futures", @@ -9356,13 +9394,13 @@ dependencies = [ "reth-tasks", "reth-tracing", "revm", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "schnellru", "serde", "serde_json", "smallvec", "tempfile", - "thiserror 1.0.69", + "thiserror 2.0.4", "tokio", "tokio-stream", "tracing", @@ -9389,6 +9427,7 @@ dependencies = [ "reth-stages-types", "reth-storage-errors", "reth-trie-common", + "reth-trie-sparse", "revm", "serde_json", "tracing", @@ -9403,6 +9442,8 @@ dependencies = [ "alloy-genesis", "alloy-primitives", "alloy-rlp", + "alloy-rpc-types-eth", + "alloy-serde", "alloy-trie", "arbitrary", "bincode", @@ -9474,7 +9515,7 @@ dependencies = [ "reth-trie", "reth-trie-common", "reth-trie-db", - "thiserror 1.0.69", + "thiserror 2.0.4", "tokio", "tracing", ] @@ -9493,13 +9534,14 @@ dependencies = [ "proptest", "proptest-arbitrary-interop", "rand 0.8.5", + "reth-execution-errors", "reth-primitives-traits", "reth-testing-utils", "reth-tracing", "reth-trie", "reth-trie-common", "smallvec", - "thiserror 1.0.69", + "thiserror 2.0.4", ] [[package]] @@ -9519,9 +9561,9 @@ dependencies = [ [[package]] name = "revm-inspectors" -version = "0.11.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "747291a18ad6726a08dd73f8b6a6b3a844db582ecae2063ccf0a04880c44f482" +checksum = "0b7f5f8a2deafb3c76f357bbf9e71b73bddb915c4994bbbe3208fbfbe8fc7f8e" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -9684,7 +9726,6 @@ checksum = "f81dc953b2244ddd5e7860cb0bb2a790494b898ef321d4aff8e260efab60cc88" dependencies = [ "bytemuck", "byteorder", - "serde", ] [[package]] @@ -9728,7 +9769,7 @@ dependencies = [ 
"regex", "relative-path", "rustc_version 0.4.1", - "syn 2.0.89", + "syn 2.0.90", "unicode-ident", ] @@ -9777,9 +9818,9 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustc-hash" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" +checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497" dependencies = [ "rand 0.8.5", ] @@ -10130,7 +10171,7 @@ checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -10139,7 +10180,7 @@ version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.7.0", "itoa", "memchr", "ryu", @@ -10165,7 +10206,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -10199,7 +10240,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.6.0", + "indexmap 2.7.0", "serde", "serde_derive", "serde_json", @@ -10216,7 +10257,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -10239,7 +10280,7 @@ checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -10338,7 +10379,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34db1a06d485c9142248b7a054f034b349b212551f3dfd19c94d45a754a217cd" dependencies = [ "libc", - "mio 1.0.2", + "mio 1.0.3", "signal-hook", ] @@ -10443,9 +10484,9 @@ dependencies = [ [[package]] name = "soketto" -version = "0.8.0" +version = "0.8.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "37468c595637c10857701c990f93a40ce0e357cedb0953d1c26c8d8027f9bb53" +checksum = "2e859df029d160cb88608f5d7df7fb4753fd20fdfb4de5644f3d8b8440841721" dependencies = [ "base64 0.22.1", "bytes", @@ -10522,7 +10563,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -10580,9 +10621,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.89" +version = "2.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d46482f1c1c87acd84dea20c1bf5ebff4c757009ed6bf19cfd36fb10e92c4e" +checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31" dependencies = [ "proc-macro2", "quote", @@ -10591,14 +10632,14 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.8.13" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bdaa7b9e815582ba343a20c66627437cf45f1c6fba7f69772cbfd1358c7e197" +checksum = "da0523f59468a2696391f2a772edc089342aacd53c3caa2ac3264e598edf119b" dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -10624,7 +10665,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -10701,7 +10742,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -10734,11 +10775,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.3" +version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa" +checksum = "2f49a1853cf82743e3b7950f77e0f4d622ca36cf4317cba00c767838bac8d490" dependencies = [ - "thiserror-impl 2.0.3", + "thiserror-impl 2.0.4", ] [[package]] @@ -10749,38 +10790,18 @@ checksum = 
"4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] name = "thiserror-impl" -version = "2.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.89", -] - -[[package]] -name = "thiserror-impl-no-std" -version = "2.0.2" +version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58e6318948b519ba6dc2b442a6d0b904ebfb8d411a3ad3e07843615a72249758" +checksum = "8381894bb3efe0c4acac3ded651301ceee58a15d47c2e34885ed1908ad667061" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", -] - -[[package]] -name = "thiserror-no-std" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3ad459d94dd517257cc96add8a43190ee620011bb6e6cdc82dafd97dfafafea" -dependencies = [ - "thiserror-impl-no-std", + "syn 2.0.90", ] [[package]] @@ -10835,9 +10856,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.36" +version = "0.3.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" +checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" dependencies = [ "deranged", "itoa", @@ -10859,9 +10880,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" dependencies = [ "num-conv", "time-core", @@ -10913,14 +10934,14 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" 
-version = "1.41.1" +version = "1.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22cfb5bee7a6a52939ca9224d6ac897bb669134078daa8735560897f69de4d33" +checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551" dependencies = [ "backtrace", "bytes", "libc", - "mio 1.0.2", + "mio 1.0.3", "parking_lot", "pin-project-lite", "signal-hook-registry", @@ -10937,7 +10958,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -11021,7 +11042,7 @@ version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.7.0", "serde", "serde_spanned", "toml_datetime", @@ -11138,7 +11159,7 @@ checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -11163,9 +11184,9 @@ dependencies = [ [[package]] name = "tracing-journald" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba316a74e8fc3c3896a850dba2375928a9fa171b085ecddfc7c054d39970f3fd" +checksum = "fc0b4143302cf1022dac868d521e36e8b27691f72c84b3311750d5188ebba657" dependencies = [ "libc", "tracing-core", @@ -11197,9 +11218,9 @@ dependencies = [ [[package]] name = "tracing-serde" -version = "0.1.3" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" +checksum = "704b1aeb7be0d0a84fc9828cae51dab5970fee5088f83d1dd7ee6f6246fc6ff1" dependencies = [ "serde", "tracing-core", @@ -11207,9 +11228,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.18" +version = "0.3.19" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" dependencies = [ "matchers", "nu-ansi-term", @@ -11536,7 +11557,7 @@ checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -11587,9 +11608,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.95" +version = "0.2.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" +checksum = "d15e63b4482863c109d70a7b8706c1e364eb6ea449b201a76c5b89cedcec2d5c" dependencies = [ "cfg-if", "once_cell", @@ -11598,36 +11619,37 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.95" +version = "0.2.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" +checksum = "8d36ef12e3aaca16ddd3f67922bc63e48e953f126de60bd33ccc0101ef9998cd" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.45" +version = "0.4.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc7ec4f8827a71586374db3e87abdb5a2bb3a15afed140221307c3ec06b1f63b" +checksum = "9dfaf8f50e5f293737ee323940c7d8b08a66a95a419223d9f41610ca08b0833d" dependencies = [ "cfg-if", "js-sys", + "once_cell", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.95" +version = "0.2.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" +checksum = 
"705440e08b42d3e4b36de7d66c944be628d579796b8090bfa3471478a2260051" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -11635,22 +11657,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.95" +version = "0.2.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" +checksum = "98c9ae5a76e46f4deecd0f0255cc223cfa18dc9b261213b8aa0c7b36f61b3f1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.95" +version = "0.2.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" +checksum = "6ee99da9c5ba11bd675621338ef6fa52296b76b83305e9b6e5c77d4c286d6d49" [[package]] name = "wasm-streams" @@ -11681,9 +11703,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.72" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6488b90108c040df0fe62fa815cbdee25124641df01814dd7282749234c6112" +checksum = "a98bc3c33f0fe7e59ad7cd041b89034fa82a7c2d4365ca538dda6cdaf513863c" dependencies = [ "js-sys", "wasm-bindgen", @@ -11807,7 +11829,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -11818,7 +11840,7 @@ checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -11829,7 +11851,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -11840,7 +11862,7 @@ checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -12115,7 +12137,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "synstructure", ] @@ -12137,7 +12159,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -12157,7 +12179,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "synstructure", ] @@ -12178,7 +12200,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -12200,7 +12222,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 113d0661f3ff..550a71352061 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -339,7 +339,7 @@ reth-eth-wire-types = { path = "crates/net/eth-wire-types" } reth-ethereum-cli = { path = "crates/ethereum/cli" } reth-ethereum-consensus = { path = "crates/ethereum/consensus" } reth-ethereum-engine-primitives = { path = "crates/ethereum/engine-primitives" } -reth-ethereum-forks = { path = "crates/ethereum-forks" } +reth-ethereum-forks = { path = "crates/ethereum-forks", default-features = false } reth-ethereum-payload-builder = { path = "crates/ethereum/payload" } reth-etl = { path = "crates/etl" } reth-evm = { path = "crates/evm" } @@ -375,7 +375,7 @@ reth-node-types = { path = "crates/node/types" } reth-optimism-chainspec = { path = "crates/optimism/chainspec" } reth-optimism-cli = { path = "crates/optimism/cli" } reth-optimism-consensus = { path = "crates/optimism/consensus" } -reth-optimism-forks = { path = 
"crates/optimism/hardforks" } +reth-optimism-forks = { path = "crates/optimism/hardforks", default-features = false } reth-optimism-payload-builder = { path = "crates/optimism/payload" } reth-optimism-primitives = { path = "crates/optimism/primitives" } reth-optimism-rpc = { path = "crates/optimism/rpc" } @@ -423,59 +423,57 @@ reth-trie-sparse = { path = "crates/trie/sparse" } # revm revm = { version = "18.0.0", features = ["std"], default-features = false } -revm-inspectors = "0.11.0" -revm-primitives = { version = "14.0.0", features = [ - "std", -], default-features = false } +revm-inspectors = "0.12.0" +revm-primitives = { version = "14.0.0", default-features = false } # eth -alloy-chains = "0.1.32" +alloy-chains = { version = "0.1.32", default-features = false } alloy-dyn-abi = "0.8.11" alloy-primitives = { version = "0.8.11", default-features = false } -alloy-rlp = "0.3.4" +alloy-rlp = { version = "0.3.4", default-features = false } alloy-sol-types = "0.8.11" alloy-trie = { version = "0.7", default-features = false } -alloy-consensus = { version = "0.6.4", default-features = false } -alloy-contract = { version = "0.6.4", default-features = false } -alloy-eips = { version = "0.6.4", default-features = false } -alloy-genesis = { version = "0.6.4", default-features = false } -alloy-json-rpc = { version = "0.6.4", default-features = false } -alloy-network = { version = "0.6.4", default-features = false } -alloy-network-primitives = { version = "0.6.4", default-features = false } -alloy-node-bindings = { version = "0.6.4", default-features = false } -alloy-provider = { version = "0.6.4", features = [ +alloy-consensus = { version = "0.7.2", default-features = false } +alloy-contract = { version = "0.7.2", default-features = false } +alloy-eips = { version = "0.7.2", default-features = false } +alloy-genesis = { version = "0.7.2", default-features = false } +alloy-json-rpc = { version = "0.7.2", default-features = false } +alloy-network = { version = "0.7.2", 
default-features = false } +alloy-network-primitives = { version = "0.7.2", default-features = false } +alloy-node-bindings = { version = "0.7.2", default-features = false } +alloy-provider = { version = "0.7.2", features = [ "reqwest", ], default-features = false } -alloy-pubsub = { version = "0.6.4", default-features = false } -alloy-rpc-client = { version = "0.6.4", default-features = false } -alloy-rpc-types = { version = "0.6.4", features = [ +alloy-pubsub = { version = "0.7.2", default-features = false } +alloy-rpc-client = { version = "0.7.2", default-features = false } +alloy-rpc-types = { version = "0.7.2", features = [ "eth", ], default-features = false } -alloy-rpc-types-admin = { version = "0.6.4", default-features = false } -alloy-rpc-types-anvil = { version = "0.6.4", default-features = false } -alloy-rpc-types-beacon = { version = "0.6.4", default-features = false } -alloy-rpc-types-debug = { version = "0.6.4", default-features = false } -alloy-rpc-types-engine = { version = "0.6.4", default-features = false } -alloy-rpc-types-eth = { version = "0.6.4", default-features = false } -alloy-rpc-types-mev = { version = "0.6.4", default-features = false } -alloy-rpc-types-trace = { version = "0.6.4", default-features = false } -alloy-rpc-types-txpool = { version = "0.6.4", default-features = false } -alloy-serde = { version = "0.6.4", default-features = false } -alloy-signer = { version = "0.6.4", default-features = false } -alloy-signer-local = { version = "0.6.4", default-features = false } -alloy-transport = { version = "0.6.4" } -alloy-transport-http = { version = "0.6.4", features = [ +alloy-rpc-types-admin = { version = "0.7.2", default-features = false } +alloy-rpc-types-anvil = { version = "0.7.2", default-features = false } +alloy-rpc-types-beacon = { version = "0.7.2", default-features = false } +alloy-rpc-types-debug = { version = "0.7.2", default-features = false } +alloy-rpc-types-engine = { version = "0.7.2", default-features = false } 
+alloy-rpc-types-eth = { version = "0.7.2", default-features = false } +alloy-rpc-types-mev = { version = "0.7.2", default-features = false } +alloy-rpc-types-trace = { version = "0.7.2", default-features = false } +alloy-rpc-types-txpool = { version = "0.7.2", default-features = false } +alloy-serde = { version = "0.7.2", default-features = false } +alloy-signer = { version = "0.7.2", default-features = false } +alloy-signer-local = { version = "0.7.2", default-features = false } +alloy-transport = { version = "0.7.2" } +alloy-transport-http = { version = "0.7.2", features = [ "reqwest-rustls-tls", ], default-features = false } -alloy-transport-ipc = { version = "0.6.4", default-features = false } -alloy-transport-ws = { version = "0.6.4", default-features = false } +alloy-transport-ipc = { version = "0.7.2", default-features = false } +alloy-transport-ws = { version = "0.7.2", default-features = false } # op -op-alloy-rpc-types = "0.6.7" -op-alloy-rpc-types-engine = "0.6.7" -op-alloy-network = "0.6.7" -op-alloy-consensus = "0.6.7" +op-alloy-rpc-types = "0.7.2" +op-alloy-rpc-types-engine = "0.7.2" +op-alloy-network = "0.7.2" +op-alloy-consensus = "0.7.2" # misc aquamarine = "0.6" @@ -487,12 +485,12 @@ backon = { version = "1.2", default-features = false, features = [ bincode = "1.3" bitflags = "2.4" boyer-moore-magiclen = "0.2.16" -bytes = "1.5" +bytes = { version = "1.5", default-features = false } cfg-if = "1.0" clap = "4" const_format = { version = "0.2.32", features = ["rust_1_64"] } dashmap = "6.0" -derive_more = { version = "1", features = ["full"] } +derive_more = { version = "1", default-features = false, features = ["full"] } dyn-clone = "1.0.17" eyre = "0.6" fdlimit = "0.3.0" @@ -505,7 +503,7 @@ modular-bitfield = "0.11.2" notify = { version = "6.1.1", default-features = false, features = [ "macos_fsevent", ] } -nybbles = "0.2.1" +nybbles = { version = "0.2.1", default-features = false } once_cell = { version = "1.19", default-features = false, features 
= [ "critical-section", ] } @@ -517,14 +515,13 @@ rustc-hash = { version = "2.0", default-features = false } schnellru = "0.2" serde = { version = "1.0", default-features = false } serde_json = "1.0.94" -serde_with = "3.3.0" +serde_with = { version = "3", default-features = false, features = ["macros"] } sha2 = { version = "0.10", default-features = false } shellexpand = "3.0.0" smallvec = "1" strum = { version = "0.26", default-features = false } syn = "2.0" -thiserror = "1.0" -thiserror-no-std = { version = "2.0.2", default-features = false } +thiserror = { version = "2.0.0", default-features = false } tracing = "0.1.0" tracing-appender = "0.2" url = "2.3" @@ -612,35 +609,36 @@ tikv-jemalloc-ctl = "0.6" tikv-jemallocator = "0.6" tracy-client = "0.17.3" -#[patch.crates-io] -#alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-eips = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-json-rpc = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-pubsub = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-rpc-types-admin = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-rpc-types-beacon = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-rpc-types-debug = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-rpc-types-engine = { git = 
"https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-rpc-types-eth = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-rpc-types-mev = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-rpc-types-txpool = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-serde = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-signer-local = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-transport = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-transport-http = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-transport-ipc = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } -#alloy-transport-ws = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } - -#op-alloy-consensus = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } -#op-alloy-network = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } -#op-alloy-rpc-types = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } -#op-alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } +# [patch.crates-io] +# alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-eips = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-json-rpc = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-network-primitives = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# 
alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-pubsub = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-types-admin = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-types-beacon = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-types-debug = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-types-eth = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-types-mev = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-types-txpool = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-serde = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-signer-local = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-transport = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-transport-http = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-transport-ipc = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-transport-ws = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } + +# op-alloy-consensus = { git = "https://github.com/alloy-rs/op-alloy", rev = "debfc29" } +# op-alloy-network = { git = "https://github.com/alloy-rs/op-alloy", rev = "debfc29" } +# op-alloy-rpc-types = { git = "https://github.com/alloy-rs/op-alloy", rev = 
"debfc29" } +# op-alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/op-alloy", rev = "debfc29" } diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index a152bea2681e..cf9c53261b45 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -59,7 +59,7 @@ reth-trie-db = { workspace = true, features = ["metrics"] } reth-node-api.workspace = true reth-node-core.workspace = true reth-ethereum-payload-builder.workspace = true -reth-node-ethereum.workspace = true +reth-node-ethereum = { workspace = true, features = ["js-tracer"] } reth-node-builder.workspace = true reth-node-events.workspace = true reth-node-metrics.workspace = true diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index dc00e07d8830..0e4d3f7188a9 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -17,7 +17,7 @@ use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; -use reth_consensus::Consensus; +use reth_consensus::{Consensus, FullConsensus}; use reth_errors::RethResult; use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_execution_types::ExecutionOutcome; @@ -128,7 +128,7 @@ impl> Command { ) -> eyre::Result<()> { let Environment { provider_factory, .. 
} = self.env.init::(AccessRights::RW)?; - let consensus: Arc = + let consensus: Arc = Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec())); let executor = EthExecutorProvider::ethereum(provider_factory.chain_spec()); @@ -224,6 +224,8 @@ impl> Command { suggested_fee_recipient: self.suggested_fee_recipient, // TODO: add support for withdrawals withdrawals: None, + target_blobs_per_block: None, + max_blobs_per_block: None, }; let payload_config = PayloadConfig::new( Arc::new(SealedHeader::new(best_block.header().clone(), best_block.hash())), @@ -261,7 +263,8 @@ impl> Command { let block_with_senders = SealedBlockWithSenders::>::new(block.clone(), senders).unwrap(); - let db = StateProviderDatabase::new(blockchain_db.latest()?); + let state_provider = blockchain_db.latest()?; + let db = StateProviderDatabase::new(&state_provider); let executor = EthExecutorProvider::ethereum(provider_factory.chain_spec()).executor(db); @@ -271,7 +274,7 @@ impl> Command { ExecutionOutcome::from((block_execution_output, block.number)); debug!(target: "reth::cli", ?execution_outcome, "Executed block"); - let hashed_post_state = execution_outcome.hash_state_slow(); + let hashed_post_state = state_provider.hashed_post_state(execution_outcome.state()); let (state_root, trie_updates) = StateRoot::overlay_root_with_updates( provider_factory.provider()?.tx_ref(), hashed_post_state.clone(), diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index 870dc1ddf233..58b86648b901 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -24,8 +24,9 @@ use reth_node_ethereum::EthExecutorProvider; use reth_primitives::BlockExt; use reth_provider::{ providers::ProviderNodeTypes, AccountExtReader, ChainSpecProvider, DatabaseProviderFactory, - HashingWriter, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderFactory, - StageCheckpointReader, 
StateWriter, StorageLocation, StorageReader, + HashedPostStateProvider, HashingWriter, HeaderProvider, LatestStateProviderRef, + OriginalValuesKnown, ProviderFactory, StageCheckpointReader, StateWriter, StorageLocation, + StorageReader, }; use reth_revm::database::StateProviderDatabase; use reth_stages::StageId; @@ -63,6 +64,7 @@ impl> Command { Primitives: NodePrimitives< Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, + BlockHeader = reth_primitives::Header, >, >, >( @@ -142,7 +144,8 @@ impl> Command { ) .await?; - let db = StateProviderDatabase::new(LatestStateProviderRef::new(&provider)); + let state_provider = LatestStateProviderRef::new(&provider); + let db = StateProviderDatabase::new(&state_provider); let executor = EthExecutorProvider::ethereum(provider_factory.chain_spec()).executor(db); @@ -164,7 +167,7 @@ impl> Command { // Unpacked `BundleState::state_root_slow` function let (in_memory_state_root, in_memory_updates) = StateRoot::overlay_root_with_updates( provider.tx_ref(), - execution_outcome.hash_state_slow(), + state_provider.hashed_post_state(execution_outcome.state()), )?; if in_memory_state_root == block.state_root { diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index 78e32df52664..16a1f1112726 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -62,6 +62,7 @@ impl> Command { Primitives: NodePrimitives< Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, + BlockHeader = reth_primitives::Header, >, >, >( diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index 04d3b5763aef..4b98fc85d0b2 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -12,7 +12,7 @@ use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, Environ use reth_cli_runner::CliContext; use 
reth_cli_util::get_secret_key; use reth_config::Config; -use reth_consensus::Consensus; +use reth_consensus::FullConsensus; use reth_db::DatabaseEnv; use reth_engine_util::engine_store::{EngineMessageStore, StoredEngineApiMessage}; use reth_fs_util as fs; @@ -61,6 +61,7 @@ impl> Command { Primitives: NodePrimitives< Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, + BlockHeader = reth_primitives::Header, >, >, >( @@ -92,7 +93,7 @@ impl> Command { let Environment { provider_factory, config, data_dir } = self.env.init::(AccessRights::RW)?; - let consensus: Arc = + let consensus: Arc = Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec())); let executor = EthExecutorProvider::ethereum(provider_factory.chain_spec()); diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index d1bca0a43b22..cf05ae66e28e 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -245,7 +245,7 @@ RPC: --http.api Rpc Modules to be configured for the HTTP server - [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots, flashbots] + [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots, flashbots, miner] --http.corsdomain Http Corsdomain to allow request from @@ -269,7 +269,7 @@ RPC: --ws.api Rpc Modules to be configured for the WS server - [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots, flashbots] + [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots, flashbots, miner] --ipcdisable Disable the IPC-RPC server @@ -381,8 +381,8 @@ RPC State Cache: [default: 2000] - --rpc-cache.max-envs - Max number of bytes for cached env data + --rpc-cache.max-envs + Max number of headers in cache [default: 1000] diff --git a/book/sources/exex/hello-world/src/bin/3.rs b/book/sources/exex/hello-world/src/bin/3.rs index ebeaf6c84f19..9b429d3eb086 100644 --- a/book/sources/exex/hello-world/src/bin/3.rs +++ b/book/sources/exex/hello-world/src/bin/3.rs @@ -1,10 +1,10 @@ 
use futures_util::TryStreamExt; -use reth::{api::FullNodeComponents, primitives::Block, providers::BlockReader}; +use reth::{api::FullNodeComponents, builder::NodeTypes, primitives::EthPrimitives}; use reth_exex::{ExExContext, ExExEvent, ExExNotification}; use reth_node_ethereum::EthereumNode; use reth_tracing::tracing::info; -async fn my_exex>>( +async fn my_exex>>( mut ctx: ExExContext, ) -> eyre::Result<()> { while let Some(notification) = ctx.notifications.try_next().await? { diff --git a/book/sources/exex/remote/src/exex.rs b/book/sources/exex/remote/src/exex.rs index 00392b4dad10..c823d98ded49 100644 --- a/book/sources/exex/remote/src/exex.rs +++ b/book/sources/exex/remote/src/exex.rs @@ -3,7 +3,7 @@ use remote_exex::proto::{ self, remote_ex_ex_server::{RemoteExEx, RemoteExExServer}, }; -use reth::{primitives::Block, providers::BlockReader}; +use reth::{builder::NodeTypes, primitives::EthPrimitives}; use reth_exex::{ExExContext, ExExEvent, ExExNotification}; use reth_node_api::FullNodeComponents; use reth_node_ethereum::EthereumNode; @@ -45,7 +45,7 @@ impl RemoteExEx for ExExService { } } -async fn remote_exex>>( +async fn remote_exex>>( mut ctx: ExExContext, notifications: Arc>, ) -> eyre::Result<()> { diff --git a/book/sources/exex/remote/src/exex_4.rs b/book/sources/exex/remote/src/exex_4.rs index c37f26d739dc..8286c0289342 100644 --- a/book/sources/exex/remote/src/exex_4.rs +++ b/book/sources/exex/remote/src/exex_4.rs @@ -3,7 +3,7 @@ use remote_exex::proto::{ self, remote_ex_ex_server::{RemoteExEx, RemoteExExServer}, }; -use reth::{primitives::Block, providers::BlockReader}; +use reth::{builder::NodeTypes, primitives::EthPrimitives}; use reth_exex::{ExExContext, ExExEvent, ExExNotification}; use reth_node_api::FullNodeComponents; use reth_node_ethereum::EthereumNode; @@ -47,7 +47,7 @@ impl RemoteExEx for ExExService { // ANCHOR: snippet #[allow(dead_code)] -async fn remote_exex>>( +async fn remote_exex>>( mut ctx: ExExContext, notifications: Arc>, ) -> 
eyre::Result<()> { diff --git a/book/sources/exex/tracking-state/src/bin/1.rs b/book/sources/exex/tracking-state/src/bin/1.rs index 2cf43bec3a17..b1a8609b727a 100644 --- a/book/sources/exex/tracking-state/src/bin/1.rs +++ b/book/sources/exex/tracking-state/src/bin/1.rs @@ -5,7 +5,7 @@ use std::{ }; use futures_util::{FutureExt, TryStreamExt}; -use reth::{api::FullNodeComponents, primitives::Block, providers::BlockReader}; +use reth::{api::FullNodeComponents, builder::NodeTypes, primitives::EthPrimitives}; use reth_exex::{ExExContext, ExExEvent, ExExNotification}; use reth_node_ethereum::EthereumNode; use reth_tracing::tracing::info; @@ -14,7 +14,9 @@ struct MyExEx { ctx: ExExContext, } -impl>> Future for MyExEx { +impl>> Future + for MyExEx +{ type Output = eyre::Result<()>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { diff --git a/book/sources/exex/tracking-state/src/bin/2.rs b/book/sources/exex/tracking-state/src/bin/2.rs index b58d2a39c85c..7e9aadf8a04f 100644 --- a/book/sources/exex/tracking-state/src/bin/2.rs +++ b/book/sources/exex/tracking-state/src/bin/2.rs @@ -6,7 +6,7 @@ use std::{ use alloy_primitives::BlockNumber; use futures_util::{FutureExt, TryStreamExt}; -use reth::{api::FullNodeComponents, primitives::Block, providers::BlockReader}; +use reth::{api::FullNodeComponents, builder::NodeTypes, primitives::EthPrimitives}; use reth_exex::{ExExContext, ExExEvent}; use reth_node_ethereum::EthereumNode; use reth_tracing::tracing::info; @@ -25,7 +25,9 @@ impl MyExEx { } } -impl>> Future for MyExEx { +impl>> Future + for MyExEx +{ type Output = eyre::Result<()>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index bbf1cb099617..e8576de4a711 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -24,8 +24,8 @@ use reth_primitives::{ use reth_provider::{ 
BlockExecutionWriter, BlockNumReader, BlockWriter, CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, ChainSpecProvider, ChainSplit, - ChainSplitTarget, DBProvider, DisplayBlocksChain, HeaderProvider, ProviderError, - StaticFileProviderFactory, StorageLocation, + ChainSplitTarget, DBProvider, DisplayBlocksChain, HashedPostStateProvider, HeaderProvider, + ProviderError, StaticFileProviderFactory, StorageLocation, }; use reth_stages_api::{MetricEvent, MetricEventsSender}; use reth_storage_errors::provider::{ProviderResult, RootMismatch}; @@ -95,7 +95,7 @@ impl BlockchainTree { impl BlockchainTree where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { /// Builds the blockchain tree for the node. /// @@ -1215,7 +1215,7 @@ where recorder: &mut MakeCanonicalDurationsRecorder, ) -> Result<(), CanonicalError> { let (blocks, state, chain_trie_updates) = chain.into_inner(); - let hashed_state = state.hash_state_slow(); + let hashed_state = self.externals.provider_factory.hashed_post_state(state.state()); let prefix_sets = hashed_state.construct_prefix_sets().freeze(); let hashed_state_sorted = hashed_state.into_sorted(); @@ -1390,7 +1390,7 @@ mod tests { use reth_node_types::FullNodePrimitives; use reth_primitives::{ proofs::{calculate_receipt_root, calculate_transaction_root}, - Account, BlockBody, Transaction, TransactionSigned, TransactionSignedEcRecovered, + Account, BlockBody, RecoveredTx, Transaction, TransactionSigned, }; use reth_provider::{ providers::ProviderNodeTypes, @@ -1424,7 +1424,12 @@ mod tests { } fn setup_genesis< - N: ProviderNodeTypes>, + N: ProviderNodeTypes< + Primitives: FullNodePrimitives< + BlockBody = reth_primitives::BlockBody, + BlockHeader = reth_primitives::Header, + >, + >, >( factory: &ProviderFactory, mut genesis: SealedBlock, @@ -1569,7 +1574,7 @@ mod tests { } let single_tx_cost = U256::from(INITIAL_BASE_FEE * MIN_TRANSACTION_GAS); - let mock_tx = |nonce: u64| -> 
TransactionSignedEcRecovered { + let mock_tx = |nonce: u64| -> RecoveredTx { TransactionSigned::new_unhashed( Transaction::Eip1559(TxEip1559 { chain_id: chain_spec.chain.id(), @@ -1586,7 +1591,7 @@ mod tests { let mock_block = |number: u64, parent: Option, - body: Vec, + body: Vec, num_of_signer_txs: u64| -> SealedBlockWithSenders { let signed_body = @@ -1880,7 +1885,12 @@ mod tests { ); let provider = tree.externals.provider_factory.provider().unwrap(); - let prefix_sets = exec5.hash_state_slow().construct_prefix_sets().freeze(); + let prefix_sets = tree + .externals + .provider_factory + .hashed_post_state(exec5.state()) + .construct_prefix_sets() + .freeze(); let state_root = StateRoot::from_tx(provider.tx_ref()).with_prefix_sets(prefix_sets).root().unwrap(); assert_eq!(state_root, block5.state_root); diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index 6ac39c316702..4002fae1ac91 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -11,18 +11,18 @@ use reth_blockchain_tree_api::{ error::{BlockchainTreeError, InsertBlockErrorKind}, BlockAttachment, BlockValidationKind, }; -use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; +use reth_consensus::{ConsensusError, PostExecutionInput}; use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_execution_errors::BlockExecutionError; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{GotExpected, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ - providers::{BundleStateProvider, ConsistentDbView, ProviderNodeTypes}, - DBProvider, FullExecutionDataProvider, ProviderError, StateRootProvider, - TryIntoHistoricalStateProvider, + providers::{BundleStateProvider, ConsistentDbView, TreeNodeTypes}, + DBProvider, FullExecutionDataProvider, HashedPostStateProvider, ProviderError, + StateRootProvider, TryIntoHistoricalStateProvider, }; use reth_revm::database::StateProviderDatabase; -use 
reth_trie::{updates::TrieUpdates, HashedPostState, TrieInput}; +use reth_trie::{updates::TrieUpdates, TrieInput}; use reth_trie_parallel::root::ParallelStateRoot; use std::{ collections::BTreeMap, @@ -76,8 +76,8 @@ impl AppendableChain { block_validation_kind: BlockValidationKind, ) -> Result where - N: ProviderNodeTypes, - E: BlockExecutorProvider, + N: TreeNodeTypes, + E: BlockExecutorProvider, { let execution_outcome = ExecutionOutcome::default(); let empty = BTreeMap::new(); @@ -114,8 +114,8 @@ impl AppendableChain { block_validation_kind: BlockValidationKind, ) -> Result where - N: ProviderNodeTypes, - E: BlockExecutorProvider, + N: TreeNodeTypes, + E: BlockExecutorProvider, { let parent_number = block.number.checked_sub(1).ok_or(BlockchainTreeError::GenesisBlockHasNoParent)?; @@ -177,8 +177,8 @@ impl AppendableChain { ) -> Result<(ExecutionOutcome, Option), BlockExecutionError> where EDP: FullExecutionDataProvider, - N: ProviderNodeTypes, - E: BlockExecutorProvider, + N: TreeNodeTypes, + E: BlockExecutorProvider, { // some checks are done before blocks comes here. externals.consensus.validate_header_against_parent(&block, parent_block)?; @@ -228,14 +228,13 @@ impl AppendableChain { execution_outcome.extend(initial_execution_outcome.clone()); ParallelStateRoot::new( consistent_view, - TrieInput::from_state(execution_outcome.hash_state_slow()), + TrieInput::from_state(provider.hashed_post_state(execution_outcome.state())), ) .incremental_root_with_updates() .map(|(root, updates)| (root, Some(updates))) .map_err(ProviderError::from)? 
} else { - let hashed_state = - HashedPostState::from_bundle_state(&initial_execution_outcome.state().state); + let hashed_state = provider.hashed_post_state(initial_execution_outcome.state()); let state_root = provider.state_root(hashed_state)?; (state_root, None) }; @@ -284,8 +283,8 @@ impl AppendableChain { block_validation_kind: BlockValidationKind, ) -> Result<(), InsertBlockErrorKind> where - N: ProviderNodeTypes, - E: BlockExecutorProvider, + N: TreeNodeTypes, + E: BlockExecutorProvider, { let parent_block = self.chain.tip(); diff --git a/crates/blockchain-tree/src/externals.rs b/crates/blockchain-tree/src/externals.rs index 2a825921f893..9e72008e838f 100644 --- a/crates/blockchain-tree/src/externals.rs +++ b/crates/blockchain-tree/src/externals.rs @@ -1,7 +1,7 @@ //! Blockchain tree externals. use alloy_primitives::{BlockHash, BlockNumber}; -use reth_consensus::Consensus; +use reth_consensus::FullConsensus; use reth_db::{static_file::BlockHashMask, tables}; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; use reth_node_types::NodeTypesWithDB; @@ -28,7 +28,7 @@ pub struct TreeExternals { /// The provider factory, used to commit the canonical chain, or unwind it. pub(crate) provider_factory: ProviderFactory, /// The consensus engine. - pub(crate) consensus: Arc, + pub(crate) consensus: Arc, /// The executor factory to execute blocks with. pub(crate) executor_factory: E, } @@ -37,7 +37,7 @@ impl TreeExternals { /// Create new tree externals. 
pub fn new( provider_factory: ProviderFactory, - consensus: Arc, + consensus: Arc, executor_factory: E, ) -> Self { Self { provider_factory, consensus, executor_factory } diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index 484b4b51869e..e668f4e2dac0 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -39,7 +39,7 @@ impl ShareableBlockchainTree { impl BlockchainTreeEngine for ShareableBlockchainTree where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { fn buffer_block(&self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { let mut tree = self.tree.write(); @@ -110,7 +110,7 @@ where impl BlockchainTreeViewer for ShareableBlockchainTree where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { fn header_by_hash(&self, hash: BlockHash) -> Option { trace!(target: "blockchain_tree", ?hash, "Returning header by hash"); @@ -173,7 +173,7 @@ where impl BlockchainTreePendingStateProvider for ShareableBlockchainTree where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { fn find_pending_state_provider( &self, diff --git a/crates/chain-state/Cargo.toml b/crates/chain-state/Cargo.toml index d2ef5870947b..2b06bd93707b 100644 --- a/crates/chain-state/Cargo.toml +++ b/crates/chain-state/Cargo.toml @@ -26,6 +26,7 @@ reth-trie.workspace = true alloy-eips.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true +revm.workspace = true # async tokio = { workspace = true, default-features = false, features = ["sync", "macros"] } @@ -44,7 +45,6 @@ pin-project.workspace = true alloy-signer = { workspace = true, optional = true } alloy-signer-local = { workspace = true, optional = true } rand = { workspace = true, optional = true } -revm = { workspace = true, optional = true } [dev-dependencies] reth-testing-utils.workspace = true @@ -52,17 +52,15 @@ alloy-signer.workspace = 
true alloy-signer-local.workspace = true alloy-consensus.workspace = true rand.workspace = true -revm.workspace = true [features] test-utils = [ "alloy-signer", "alloy-signer-local", "rand", - "revm", "reth-chainspec/test-utils", "reth-primitives/test-utils", "reth-primitives-traits/test-utils", "reth-trie/test-utils", - "revm?/test-utils", + "revm/test-utils", ] diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index f43aae562e00..670c340db4bf 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -12,7 +12,7 @@ use reth_chainspec::ChainInfo; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_metrics::{metrics::Gauge, Metrics}; use reth_primitives::{ - BlockWithSenders, HeaderExt, NodePrimitives, Receipts, SealedBlock, SealedBlockFor, + BlockWithSenders, EthPrimitives, NodePrimitives, Receipts, SealedBlock, SealedBlockFor, SealedBlockWithSenders, SealedHeader, TransactionMeta, }; use reth_primitives_traits::{Block, BlockBody as _, SignedTransaction}; @@ -51,7 +51,7 @@ pub(crate) struct InMemoryStateMetrics { /// This holds, because only lookup by number functions need to acquire the numbers lock first to /// get the block hash. #[derive(Debug, Default)] -pub(crate) struct InMemoryState { +pub(crate) struct InMemoryState { /// All canonical blocks that are not on disk yet. blocks: RwLock>>>, /// Mapping of block numbers to block hashes. @@ -166,7 +166,7 @@ type PendingBlockAndReceipts = /// all canonical blocks not on disk yet and keeps track of the block range that /// is in memory. #[derive(Debug, Clone)] -pub struct CanonicalInMemoryState { +pub struct CanonicalInMemoryState { pub(crate) inner: Arc>, } @@ -598,7 +598,7 @@ impl CanonicalInMemoryState { /// State after applying the given block, this block is part of the canonical chain that partially /// stored in memory and can be traced back to a canonical block on disk. 
#[derive(Debug, PartialEq, Eq, Clone)] -pub struct BlockState { +pub struct BlockState { /// The executed block that determines the state after this block has been executed. block: ExecutedBlock, /// The block's parent block if it exists. @@ -801,7 +801,7 @@ impl BlockState { /// Represents an executed block stored in-memory. #[derive(Clone, Debug, PartialEq, Eq, Default)] -pub struct ExecutedBlock { +pub struct ExecutedBlock { /// Sealed block the rest of fields refer to. pub block: Arc>, /// Block's senders. @@ -861,7 +861,7 @@ impl ExecutedBlock { /// Non-empty chain of blocks. #[derive(Debug)] -pub enum NewCanonicalChain { +pub enum NewCanonicalChain { /// A simple append to the current canonical head Commit { /// all blocks that lead back to the canonical head @@ -949,8 +949,8 @@ mod tests { use reth_errors::ProviderResult; use reth_primitives::{Account, Bytecode, EthPrimitives, Receipt}; use reth_storage_api::{ - AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider, - StorageRootProvider, + AccountReader, BlockHashReader, HashedPostStateProvider, StateProofProvider, StateProvider, + StateRootProvider, StorageRootProvider, }; use reth_trie::{ AccountProof, HashedStorage, MultiProof, StorageMultiProof, StorageProof, TrieInput, @@ -1047,6 +1047,12 @@ mod tests { } } + impl HashedPostStateProvider for MockStateProvider { + fn hashed_post_state(&self, _bundle_state: &revm::db::BundleState) -> HashedPostState { + HashedPostState::default() + } + } + impl StorageRootProvider for MockStateProvider { fn storage_root( &self, diff --git a/crates/chain-state/src/memory_overlay.rs b/crates/chain-state/src/memory_overlay.rs index c84bd8c93f06..21bc30b07cf7 100644 --- a/crates/chain-state/src/memory_overlay.rs +++ b/crates/chain-state/src/memory_overlay.rs @@ -8,13 +8,14 @@ use alloy_primitives::{ use reth_errors::ProviderResult; use reth_primitives::{Account, Bytecode, NodePrimitives}; use reth_storage_api::{ - AccountReader, 
BlockHashReader, StateProofProvider, StateProvider, StateRootProvider, - StorageRootProvider, + AccountReader, BlockHashReader, HashedPostStateProvider, StateProofProvider, StateProvider, + StateRootProvider, StorageRootProvider, }; use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, StorageMultiProof, TrieInput, }; +use revm::db::BundleState; use std::sync::OnceLock; /// A state provider that stores references to in-memory blocks along with their state as well as a @@ -218,6 +219,12 @@ macro_rules! impl_state_provider { } } + impl $($tokens)* HashedPostStateProvider for $type { + fn hashed_post_state(&self, bundle_state: &BundleState) -> HashedPostState { + self.historical.hashed_post_state(bundle_state) + } + } + impl $($tokens)* StateProvider for $type { fn storage( &self, diff --git a/crates/chain-state/src/notifications.rs b/crates/chain-state/src/notifications.rs index c4e0415436a5..498528813d66 100644 --- a/crates/chain-state/src/notifications.rs +++ b/crates/chain-state/src/notifications.rs @@ -162,19 +162,22 @@ pub struct ForkChoiceNotifications( /// A trait that allows to register to fork choice related events /// and get notified when a new fork choice is available. pub trait ForkChoiceSubscriptions: Send + Sync { + /// Block Header type. + type Header: Clone + Send + Sync + 'static; + /// Get notified when a new safe block of the chain is selected. - fn subscribe_safe_block(&self) -> ForkChoiceNotifications; + fn subscribe_safe_block(&self) -> ForkChoiceNotifications; /// Get notified when a new finalized block of the chain is selected. - fn subscribe_finalized_block(&self) -> ForkChoiceNotifications; + fn subscribe_finalized_block(&self) -> ForkChoiceNotifications; /// Convenience method to get a stream of the new safe blocks of the chain. 
- fn safe_block_stream(&self) -> ForkChoiceStream { + fn safe_block_stream(&self) -> ForkChoiceStream> { ForkChoiceStream::new(self.subscribe_safe_block().0) } /// Convenience method to get a stream of the new finalized blocks of the chain. - fn finalized_block_stream(&self) -> ForkChoiceStream { + fn finalized_block_stream(&self) -> ForkChoiceStream> { ForkChoiceStream::new(self.subscribe_finalized_block().0) } } diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs index f6b0a4f17723..1cd9f2df96b9 100644 --- a/crates/chain-state/src/test_utils.rs +++ b/crates/chain-state/src/test_utils.rs @@ -14,9 +14,8 @@ use reth_chainspec::{ChainSpec, EthereumHardfork, MIN_TRANSACTION_GAS}; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{ proofs::{calculate_receipt_root, calculate_transaction_root, calculate_withdrawals_root}, - BlockBody, EthPrimitives, NodePrimitives, Receipt, Receipts, SealedBlock, + BlockBody, EthPrimitives, NodePrimitives, Receipt, Receipts, RecoveredTx, SealedBlock, SealedBlockWithSenders, SealedHeader, Transaction, TransactionSigned, - TransactionSignedEcRecovered, }; use reth_storage_api::NodePrimitivesProvider; use reth_trie::{root::state_root_unhashed, updates::TrieUpdates, HashedPostState}; @@ -91,7 +90,7 @@ impl TestBlockBuilder { ) -> SealedBlockWithSenders { let mut rng = thread_rng(); - let mock_tx = |nonce: u64| -> TransactionSignedEcRecovered { + let mock_tx = |nonce: u64| -> RecoveredTx { let tx = Transaction::Eip1559(TxEip1559 { chain_id: self.chain_spec.chain.id(), nonce, @@ -109,7 +108,7 @@ impl TestBlockBuilder { let num_txs = rng.gen_range(0..5); let signer_balance_decrease = Self::single_tx_cost() * U256::from(num_txs); - let transactions: Vec = (0..num_txs) + let transactions: Vec = (0..num_txs) .map(|_| { let tx = mock_tx(self.signer_build_account_info.nonce); self.signer_build_account_info.nonce += 1; diff --git a/crates/chainspec/Cargo.toml b/crates/chainspec/Cargo.toml 
index 5bac582cd8b6..0e56cf2d3d94 100644 --- a/crates/chainspec/Cargo.toml +++ b/crates/chainspec/Cargo.toml @@ -47,7 +47,11 @@ std = [ "alloy-trie/std", "reth-primitives-traits/std", "alloy-consensus/std", - "once_cell/std" + "once_cell/std", + "alloy-rlp/std", + "reth-ethereum-forks/std", + "derive_more/std", + "reth-network-peers/std" ] arbitrary = [ "alloy-chains/arbitrary", diff --git a/crates/chainspec/src/api.rs b/crates/chainspec/src/api.rs index 94b4285f92dd..348051bef9cc 100644 --- a/crates/chainspec/src/api.rs +++ b/crates/chainspec/src/api.rs @@ -14,6 +14,9 @@ pub trait EthChainSpec: Send + Sync + Unpin + Debug { // todo: make chain spec type generic over hardfork //type Hardfork: Clone + Copy + 'static; + /// The header type of the network. + type Header; + /// Returns the [`Chain`] object this spec targets. fn chain(&self) -> Chain; @@ -41,7 +44,7 @@ pub trait EthChainSpec: Send + Sync + Unpin + Debug { fn display_hardforks(&self) -> Box; /// The genesis header. - fn genesis_header(&self) -> &Header; + fn genesis_header(&self) -> &Self::Header; /// The genesis block specification. 
fn genesis(&self) -> &Genesis; @@ -64,6 +67,8 @@ pub trait EthChainSpec: Send + Sync + Unpin + Debug { } impl EthChainSpec for ChainSpec { + type Header = Header; + fn chain(&self) -> Chain { self.chain } @@ -92,7 +97,7 @@ impl EthChainSpec for ChainSpec { Box::new(Self::display_hardforks(self)) } - fn genesis_header(&self) -> &Header { + fn genesis_header(&self) -> &Self::Header { self.genesis_header() } diff --git a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml index 90acb82d71d7..2220efda5c6e 100644 --- a/crates/cli/commands/Cargo.toml +++ b/crates/cli/commands/Cargo.toml @@ -17,7 +17,7 @@ reth-cli.workspace = true reth-ethereum-cli.workspace = true reth-cli-runner.workspace = true reth-cli-util.workspace = true -reth-codecs = { workspace = true, optional = true } +reth-codecs.workspace = true reth-config.workspace = true reth-consensus.workspace = true reth-db = { workspace = true, features = ["mdbx"] } @@ -110,7 +110,7 @@ arbitrary = [ "reth-prune-types/test-utils", "reth-stages-types/test-utils", "reth-trie-common/test-utils", - "reth-codecs?/arbitrary", + "reth-codecs/arbitrary", "reth-prune-types?/arbitrary", "reth-stages-types?/arbitrary", "reth-trie-common?/arbitrary", diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index b2ad1452aa46..174eeffa396e 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -3,7 +3,7 @@ use alloy_primitives::B256; use clap::Parser; use reth_beacon_consensus::EthBeaconConsensus; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_chainspec::EthChainSpec; use reth_cli::chainspec::ChainSpecParser; use reth_config::{config::EtlConfig, Config}; use reth_db::{init_db, open_db_read_only, DatabaseEnv}; @@ -54,13 +54,13 @@ pub struct EnvironmentArgs { pub db: DatabaseArgs, } -impl> EnvironmentArgs { +impl EnvironmentArgs { /// Initializes environment according to [`AccessRights`] and returns an instance of /// [`Environment`]. 
- pub fn init>( - &self, - access: AccessRights, - ) -> eyre::Result> { + pub fn init(&self, access: AccessRights) -> eyre::Result> + where + C: ChainSpecParser, + { let data_dir = self.datadir.clone().resolve_datadir(self.chain.chain()); let db_path = data_dir.db(); let sf_path = data_dir.static_files(); @@ -109,12 +109,15 @@ impl> Environmen /// If it's a read-write environment and an issue is found, it will attempt to heal (including a /// pipeline unwind). Otherwise, it will print out an warning, advising the user to restart the /// node to heal. - fn create_provider_factory>( + fn create_provider_factory( &self, config: &Config, db: Arc, static_file_provider: StaticFileProvider, - ) -> eyre::Result>>> { + ) -> eyre::Result>>> + where + C: ChainSpecParser, + { let has_receipt_pruning = config.prune.as_ref().is_some_and(|a| a.has_receipts_pruning()); let prune_modes = config.prune.as_ref().map(|prune| prune.segments.clone()).unwrap_or_default(); diff --git a/crates/cli/commands/src/db/get.rs b/crates/cli/commands/src/db/get.rs index 8f9a5f1d322e..13b7b70347e2 100644 --- a/crates/cli/commands/src/db/get.rs +++ b/crates/cli/commands/src/db/get.rs @@ -9,6 +9,7 @@ use reth_db::{ }; use reth_db_api::table::{Decompress, DupSort, Table}; use reth_db_common::DbTool; +use reth_node_api::{ReceiptTy, TxTy}; use reth_node_builder::NodeTypesWithDB; use reth_provider::{providers::ProviderNodeTypes, StaticFileProviderFactory}; use reth_static_file_types::StaticFileSegment; @@ -65,14 +66,12 @@ impl Command { StaticFileSegment::Headers => { (table_key::(&key)?, >::MASK) } - StaticFileSegment::Transactions => ( - table_key::(&key)?, - ::Value>>::MASK, - ), - StaticFileSegment::Receipts => ( - table_key::(&key)?, - ::Value>>::MASK, - ), + StaticFileSegment::Transactions => { + (table_key::(&key)?, >>::MASK) + } + StaticFileSegment::Receipts => { + (table_key::(&key)?, >>::MASK) + } }; let content = tool.provider_factory.static_file_provider().find_static_file( diff --git 
a/crates/cli/commands/src/import.rs b/crates/cli/commands/src/import.rs index c1f6408b49b0..dc99ae7f98d0 100644 --- a/crates/cli/commands/src/import.rs +++ b/crates/cli/commands/src/import.rs @@ -60,7 +60,7 @@ impl> ImportComm pub async fn execute(self, executor: F) -> eyre::Result<()> where N: CliNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, F: FnOnce(Arc) -> E, { info!(target: "reth::cli", "reth {} starting", SHORT_VERSION); @@ -169,7 +169,7 @@ pub fn build_import_pipeline( where N: ProviderNodeTypes + CliNodeTypes, C: Consensus + 'static, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { if !file_client.has_canonical_blocks() { eyre::bail!("unable to import non canonical blocks"); diff --git a/crates/cli/commands/src/init_state/without_evm.rs b/crates/cli/commands/src/init_state/without_evm.rs index 22236d14c76b..f8f72709a7e1 100644 --- a/crates/cli/commands/src/init_state/without_evm.rs +++ b/crates/cli/commands/src/init_state/without_evm.rs @@ -1,7 +1,8 @@ use alloy_primitives::{BlockNumber, B256, U256}; use alloy_rlp::Decodable; -use alloy_consensus::Header; +use alloy_consensus::{BlockHeader, Header}; +use reth_codecs::Compact; use reth_node_builder::NodePrimitives; use reth_primitives::{SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment}; use reth_provider::{ @@ -27,26 +28,26 @@ pub(crate) fn read_header_from_file(path: PathBuf) -> Result( provider_rw: &Provider, - header: SealedHeader, + header: SealedHeader<::BlockHeader>, total_difficulty: U256, ) -> Result<(), eyre::Error> where - Provider: StaticFileProviderFactory + Provider: StaticFileProviderFactory> + StageCheckpointWriter - + BlockWriter>, + + BlockWriter::Block>, { info!(target: "reth::cli", "Setting up dummy EVM chain before importing state."); let static_file_provider = provider_rw.static_file_provider(); // Write EVM dummy data up to `header - 1` block - append_dummy_chain(&static_file_provider, header.number - 1)?; + 
append_dummy_chain(&static_file_provider, header.number() - 1)?; info!(target: "reth::cli", "Appending first valid block."); append_first_block(provider_rw, &header, total_difficulty)?; for stage in StageId::ALL { - provider_rw.save_stage_checkpoint(stage, StageCheckpoint::new(header.number))?; + provider_rw.save_stage_checkpoint(stage, StageCheckpoint::new(header.number()))?; } info!(target: "reth::cli", "Set up finished."); @@ -60,12 +61,12 @@ where /// height. fn append_first_block( provider_rw: &Provider, - header: &SealedHeader, + header: &SealedHeader<::BlockHeader>, total_difficulty: U256, ) -> Result<(), eyre::Error> where - Provider: BlockWriter> - + StaticFileProviderFactory, + Provider: BlockWriter::Block> + + StaticFileProviderFactory>, { provider_rw.insert_block( SealedBlockWithSenders::new(SealedBlock::new(header.clone(), Default::default()), vec![]) @@ -81,9 +82,9 @@ where &header.hash(), )?; - sf_provider.latest_writer(StaticFileSegment::Receipts)?.increment_block(header.number)?; + sf_provider.latest_writer(StaticFileSegment::Receipts)?.increment_block(header.number())?; - sf_provider.latest_writer(StaticFileSegment::Transactions)?.increment_block(header.number)?; + sf_provider.latest_writer(StaticFileSegment::Transactions)?.increment_block(header.number())?; Ok(()) } @@ -93,7 +94,7 @@ where /// * Headers: It will push an empty block. /// * Transactions: It will not push any tx, only increments the end block range. /// * Receipts: It will not push any receipt, only increments the end block range. 
-fn append_dummy_chain( +fn append_dummy_chain>( sf_provider: &StaticFileProvider, target_height: BlockNumber, ) -> Result<(), eyre::Error> { diff --git a/crates/cli/commands/src/stage/drop.rs b/crates/cli/commands/src/stage/drop.rs index 49bbc55ec241..b93ab1a3c40e 100644 --- a/crates/cli/commands/src/stage/drop.rs +++ b/crates/cli/commands/src/stage/drop.rs @@ -2,7 +2,7 @@ use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use clap::Parser; use itertools::Itertools; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_chainspec::EthChainSpec; use reth_cli::chainspec::ChainSpecParser; use reth_db::{mdbx::tx::Tx, static_file::iter_static_files, tables, DatabaseError}; use reth_db_api::transaction::{DbTx, DbTxMut}; @@ -27,9 +27,12 @@ pub struct Command { stage: StageEnum, } -impl> Command { +impl Command { /// Execute `db` command - pub async fn execute>(self) -> eyre::Result<()> { + pub async fn execute(self) -> eyre::Result<()> + where + C: ChainSpecParser, + { let Environment { provider_factory, .. 
} = self.env.init::(AccessRights::RW)?; let tool = DbTool::new(provider_factory)?; diff --git a/crates/cli/commands/src/stage/dump/execution.rs b/crates/cli/commands/src/stage/dump/execution.rs index 000c1b542dbf..73d2e8a9f8f0 100644 --- a/crates/cli/commands/src/stage/dump/execution.rs +++ b/crates/cli/commands/src/stage/dump/execution.rs @@ -31,9 +31,10 @@ where Primitives: NodePrimitives< Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, + BlockHeader = reth_primitives::Header, >, >, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?; @@ -143,6 +144,7 @@ fn unwind_and_copy< Primitives: NodePrimitives< Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, + BlockHeader = reth_primitives::Header, >, >, >( @@ -186,9 +188,10 @@ where Primitives: NodePrimitives< Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, + BlockHeader = reth_primitives::Header, >, >, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { info!(target: "reth::cli", "Executing stage. 
[dry-run]"); diff --git a/crates/cli/commands/src/stage/dump/merkle.rs b/crates/cli/commands/src/stage/dump/merkle.rs index ce187437218a..59a25c492aa8 100644 --- a/crates/cli/commands/src/stage/dump/merkle.rs +++ b/crates/cli/commands/src/stage/dump/merkle.rs @@ -25,21 +25,23 @@ use reth_stages::{ }; use tracing::info; -pub(crate) async fn dump_merkle_stage< +pub(crate) async fn dump_merkle_stage( + db_tool: &DbTool, + from: BlockNumber, + to: BlockNumber, + output_datadir: ChainPath, + should_run: bool, +) -> Result<()> +where N: ProviderNodeTypes< DB = Arc, Primitives: NodePrimitives< Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, + BlockHeader = reth_primitives::Header, >, >, ->( - db_tool: &DbTool, - from: BlockNumber, - to: BlockNumber, - output_datadir: ChainPath, - should_run: bool, -) -> Result<()> { +{ let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?; output_db.update(|tx| { @@ -81,6 +83,7 @@ fn unwind_and_copy< Primitives: NodePrimitives< Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, + BlockHeader = reth_primitives::Header, >, >, >( @@ -161,11 +164,10 @@ fn unwind_and_copy< } /// Try to re-execute the stage straight away -fn dry_run( - output_provider_factory: ProviderFactory, - to: u64, - from: u64, -) -> eyre::Result<()> { +fn dry_run(output_provider_factory: ProviderFactory, to: u64, from: u64) -> eyre::Result<()> +where + N: ProviderNodeTypes>, +{ info!(target: "reth::cli", "Executing stage."); let provider = output_provider_factory.database_provider_rw()?; diff --git a/crates/cli/commands/src/stage/dump/mod.rs b/crates/cli/commands/src/stage/dump/mod.rs index 36b8fb122580..9cc0f54dd33f 100644 --- a/crates/cli/commands/src/stage/dump/mod.rs +++ b/crates/cli/commands/src/stage/dump/mod.rs @@ -93,7 +93,7 @@ impl> Command pub async fn execute(self, executor: F) -> eyre::Result<()> where N: CliNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, F: FnOnce(Arc) -> 
E, { let Environment { provider_factory, .. } = self.env.init::(AccessRights::RO)?; diff --git a/crates/cli/commands/src/stage/mod.rs b/crates/cli/commands/src/stage/mod.rs index b9e0725428a9..91ab458daf64 100644 --- a/crates/cli/commands/src/stage/mod.rs +++ b/crates/cli/commands/src/stage/mod.rs @@ -44,7 +44,7 @@ impl> Command pub async fn execute(self, ctx: CliContext, executor: F) -> eyre::Result<()> where N: CliNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, F: FnOnce(Arc) -> E, { match self.command { diff --git a/crates/cli/commands/src/stage/run.rs b/crates/cli/commands/src/stage/run.rs index c852eea05a7e..88a5fa6204e8 100644 --- a/crates/cli/commands/src/stage/run.rs +++ b/crates/cli/commands/src/stage/run.rs @@ -107,7 +107,7 @@ impl> Command pub async fn execute(self, ctx: CliContext, executor: F) -> eyre::Result<()> where N: CliNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, F: FnOnce(Arc) -> E, { // Raise the fd limit of the process. diff --git a/crates/cli/commands/src/test_vectors/compact.rs b/crates/cli/commands/src/test_vectors/compact.rs index 5490f568d3a8..c321e35be731 100644 --- a/crates/cli/commands/src/test_vectors/compact.rs +++ b/crates/cli/commands/src/test_vectors/compact.rs @@ -22,7 +22,7 @@ use reth_db::{ }; use reth_fs_util as fs; use reth_primitives::{ - Account, Log, LogData, Receipt, StorageEntry, Transaction, TransactionSignedNoHash, TxType, + Account, Log, LogData, Receipt, StorageEntry, Transaction, TransactionSigned, TxType, }; use reth_prune_types::{PruneCheckpoint, PruneMode}; use reth_stages_types::{ @@ -111,7 +111,7 @@ compact_types!( StoredBlockBodyIndices, StoredBlockWithdrawals, // Manual implementations - TransactionSignedNoHash, + TransactionSigned, // Bytecode, // todo revm arbitrary StorageEntry, // MerkleCheckpoint, // todo storedsubnode -> branchnodecompact arbitrary diff --git a/crates/cli/commands/src/test_vectors/tables.rs b/crates/cli/commands/src/test_vectors/tables.rs index 
fd7d3b3799d8..f845d2a66130 100644 --- a/crates/cli/commands/src/test_vectors/tables.rs +++ b/crates/cli/commands/src/test_vectors/tables.rs @@ -11,7 +11,7 @@ use proptest_arbitrary_interop::arb; use reth_db::tables; use reth_db_api::table::{DupSort, Table, TableRow}; use reth_fs_util as fs; -use reth_primitives::TransactionSignedNoHash; +use reth_primitives::TransactionSigned; use std::collections::HashSet; use tracing::error; @@ -72,9 +72,9 @@ pub fn generate_vectors(mut tables: Vec) -> Result<()> { (HeaderNumbers, PER_TABLE, TABLE), (Headers
, PER_TABLE, TABLE), (BlockBodyIndices, PER_TABLE, TABLE), - (BlockOmmers, 100, TABLE), + (BlockOmmers
, 100, TABLE), (TransactionHashNumbers, PER_TABLE, TABLE), - (Transactions, 100, TABLE), + (Transactions, 100, TABLE), (PlainStorageState, PER_TABLE, DUPSORT), (PlainAccountState, PER_TABLE, TABLE) ]); diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index a7e326848391..b937eb2b4683 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -83,7 +83,7 @@ assert_matches.workspace = true [features] optimism = [ "reth-blockchain-tree/optimism", - "reth-codecs/optimism", + "reth-codecs/op", "reth-chainspec", "reth-db-api/optimism", "reth-db/optimism", diff --git a/crates/consensus/beacon/src/engine/hooks/static_file.rs b/crates/consensus/beacon/src/engine/hooks/static_file.rs index 7cd286f659c3..b4b38239a03f 100644 --- a/crates/consensus/beacon/src/engine/hooks/static_file.rs +++ b/crates/consensus/beacon/src/engine/hooks/static_file.rs @@ -36,7 +36,7 @@ where Provider: StaticFileProviderFactory + DatabaseProviderFactory< Provider: StaticFileProviderFactory< - Primitives: NodePrimitives, + Primitives: NodePrimitives, > + StageCheckpointReader + BlockReader + ChainStateBlockReader, @@ -152,7 +152,7 @@ where Provider: StaticFileProviderFactory + DatabaseProviderFactory< Provider: StaticFileProviderFactory< - Primitives: NodePrimitives, + Primitives: NodePrimitives, > + StageCheckpointReader + BlockReader + ChainStateBlockReader, diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 7a894f08e1c7..f188e495be4e 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -21,7 +21,7 @@ use reth_network_p2p::{ sync::{NetworkSyncUpdater, SyncState}, EthBlockClient, }; -use reth_node_types::{Block, BlockTy, NodeTypesWithEngine}; +use reth_node_types::{Block, BlockTy, HeaderTy, NodeTypesWithEngine}; use reth_payload_builder::PayloadBuilderHandle; use reth_payload_builder_primitives::PayloadBuilder; use 
reth_payload_primitives::{PayloadAttributes, PayloadBuilderAttributes}; @@ -234,9 +234,9 @@ impl BeaconConsensusEngine where N: EngineNodeTypes, BT: BlockchainTreeEngine - + BlockReader> + + BlockReader, Header = HeaderTy> + BlockIdReader - + CanonChainTracker + + CanonChainTracker
> + StageCheckpointReader + ChainSpecProvider + 'static, @@ -1804,9 +1804,9 @@ where N: EngineNodeTypes, Client: EthBlockClient + 'static, BT: BlockchainTreeEngine - + BlockReader> + + BlockReader, Header = HeaderTy> + BlockIdReader - + CanonChainTracker + + CanonChainTracker
> + StageCheckpointReader + ChainSpecProvider + Unpin @@ -2179,7 +2179,12 @@ mod tests { fn insert_blocks< 'a, - N: ProviderNodeTypes>, + N: ProviderNodeTypes< + Primitives: FullNodePrimitives< + BlockBody = reth_primitives::BlockBody, + BlockHeader = reth_primitives::Header, + >, + >, >( provider_factory: ProviderFactory, mut blocks: impl Iterator, diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 0ebef1efe6e6..ae627cae6961 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -13,7 +13,7 @@ use reth_blockchain_tree::{ }; use reth_chainspec::ChainSpec; use reth_config::config::StageConfig; -use reth_consensus::{test_utils::TestConsensus, Consensus}; +use reth_consensus::{test_utils::TestConsensus, FullConsensus}; use reth_db::{test_utils::TempDatabase, DatabaseEnv as DE}; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, @@ -332,7 +332,7 @@ where let provider_factory = create_test_provider_factory_with_chain_spec(self.base_config.chain_spec.clone()); - let consensus: Arc = match self.base_config.consensus { + let consensus: Arc = match self.base_config.consensus { TestConsensusConfig::Real => { Arc::new(EthBeaconConsensus::new(Arc::clone(&self.base_config.chain_spec))) } @@ -374,13 +374,17 @@ where .into_task(); let body_downloader = BodiesDownloaderBuilder::default() - .build(client.clone(), consensus.clone(), provider_factory.clone()) + .build( + client.clone(), + consensus.clone().as_consensus(), + provider_factory.clone(), + ) .into_task(); Pipeline::::builder().add_stages(DefaultStages::new( provider_factory.clone(), tip_rx.clone(), - Arc::clone(&consensus), + consensus.clone().as_consensus(), header_downloader, body_downloader, executor_factory.clone(), diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index b5314cdd1ec9..2d681be449a5 100644 --- 
a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -1,7 +1,10 @@ //! Collection of methods for block validation. -use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, BlockHeader, Header}; -use alloy_eips::eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}; +use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, BlockHeader}; +use alloy_eips::{ + calc_next_block_base_fee, + eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, +}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_consensus::ConsensusError; use reth_primitives::{ @@ -62,11 +65,14 @@ pub fn validate_shanghai_withdrawals Result<(), ConsensusError> { +pub fn validate_cancun_gas( + block: &SealedBlock, +) -> Result<(), ConsensusError> { // Check that the blob gas used in the header matches the sum of the blob gas used by each // blob tx - let header_blob_gas_used = block.blob_gas_used.ok_or(ConsensusError::BlobGasUsedMissing)?; - let total_blob_gas = block.blob_gas_used(); + let header_blob_gas_used = + block.header().blob_gas_used().ok_or(ConsensusError::BlobGasUsedMissing)?; + let total_blob_gas = block.body.blob_gas_used(); if total_blob_gas != header_blob_gas_used { return Err(ConsensusError::BlobGasUsedDiff(GotExpected { got: header_blob_gas_used, @@ -162,11 +168,11 @@ pub fn validate_block_pre_execution( /// * `blob_gas_used` is less than or equal to `MAX_DATA_GAS_PER_BLOCK` /// * `blob_gas_used` is a multiple of `DATA_GAS_PER_BLOB` /// * `excess_blob_gas` is a multiple of `DATA_GAS_PER_BLOB` -pub fn validate_4844_header_standalone(header: &Header) -> Result<(), ConsensusError> { - let blob_gas_used = header.blob_gas_used.ok_or(ConsensusError::BlobGasUsedMissing)?; - let excess_blob_gas = header.excess_blob_gas.ok_or(ConsensusError::ExcessBlobGasMissing)?; +pub fn validate_4844_header_standalone(header: &H) -> Result<(), ConsensusError> { + let blob_gas_used = header.blob_gas_used().ok_or(ConsensusError::BlobGasUsedMissing)?; + 
let excess_blob_gas = header.excess_blob_gas().ok_or(ConsensusError::ExcessBlobGasMissing)?; - if header.parent_beacon_block_root.is_none() { + if header.parent_beacon_block_root().is_none() { return Err(ConsensusError::ParentBeaconBlockRootMissing) } @@ -201,8 +207,8 @@ pub fn validate_4844_header_standalone(header: &Header) -> Result<(), ConsensusE /// From yellow paper: extraData: An arbitrary byte array containing data relevant to this block. /// This must be 32 bytes or fewer; formally Hx. #[inline] -pub fn validate_header_extradata(header: &Header) -> Result<(), ConsensusError> { - let extradata_len = header.extra_data.len(); +pub fn validate_header_extradata(header: &H) -> Result<(), ConsensusError> { + let extradata_len = header.extra_data().len(); if extradata_len > MAXIMUM_EXTRA_DATA_SIZE { Err(ConsensusError::ExtraDataExceedsMax { len: extradata_len }) } else { @@ -215,21 +221,21 @@ pub fn validate_header_extradata(header: &Header) -> Result<(), ConsensusError> /// This function ensures that the header block number is sequential and that the hash of the parent /// header matches the parent hash in the header. #[inline] -pub fn validate_against_parent_hash_number( - header: &Header, +pub fn validate_against_parent_hash_number( + header: &H, parent: &SealedHeader, ) -> Result<(), ConsensusError> { // Parent number is consistent. 
- if parent.number + 1 != header.number { + if parent.number + 1 != header.number() { return Err(ConsensusError::ParentBlockNumberMismatch { parent_block_number: parent.number, - block_number: header.number, + block_number: header.number(), }) } - if parent.hash() != header.parent_hash { + if parent.hash() != header.parent_hash() { return Err(ConsensusError::ParentHashMismatch( - GotExpected { got: header.parent_hash, expected: parent.hash() }.into(), + GotExpected { got: header.parent_hash(), expected: parent.hash() }.into(), )) } @@ -238,23 +244,30 @@ pub fn validate_against_parent_hash_number( /// Validates the base fee against the parent and EIP-1559 rules. #[inline] -pub fn validate_against_parent_eip1559_base_fee( - header: &Header, - parent: &Header, +pub fn validate_against_parent_eip1559_base_fee< + H: BlockHeader, + ChainSpec: EthChainSpec + EthereumHardforks, +>( + header: &H, + parent: &H, chain_spec: &ChainSpec, ) -> Result<(), ConsensusError> { - if chain_spec.fork(EthereumHardfork::London).active_at_block(header.number) { - let base_fee = header.base_fee_per_gas.ok_or(ConsensusError::BaseFeeMissing)?; + if chain_spec.fork(EthereumHardfork::London).active_at_block(header.number()) { + let base_fee = header.base_fee_per_gas().ok_or(ConsensusError::BaseFeeMissing)?; let expected_base_fee = - if chain_spec.fork(EthereumHardfork::London).transitions_at_block(header.number) { + if chain_spec.fork(EthereumHardfork::London).transitions_at_block(header.number()) { alloy_eips::eip1559::INITIAL_BASE_FEE } else { // This BaseFeeMissing will not happen as previous blocks are checked to have // them. - parent - .next_block_base_fee(chain_spec.base_fee_params_at_timestamp(header.timestamp)) - .ok_or(ConsensusError::BaseFeeMissing)? 
+ let base_fee = parent.base_fee_per_gas().ok_or(ConsensusError::BaseFeeMissing)?; + calc_next_block_base_fee( + parent.gas_used(), + parent.gas_limit(), + base_fee, + chain_spec.base_fee_params_at_timestamp(header.timestamp()), + ) }; if expected_base_fee != base_fee { return Err(ConsensusError::BaseFeeDiff(GotExpected { @@ -269,14 +282,14 @@ pub fn validate_against_parent_eip1559_base_fee( + header: &H, + parent: &H, ) -> Result<(), ConsensusError> { - if header.timestamp <= parent.timestamp { + if header.timestamp() <= parent.timestamp() { return Err(ConsensusError::TimestampIsInPast { - parent_timestamp: parent.timestamp, - timestamp: header.timestamp, + parent_timestamp: parent.timestamp(), + timestamp: header.timestamp(), }) } Ok(()) @@ -286,9 +299,9 @@ pub const fn validate_against_parent_timestamp( /// ensures that the `blob_gas_used` and `excess_blob_gas` fields exist in the child header, and /// that the `excess_blob_gas` field matches the expected `excess_blob_gas` calculated from the /// parent header fields. -pub fn validate_against_parent_4844( - header: &Header, - parent: &Header, +pub fn validate_against_parent_4844( + header: &H, + parent: &H, ) -> Result<(), ConsensusError> { // From [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#header-extension): // @@ -296,13 +309,13 @@ pub fn validate_against_parent_4844( // > are evaluated as 0. // // This means in the first post-fork block, calc_excess_blob_gas will return 0. 
- let parent_blob_gas_used = parent.blob_gas_used.unwrap_or(0); - let parent_excess_blob_gas = parent.excess_blob_gas.unwrap_or(0); + let parent_blob_gas_used = parent.blob_gas_used().unwrap_or(0); + let parent_excess_blob_gas = parent.excess_blob_gas().unwrap_or(0); - if header.blob_gas_used.is_none() { + if header.blob_gas_used().is_none() { return Err(ConsensusError::BlobGasUsedMissing) } - let excess_blob_gas = header.excess_blob_gas.ok_or(ConsensusError::ExcessBlobGasMissing)?; + let excess_blob_gas = header.excess_blob_gas().ok_or(ConsensusError::ExcessBlobGasMissing)?; let expected_excess_blob_gas = calc_excess_blob_gas(parent_excess_blob_gas, parent_blob_gas_used); @@ -320,7 +333,7 @@ pub fn validate_against_parent_4844( #[cfg(test)] mod tests { use super::*; - use alloy_consensus::{TxEip4844, EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; + use alloy_consensus::{Header, TxEip4844, EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; use alloy_eips::{ eip4895::{Withdrawal, Withdrawals}, BlockHashOrNumber, @@ -378,6 +391,8 @@ mod tests { } impl HeaderProvider for Provider { + type Header = Header; + fn is_known(&self, _block_hash: &BlockHash) -> ProviderResult { Ok(self.is_known) } @@ -482,7 +497,8 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, - requests_hash: None + requests_hash: None, + target_blobs_per_block: None, }; // size: 0x9b5 diff --git a/crates/consensus/consensus/Cargo.toml b/crates/consensus/consensus/Cargo.toml index 55188dd8472b..36356a4de369 100644 --- a/crates/consensus/consensus/Cargo.toml +++ b/crates/consensus/consensus/Cargo.toml @@ -32,7 +32,8 @@ std = [ "alloy-primitives/std", "alloy-eips/std", "alloy-consensus/std", - "reth-primitives-traits/std" + "reth-primitives-traits/std", + "derive_more/std" ] test-utils = [ "reth-primitives/test-utils", diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index 3ad53456cbdf..ba1b1321e776 100644 --- 
a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -16,8 +16,8 @@ use alloy_consensus::Header; use alloy_eips::eip7685::Requests; use alloy_primitives::{BlockHash, BlockNumber, Bloom, B256, U256}; use reth_primitives::{ - BlockBody, BlockWithSenders, GotExpected, GotExpectedBoxed, InvalidTransactionError, Receipt, - SealedBlock, SealedHeader, + BlockBody, BlockWithSenders, EthPrimitives, GotExpected, GotExpectedBoxed, + InvalidTransactionError, NodePrimitives, Receipt, SealedBlock, SealedHeader, }; use reth_primitives_traits::constants::MINIMUM_GAS_LIMIT; @@ -28,7 +28,7 @@ pub mod noop; /// test helpers for mocking consensus pub mod test_utils; -/// Post execution input passed to [`Consensus::validate_block_post_execution`]. +/// Post execution input passed to [`FullConsensus::validate_block_post_execution`]. #[derive(Debug)] pub struct PostExecutionInput<'a, R = Receipt> { /// Receipts of the block. @@ -44,11 +44,28 @@ impl<'a, R> PostExecutionInput<'a, R> { } } -/// Consensus is a protocol that chooses canonical chain. +/// [`Consensus`] implementation which knows full node primitives and is able to validation block's +/// execution outcome. #[auto_impl::auto_impl(&, Arc)] -pub trait Consensus: - AsHeaderValidator + HeaderValidator + Debug + Send + Sync +pub trait FullConsensus: + AsConsensus { + /// Validate a block considering world state, i.e. things that can not be checked before + /// execution. + /// + /// See the Yellow Paper sections 4.3.2 "Holistic Validity". + /// + /// Note: validating blocks does not include other validations of the Consensus + fn validate_block_post_execution( + &self, + block: &BlockWithSenders, + input: PostExecutionInput<'_, N::Receipt>, + ) -> Result<(), ConsensusError>; +} + +/// Consensus is a protocol that chooses canonical chain. +#[auto_impl::auto_impl(&, Arc)] +pub trait Consensus: AsHeaderValidator { /// Ensures that body field values match the header. 
fn validate_body_against_header( &self, @@ -67,18 +84,6 @@ pub trait Consensus: /// Note: validating blocks does not include other validations of the Consensus fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError>; - - /// Validate a block considering world state, i.e. things that can not be checked before - /// execution. - /// - /// See the Yellow Paper sections 4.3.2 "Holistic Validity". - /// - /// Note: validating blocks does not include other validations of the Consensus - fn validate_block_post_execution( - &self, - block: &BlockWithSenders, - input: PostExecutionInput<'_>, - ) -> Result<(), ConsensusError>; } /// HeaderValidator is a protocol that validates headers and their relationships. @@ -162,6 +167,23 @@ impl, H> AsHeaderValidator for T { } } +/// Helper trait to cast `Arc` to `Arc` +pub trait AsConsensus: Consensus { + /// Converts the [`Arc`] of self to [`Arc`] of [`HeaderValidator`] + fn as_consensus<'a>(self: Arc) -> Arc + 'a> + where + Self: 'a; +} + +impl, H, B> AsConsensus for T { + fn as_consensus<'a>(self: Arc) -> Arc + 'a> + where + Self: 'a, + { + self + } +} + /// Consensus Errors #[derive(Debug, PartialEq, Eq, Clone, derive_more::Display, derive_more::Error)] pub enum ConsensusError { diff --git a/crates/consensus/consensus/src/noop.rs b/crates/consensus/consensus/src/noop.rs index 6d12af08d519..c56e9867a256 100644 --- a/crates/consensus/consensus/src/noop.rs +++ b/crates/consensus/consensus/src/noop.rs @@ -1,6 +1,6 @@ -use crate::{Consensus, ConsensusError, HeaderValidator, PostExecutionInput}; +use crate::{Consensus, ConsensusError, FullConsensus, HeaderValidator, PostExecutionInput}; use alloy_primitives::U256; -use reth_primitives::{BlockWithSenders, SealedBlock, SealedHeader}; +use reth_primitives::{BlockWithSenders, NodePrimitives, SealedBlock, SealedHeader}; /// A Consensus implementation that does nothing. 
#[derive(Debug, Copy, Clone, Default)] @@ -44,11 +44,13 @@ impl Consensus for NoopConsensus { ) -> Result<(), ConsensusError> { Ok(()) } +} +impl FullConsensus for NoopConsensus { fn validate_block_post_execution( &self, - _block: &BlockWithSenders, - _input: PostExecutionInput<'_>, + _block: &BlockWithSenders, + _input: PostExecutionInput<'_, N::Receipt>, ) -> Result<(), ConsensusError> { Ok(()) } diff --git a/crates/consensus/consensus/src/test_utils.rs b/crates/consensus/consensus/src/test_utils.rs index ba683dd255f8..082c8ca8bb5a 100644 --- a/crates/consensus/consensus/src/test_utils.rs +++ b/crates/consensus/consensus/src/test_utils.rs @@ -1,7 +1,7 @@ -use crate::{Consensus, ConsensusError, HeaderValidator, PostExecutionInput}; +use crate::{Consensus, ConsensusError, FullConsensus, HeaderValidator, PostExecutionInput}; use alloy_primitives::U256; use core::sync::atomic::{AtomicBool, Ordering}; -use reth_primitives::{BlockWithSenders, SealedBlock, SealedHeader}; +use reth_primitives::{BlockWithSenders, NodePrimitives, SealedBlock, SealedHeader}; /// Consensus engine implementation for testing #[derive(Debug)] @@ -46,34 +46,36 @@ impl TestConsensus { } } -impl Consensus for TestConsensus { - fn validate_body_against_header( +impl FullConsensus for TestConsensus { + fn validate_block_post_execution( &self, - _body: &B, - _header: &SealedHeader, + _block: &BlockWithSenders, + _input: PostExecutionInput<'_, N::Receipt>, ) -> Result<(), ConsensusError> { - if self.fail_body_against_header() { + if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) } else { Ok(()) } } +} - fn validate_block_pre_execution( +impl Consensus for TestConsensus { + fn validate_body_against_header( &self, - _block: &SealedBlock, + _body: &B, + _header: &SealedHeader, ) -> Result<(), ConsensusError> { - if self.fail_validation() { + if self.fail_body_against_header() { Err(ConsensusError::BaseFeeMissing) } else { Ok(()) } } - fn validate_block_post_execution( + fn 
validate_block_pre_execution( &self, - _block: &BlockWithSenders, - _input: PostExecutionInput<'_>, + _block: &SealedBlock, ) -> Result<(), ConsensusError> { if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 256833898640..72d912d6b54f 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -5,6 +5,7 @@ use reth_chainspec::EthChainSpec; use reth_db::{test_utils::TempDatabase, DatabaseEnv}; use reth_engine_local::LocalPayloadAttributesBuilder; use reth_network_api::test_utils::PeersHandleProvider; +use reth_node_api::EngineValidator; use reth_node_builder::{ components::NodeComponentsBuilder, rpc::{EngineValidatorAddOn, RethRpcAddOns}, @@ -52,7 +53,7 @@ pub async fn setup( chain_spec: Arc, is_dev: bool, attributes_generator: impl Fn(u64) -> <::Engine as PayloadTypes>::PayloadBuilderAttributes + Copy + 'static, -) -> eyre::Result<(Vec>, TaskManager, Wallet)> +) -> eyre::Result<(Vec>, TaskManager, Wallet)> where N: Default + Node> + NodeTypesForTree + NodeTypesWithEngine, N::ComponentsBuilder: NodeComponentsBuilder< @@ -114,7 +115,7 @@ pub async fn setup_engine( is_dev: bool, attributes_generator: impl Fn(u64) -> <::Engine as PayloadTypes>::PayloadBuilderAttributes + Copy + 'static, ) -> eyre::Result<( - Vec>>>, + Vec>>>, TaskManager, Wallet, )> @@ -131,7 +132,10 @@ where >, >, N::AddOns: RethRpcAddOns>>> - + EngineValidatorAddOn>>>, + + EngineValidatorAddOn< + Adapter>>, + Validator: EngineValidator, + >, LocalPayloadAttributesBuilder: PayloadAttributesBuilder< <::Engine as PayloadTypes>::PayloadAttributes, >, @@ -179,6 +183,9 @@ where let mut node = NodeTestContext::new(node, attributes_generator).await?; + let genesis = node.block_hash(0); + node.engine_api.update_forkchoice(genesis, genesis).await?; + // Connect each node in a chain. 
if let Some(previous_node) = nodes.last_mut() { previous_node.connect(&mut node).await; @@ -199,7 +206,8 @@ where // Type aliases -type TmpDB = Arc>; +/// Testing database +pub type TmpDB = Arc>; type TmpNodeAdapter>> = FullNodeTypesAdapter, Provider>; @@ -212,5 +220,5 @@ pub type Adapter; /// Type alias for a type of `NodeHelper` -pub type NodeHelperType>> = - NodeTestContext, AO>; +pub type NodeHelperType>> = + NodeTestContext, >>::AddOns>; diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index b3eb641c1371..dcd24df5c7a2 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -3,6 +3,7 @@ use crate::{ rpc::RpcTestContext, traits::PayloadEnvelopeExt, }; use alloy_consensus::BlockHeader; +use alloy_eips::BlockId; use alloy_primitives::{BlockHash, BlockNumber, Bytes, B256}; use alloy_rpc_types_engine::PayloadStatusEnum; use alloy_rpc_types_eth::BlockNumberOrTag; @@ -134,8 +135,8 @@ where Ok((self.payload.expect_built_payload().await?, eth_attr)) } - /// Advances the node forward one block - pub async fn advance_block( + /// Triggers payload building job and submits it to the engine. 
+ pub async fn build_and_submit_payload( &mut self, ) -> eyre::Result<(Engine::BuiltPayload, Engine::PayloadBuilderAttributes)> where @@ -146,13 +147,27 @@ where { let (payload, eth_attr) = self.new_payload().await?; - let block_hash = self - .engine_api + self.engine_api .submit_payload(payload.clone(), eth_attr.clone(), PayloadStatusEnum::Valid) .await?; + Ok((payload, eth_attr)) + } + + /// Advances the node forward one block + pub async fn advance_block( + &mut self, + ) -> eyre::Result<(Engine::BuiltPayload, Engine::PayloadBuilderAttributes)> + where + ::ExecutionPayloadEnvelopeV3: + From + PayloadEnvelopeExt, + ::ExecutionPayloadEnvelopeV4: + From + PayloadEnvelopeExt, + { + let (payload, eth_attr) = self.build_and_submit_payload().await?; + // trigger forkchoice update via engine api to commit the block to the blockchain - self.engine_api.update_forkchoice(block_hash, block_hash).await?; + self.engine_api.update_forkchoice(payload.block().hash(), payload.block().hash()).await?; Ok((payload, eth_attr)) } @@ -238,6 +253,41 @@ where Ok(()) } + /// Gets block hash by number. + pub fn block_hash(&self, number: u64) -> BlockHash { + self.inner + .provider + .sealed_header_by_number_or_tag(BlockNumberOrTag::Number(number)) + .unwrap() + .unwrap() + .hash() + } + + /// Sends FCU and waits for the node to sync to the given block. + pub async fn sync_to(&self, block: BlockHash) -> eyre::Result<()> { + self.engine_api.update_forkchoice(block, block).await?; + + let start = std::time::Instant::now(); + + while self + .inner + .provider + .sealed_header_by_id(BlockId::Number(BlockNumberOrTag::Latest))? + .is_none_or(|h| h.hash() != block) + { + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + + assert!(start.elapsed() <= std::time::Duration::from_secs(10), "timed out"); + } + + // Hack to make sure that all components have time to process canonical state update. 
+ // Otherwise, this might result in e.g "nonce too low" errors when advancing chain further, + // making tests flaky. + tokio::time::sleep(std::time::Duration::from_millis(1000)).await; + + Ok(()) + } + /// Returns the RPC URL. pub fn rpc_url(&self) -> Url { let addr = self.inner.rpc_server_handle().http_local_addr().unwrap(); diff --git a/crates/e2e-test-utils/src/rpc.rs b/crates/e2e-test-utils/src/rpc.rs index 8399a482dfd6..37ee12987ca5 100644 --- a/crates/e2e-test-utils/src/rpc.rs +++ b/crates/e2e-test-utils/src/rpc.rs @@ -1,7 +1,6 @@ use alloy_consensus::TxEnvelope; use alloy_network::eip2718::Decodable2718; use alloy_primitives::{Bytes, B256}; -use alloy_rlp::Encodable; use reth_chainspec::EthereumHardforks; use reth_node_api::{FullNodeComponents, NodePrimitives}; use reth_node_builder::{rpc::RpcRegistry, NodeTypes}; @@ -21,7 +20,10 @@ where Node: FullNodeComponents< Types: NodeTypes< ChainSpec: EthereumHardforks, - Primitives: NodePrimitives, + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, >, >, EthApi: EthApiSpec + EthTransactions + TraceExt, diff --git a/crates/engine/invalid-block-hooks/src/witness.rs b/crates/engine/invalid-block-hooks/src/witness.rs index 98ee8dd2d137..632428d6b642 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -1,4 +1,4 @@ -use alloy_consensus::Header; +use alloy_consensus::BlockHeader; use alloy_primitives::{keccak256, B256, U256}; use alloy_rpc_types_debug::ExecutionWitness; use eyre::OptionExt; @@ -8,7 +8,7 @@ use reth_engine_primitives::InvalidBlockHook; use reth_evm::{ state_change::post_block_balance_increments, system_calls::SystemCaller, ConfigureEvm, }; -use reth_primitives::{Receipt, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{NodePrimitives, SealedBlockWithSenders, SealedHeader}; use reth_primitives_traits::SignedTransaction; use reth_provider::{BlockExecutionOutput, 
ChainSpecProvider, StateProviderFactory}; use reth_revm::{ @@ -17,7 +17,7 @@ use reth_revm::{ }; use reth_rpc_api::DebugApiClient; use reth_tracing::tracing::warn; -use reth_trie::{updates::TrieUpdates, HashedPostState, HashedStorage}; +use reth_trie::{updates::TrieUpdates, HashedStorage}; use serde::Serialize; use std::{collections::HashMap, fmt::Debug, fs::File, io::Write, path::PathBuf}; @@ -54,15 +54,18 @@ where + Send + Sync + 'static, - EvmConfig: ConfigureEvm
, { - fn on_invalid_block( + fn on_invalid_block( &self, - parent_header: &SealedHeader, - block: &SealedBlockWithSenders, - output: &BlockExecutionOutput, + parent_header: &SealedHeader, + block: &SealedBlockWithSenders, + output: &BlockExecutionOutput, trie_updates: Option<(&TrieUpdates, B256)>, - ) -> eyre::Result<()> { + ) -> eyre::Result<()> + where + N: NodePrimitives, + EvmConfig: ConfigureEvm
, + { // TODO(alexey): unify with `DebugApi::debug_execution_witness` // Setup database. @@ -86,7 +89,7 @@ where SystemCaller::new(self.evm_config.clone(), self.provider.chain_spec()); // Apply pre-block system contract calls. - system_caller.apply_pre_execution_changes(&block.clone().unseal(), &mut evm)?; + system_caller.apply_pre_execution_changes(&block.clone().unseal().block, &mut evm)?; // Re-execute all of the transactions in the block to load all touched accounts into // the cache DB. @@ -106,7 +109,7 @@ where // NOTE: This is not mut because we are not doing the DAO irregular state change here let balance_increments = post_block_balance_increments( self.provider.chain_spec().as_ref(), - &block.block.clone().unseal(), + &block.clone().unseal().block, U256::MAX, ); @@ -126,7 +129,7 @@ where // // Note: We grab *all* accounts in the cache here, as the `BundleState` prunes // referenced accounts + storage slots. - let mut hashed_state = HashedPostState::from_bundle_state(&bundle_state.state); + let mut hashed_state = db.database.hashed_post_state(&bundle_state); for (address, account) in db.cache.accounts { let hashed_address = keccak256(address); hashed_state @@ -163,24 +166,24 @@ where keys: state_preimages, }; let re_executed_witness_path = self.save_file( - format!("{}_{}.witness.re_executed.json", block.number, block.hash()), + format!("{}_{}.witness.re_executed.json", block.number(), block.hash()), &response, )?; if let Some(healthy_node_client) = &self.healthy_node_client { // Compare the witness against the healthy node. 
let healthy_node_witness = futures::executor::block_on(async move { - DebugApiClient::debug_execution_witness(healthy_node_client, block.number.into()) + DebugApiClient::debug_execution_witness(healthy_node_client, block.number().into()) .await })?; let healthy_path = self.save_file( - format!("{}_{}.witness.healthy.json", block.number, block.hash()), + format!("{}_{}.witness.healthy.json", block.number(), block.hash()), &healthy_node_witness, )?; // If the witnesses are different, write the diff to the output directory. if response != healthy_node_witness { - let filename = format!("{}_{}.witness.diff", block.number, block.hash()); + let filename = format!("{}_{}.witness.diff", block.number(), block.hash()); let diff_path = self.save_diff(filename, &response, &healthy_node_witness)?; warn!( target: "engine::invalid_block_hooks::witness", @@ -210,15 +213,15 @@ where if bundle_state != output.state { let original_path = self.save_file( - format!("{}_{}.bundle_state.original.json", block.number, block.hash()), + format!("{}_{}.bundle_state.original.json", block.number(), block.hash()), &output.state, )?; let re_executed_path = self.save_file( - format!("{}_{}.bundle_state.re_executed.json", block.number, block.hash()), + format!("{}_{}.bundle_state.re_executed.json", block.number(), block.hash()), &bundle_state, )?; - let filename = format!("{}_{}.bundle_state.diff", block.number, block.hash()); + let filename = format!("{}_{}.bundle_state.diff", block.number(), block.hash()); let diff_path = self.save_diff(filename, &bundle_state, &output.state)?; warn!( @@ -236,26 +239,27 @@ where state_provider.state_root_with_updates(hashed_state)?; if let Some((original_updates, original_root)) = trie_updates { if re_executed_root != original_root { - let filename = format!("{}_{}.state_root.diff", block.number, block.hash()); + let filename = format!("{}_{}.state_root.diff", block.number(), block.hash()); let diff_path = self.save_diff(filename, &re_executed_root, 
&original_root)?; warn!(target: "engine::invalid_block_hooks::witness", ?original_root, ?re_executed_root, diff_path = %diff_path.display(), "State root mismatch after re-execution"); } // If the re-executed state root does not match the _header_ state root, also log that. - if re_executed_root != block.state_root { - let filename = format!("{}_{}.header_state_root.diff", block.number, block.hash()); - let diff_path = self.save_diff(filename, &re_executed_root, &block.state_root)?; - warn!(target: "engine::invalid_block_hooks::witness", header_state_root=?block.state_root, ?re_executed_root, diff_path = %diff_path.display(), "Re-executed state root does not match block state root"); + if re_executed_root != block.state_root() { + let filename = + format!("{}_{}.header_state_root.diff", block.number(), block.hash()); + let diff_path = self.save_diff(filename, &re_executed_root, &block.state_root())?; + warn!(target: "engine::invalid_block_hooks::witness", header_state_root=?block.state_root(), ?re_executed_root, diff_path = %diff_path.display(), "Re-executed state root does not match block state root"); } if &trie_output != original_updates { // Trie updates are too big to diff, so we just save the original and re-executed let original_path = self.save_file( - format!("{}_{}.trie_updates.original.json", block.number, block.hash()), + format!("{}_{}.trie_updates.original.json", block.number(), block.hash()), original_updates, )?; let re_executed_path = self.save_file( - format!("{}_{}.trie_updates.re_executed.json", block.number, block.hash()), + format!("{}_{}.trie_updates.re_executed.json", block.number(), block.hash()), &trie_output, )?; warn!( @@ -292,23 +296,24 @@ where } } -impl InvalidBlockHook for InvalidBlockWitnessHook +impl InvalidBlockHook for InvalidBlockWitnessHook where + N: NodePrimitives, P: StateProviderFactory + ChainSpecProvider + Send + Sync + 'static, - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm
, { fn on_invalid_block( &self, - parent_header: &SealedHeader, - block: &SealedBlockWithSenders, - output: &BlockExecutionOutput, + parent_header: &SealedHeader, + block: &SealedBlockWithSenders, + output: &BlockExecutionOutput, trie_updates: Option<(&TrieUpdates, B256)>, ) { - if let Err(err) = self.on_invalid_block(parent_header, block, output, trie_updates) { + if let Err(err) = self.on_invalid_block::(parent_header, block, output, trie_updates) { warn!(target: "engine::invalid_block_hooks::witness", %err, "Failed to invoke hook"); } } diff --git a/crates/engine/local/Cargo.toml b/crates/engine/local/Cargo.toml index a1b74d13fee7..b3ad169e3189 100644 --- a/crates/engine/local/Cargo.toml +++ b/crates/engine/local/Cargo.toml @@ -16,12 +16,12 @@ reth-consensus.workspace = true reth-engine-primitives.workspace = true reth-engine-service.workspace = true reth-engine-tree.workspace = true +reth-node-types.workspace = true reth-evm.workspace = true reth-ethereum-engine-primitives.workspace = true reth-payload-builder.workspace = true reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true -reth-payload-validator.workspace = true reth-provider.workspace = true reth-prune.workspace = true reth-rpc-types-compat.workspace = true @@ -29,6 +29,7 @@ reth-transaction-pool.workspace = true reth-stages-api.workspace = true # alloy +alloy-consensus.workspace = true alloy-primitives.workspace = true alloy-rpc-types-engine.workspace = true diff --git a/crates/engine/local/src/miner.rs b/crates/engine/local/src/miner.rs index a5c7cf4d4c60..29418c0b714c 100644 --- a/crates/engine/local/src/miner.rs +++ b/crates/engine/local/src/miner.rs @@ -1,5 +1,6 @@ //! Contains the implementation of the mining mode for the local engine. 
+use alloy_consensus::BlockHeader; use alloy_primitives::{TxHash, B256}; use alloy_rpc_types_engine::{CancunPayloadFields, ExecutionPayloadSidecar, ForkchoiceState}; use eyre::OptionExt; @@ -114,7 +115,7 @@ where to_engine, mode, payload_builder, - last_timestamp: latest_header.timestamp, + last_timestamp: latest_header.timestamp(), last_block_hashes: vec![latest_header.hash()], }; diff --git a/crates/engine/local/src/payload.rs b/crates/engine/local/src/payload.rs index 5111360d5bf6..6355a2a00af2 100644 --- a/crates/engine/local/src/payload.rs +++ b/crates/engine/local/src/payload.rs @@ -39,6 +39,8 @@ where .chain_spec .is_cancun_active_at_timestamp(timestamp) .then(B256::random), + target_blobs_per_block: None, + max_blobs_per_block: None, } } } diff --git a/crates/engine/local/src/service.rs b/crates/engine/local/src/service.rs index e2b5e056d028..b8cab99970af 100644 --- a/crates/engine/local/src/service.rs +++ b/crates/engine/local/src/service.rs @@ -18,8 +18,8 @@ use crate::miner::{LocalMiner, MiningMode}; use futures_util::{Stream, StreamExt}; use reth_beacon_consensus::{BeaconConsensusEngineEvent, EngineNodeTypes}; use reth_chainspec::EthChainSpec; -use reth_consensus::Consensus; -use reth_engine_primitives::BeaconEngineMessage; +use reth_consensus::FullConsensus; +use reth_engine_primitives::{BeaconEngineMessage, EngineValidator}; use reth_engine_service::service::EngineMessageStream; use reth_engine_tree::{ chain::{ChainEvent, HandlerEvent}, @@ -31,9 +31,9 @@ use reth_engine_tree::{ tree::{EngineApiTreeHandler, InvalidBlockHook, TreeConfig}, }; use reth_evm::execute::BlockExecutorProvider; +use reth_node_types::BlockTy; use reth_payload_builder::PayloadBuilderHandle; use reth_payload_primitives::{PayloadAttributesBuilder, PayloadTypes}; -use reth_payload_validator::ExecutionPayloadValidator; use reth_provider::{providers::BlockchainProvider2, ChainSpecProvider, ProviderFactory}; use reth_prune::PrunerWithFactory; use reth_stages_api::MetricEventsSender; @@ 
-63,15 +63,16 @@ where { /// Constructor for [`LocalEngineService`]. #[allow(clippy::too_many_arguments)] - pub fn new( - consensus: Arc, - executor_factory: impl BlockExecutorProvider, + pub fn new( + consensus: Arc, + executor_factory: impl BlockExecutorProvider, provider: ProviderFactory, blockchain_db: BlockchainProvider2, pruner: PrunerWithFactory>, payload_builder: PayloadBuilderHandle, + payload_validator: V, tree_config: TreeConfig, - invalid_block_hook: Box, + invalid_block_hook: Box>, sync_metrics_tx: MetricEventsSender, to_engine: UnboundedSender>, from_engine: EngineMessageStream, @@ -80,6 +81,7 @@ where ) -> Self where B: PayloadAttributesBuilder<::PayloadAttributes>, + V: EngineValidator>, { let chain_spec = provider.chain_spec(); let engine_kind = @@ -87,8 +89,6 @@ where let persistence_handle = PersistenceHandle::spawn_service(provider, pruner, sync_metrics_tx); - let payload_validator = ExecutionPayloadValidator::new(chain_spec); - let canonical_in_memory_state = blockchain_db.canonical_in_memory_state(); let (to_tree_tx, from_tree) = EngineApiTreeHandler::::spawn_new( diff --git a/crates/engine/primitives/src/invalid_block_hook.rs b/crates/engine/primitives/src/invalid_block_hook.rs index 13c606511ddc..cfd127ae6f4c 100644 --- a/crates/engine/primitives/src/invalid_block_hook.rs +++ b/crates/engine/primitives/src/invalid_block_hook.rs @@ -1,35 +1,36 @@ use alloy_primitives::B256; use reth_execution_types::BlockExecutionOutput; -use reth_primitives::{Receipt, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{NodePrimitives, SealedBlockWithSenders, SealedHeader}; use reth_trie::updates::TrieUpdates; /// An invalid block hook. -pub trait InvalidBlockHook: Send + Sync { +pub trait InvalidBlockHook: Send + Sync { /// Invoked when an invalid block is encountered. 
fn on_invalid_block( &self, - parent_header: &SealedHeader, - block: &SealedBlockWithSenders, - output: &BlockExecutionOutput, + parent_header: &SealedHeader, + block: &SealedBlockWithSenders, + output: &BlockExecutionOutput, trie_updates: Option<(&TrieUpdates, B256)>, ); } -impl InvalidBlockHook for F +impl InvalidBlockHook for F where + N: NodePrimitives, F: Fn( - &SealedHeader, - &SealedBlockWithSenders, - &BlockExecutionOutput, + &SealedHeader, + &SealedBlockWithSenders, + &BlockExecutionOutput, Option<(&TrieUpdates, B256)>, ) + Send + Sync, { fn on_invalid_block( &self, - parent_header: &SealedHeader, - block: &SealedBlockWithSenders, - output: &BlockExecutionOutput, + parent_header: &SealedHeader, + block: &SealedBlockWithSenders, + output: &BlockExecutionOutput, trie_updates: Option<(&TrieUpdates, B256)>, ) { self(parent_header, block, output, trie_updates) diff --git a/crates/engine/service/Cargo.toml b/crates/engine/service/Cargo.toml index 8359c453dccb..8854fd18879d 100644 --- a/crates/engine/service/Cargo.toml +++ b/crates/engine/service/Cargo.toml @@ -18,7 +18,6 @@ reth-engine-tree.workspace = true reth-evm.workspace = true reth-network-p2p.workspace = true reth-payload-builder.workspace = true -reth-payload-validator.workspace = true reth-provider.workspace = true reth-prune.workspace = true reth-stages-api.workspace = true diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index 44d145c9c0b6..bc3e36beafc8 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -2,8 +2,8 @@ use futures::{Stream, StreamExt}; use pin_project::pin_project; use reth_beacon_consensus::{BeaconConsensusEngineEvent, EngineNodeTypes}; use reth_chainspec::EthChainSpec; -use reth_consensus::Consensus; -use reth_engine_primitives::BeaconEngineMessage; +use reth_consensus::FullConsensus; +use reth_engine_primitives::{BeaconEngineMessage, EngineValidator}; use reth_engine_tree::{ backfill::PipelineSync, 
download::BasicBlockDownloader, @@ -17,9 +17,8 @@ pub use reth_engine_tree::{ }; use reth_evm::execute::BlockExecutorProvider; use reth_network_p2p::EthBlockClient; -use reth_node_types::NodeTypesWithEngine; +use reth_node_types::{BlockTy, NodeTypesWithEngine}; use reth_payload_builder::PayloadBuilderHandle; -use reth_payload_validator::ExecutionPayloadValidator; use reth_provider::{providers::BlockchainProvider2, ProviderFactory}; use reth_prune::PrunerWithFactory; use reth_stages_api::{MetricEventsSender, Pipeline}; @@ -61,12 +60,12 @@ impl EngineService where N: EngineNodeTypes + PersistenceNodeTypes, Client: EthBlockClient + 'static, - E: BlockExecutorProvider + 'static, + E: BlockExecutorProvider + 'static, { /// Constructor for `EngineService`. #[allow(clippy::too_many_arguments)] - pub fn new( - consensus: Arc, + pub fn new( + consensus: Arc, executor_factory: E, chain_spec: Arc, client: Client, @@ -77,18 +76,21 @@ where blockchain_db: BlockchainProvider2, pruner: PrunerWithFactory>, payload_builder: PayloadBuilderHandle, + payload_validator: V, tree_config: TreeConfig, - invalid_block_hook: Box, + invalid_block_hook: Box>, sync_metrics_tx: MetricEventsSender, - ) -> Self { + ) -> Self + where + V: EngineValidator>, + { let engine_kind = if chain_spec.is_optimism() { EngineApiKind::OpStack } else { EngineApiKind::Ethereum }; - let downloader = BasicBlockDownloader::new(client, consensus.clone()); + let downloader = BasicBlockDownloader::new(client, consensus.clone().as_consensus()); let persistence_handle = PersistenceHandle::spawn_service(provider, pruner, sync_metrics_tx); - let payload_validator = ExecutionPayloadValidator::new(chain_spec); let canonical_in_memory_state = blockchain_db.canonical_in_memory_state(); @@ -148,7 +150,7 @@ mod tests { use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_engine_primitives::BeaconEngineMessage; use reth_engine_tree::{test_utils::TestPipelineBuilder, tree::NoopInvalidBlockHook}; - use 
reth_ethereum_engine_primitives::EthEngineTypes; + use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; use reth_evm_ethereum::execute::EthExecutorProvider; use reth_exex_types::FinishedExExHeight; use reth_network_p2p::test_utils::TestFullBlockClient; @@ -186,7 +188,7 @@ mod tests { let blockchain_db = BlockchainProvider2::with_latest(provider_factory.clone(), SealedHeader::default()) .unwrap(); - + let engine_payload_validator = EthereumEngineValidator::new(chain_spec.clone()); let (_tx, rx) = watch::channel(FinishedExExHeight::NoExExs); let pruner = Pruner::new_with_factory(provider_factory.clone(), vec![], 0, 0, None, rx); @@ -204,6 +206,7 @@ mod tests { blockchain_db, pruner, PayloadBuilderHandle::new(tx), + engine_payload_validator, TreeConfig::default(), Box::new(NoopInvalidBlockHook::default()), sync_metrics_tx, diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 47e5c2b04fe2..680b6933ebe6 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -16,7 +16,7 @@ reth-beacon-consensus.workspace = true reth-blockchain-tree-api.workspace = true reth-blockchain-tree.workspace = true reth-chain-state.workspace = true -reth-chainspec.workspace = true +reth-chainspec = { workspace = true, optional = true } reth-consensus.workspace = true reth-engine-primitives.workspace = true reth-errors.workspace = true @@ -25,7 +25,6 @@ reth-network-p2p.workspace = true reth-payload-builder-primitives.workspace = true reth-payload-builder.workspace = true reth-payload-primitives.workspace = true -reth-payload-validator.workspace = true reth-primitives.workspace = true reth-provider.workspace = true reth-prune.workspace = true diff --git a/crates/engine/tree/src/engine.rs b/crates/engine/tree/src/engine.rs index 005d4e54399c..947d025e9ab6 100644 --- a/crates/engine/tree/src/engine.rs +++ b/crates/engine/tree/src/engine.rs @@ -10,7 +10,7 @@ use futures::{Stream, StreamExt}; use 
reth_beacon_consensus::BeaconConsensusEngineEvent; use reth_chain_state::ExecutedBlock; use reth_engine_primitives::{BeaconEngineMessage, EngineTypes}; -use reth_primitives::SealedBlockWithSenders; +use reth_primitives::{NodePrimitives, SealedBlockWithSenders}; use std::{ collections::HashSet, fmt::Display, @@ -270,25 +270,25 @@ impl From> for FromEngine { /// Event from the consensus engine. // TODO(mattsse): find a more appropriate name for this variant, consider phasing it out. - BeaconConsensus(BeaconConsensusEngineEvent), + BeaconConsensus(BeaconConsensusEngineEvent), /// Backfill action is needed. BackfillAction(BackfillAction), /// Block download is needed. Download(DownloadRequest), } -impl EngineApiEvent { +impl EngineApiEvent { /// Returns `true` if the event is a backfill action. pub const fn is_backfill_action(&self) -> bool { matches!(self, Self::BackfillAction(_)) } } -impl From for EngineApiEvent { - fn from(event: BeaconConsensusEngineEvent) -> Self { +impl From> for EngineApiEvent { + fn from(event: BeaconConsensusEngineEvent) -> Self { Self::BeaconConsensus(event) } } diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs index 950310b170f7..dcdeee674489 100644 --- a/crates/engine/tree/src/persistence.rs +++ b/crates/engine/tree/src/persistence.rs @@ -2,7 +2,7 @@ use crate::metrics::PersistenceMetrics; use alloy_eips::BlockNumHash; use reth_chain_state::ExecutedBlock; use reth_errors::ProviderError; -use reth_primitives::EthPrimitives; +use reth_primitives::{EthPrimitives, NodePrimitives}; use reth_provider::{ providers::ProviderNodeTypes, writer::UnifiedStorageWriter, BlockHashReader, ChainStateBlockWriter, DatabaseProviderFactory, ProviderFactory, StaticFileProviderFactory, @@ -169,13 +169,13 @@ pub enum PersistenceError { /// A signal to the persistence service that part of the tree state can be persisted. 
#[derive(Debug)] -pub enum PersistenceAction { +pub enum PersistenceAction { /// The section of tree state that should be persisted. These blocks are expected in order of /// increasing block number. /// /// First, header, transaction, and receipt-related data should be written to static files. /// Then the execution history-related data will be written to the database. - SaveBlocks(Vec, oneshot::Sender>), + SaveBlocks(Vec>, oneshot::Sender>), /// Removes block data above the given block number from the database. /// @@ -192,15 +192,16 @@ pub enum PersistenceAction { /// A handle to the persistence service #[derive(Debug, Clone)] -pub struct PersistenceHandle { +pub struct PersistenceHandle { /// The channel used to communicate with the persistence service sender: Sender, + _marker: std::marker::PhantomData, } -impl PersistenceHandle { +impl PersistenceHandle { /// Create a new [`PersistenceHandle`] from a [`Sender`]. pub const fn new(sender: Sender) -> Self { - Self { sender } + Self { sender, _marker: std::marker::PhantomData } } /// Create a new [`PersistenceHandle`], and spawn the persistence service. 
diff --git a/crates/engine/tree/src/tree/invalid_block_hook.rs b/crates/engine/tree/src/tree/invalid_block_hook.rs index 98244ed13499..7c7b0631dd20 100644 --- a/crates/engine/tree/src/tree/invalid_block_hook.rs +++ b/crates/engine/tree/src/tree/invalid_block_hook.rs @@ -1,6 +1,6 @@ use alloy_primitives::B256; use reth_engine_primitives::InvalidBlockHook; -use reth_primitives::{Receipt, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{NodePrimitives, SealedBlockWithSenders, SealedHeader}; use reth_provider::BlockExecutionOutput; use reth_trie::updates::TrieUpdates; @@ -9,32 +9,32 @@ use reth_trie::updates::TrieUpdates; #[non_exhaustive] pub struct NoopInvalidBlockHook; -impl InvalidBlockHook for NoopInvalidBlockHook { +impl InvalidBlockHook for NoopInvalidBlockHook { fn on_invalid_block( &self, - _parent_header: &SealedHeader, - _block: &SealedBlockWithSenders, - _output: &BlockExecutionOutput, + _parent_header: &SealedHeader, + _block: &SealedBlockWithSenders, + _output: &BlockExecutionOutput, _trie_updates: Option<(&TrieUpdates, B256)>, ) { } } /// Multiple [`InvalidBlockHook`]s that are executed in order. 
-pub struct InvalidBlockHooks(pub Vec>); +pub struct InvalidBlockHooks(pub Vec>>); -impl std::fmt::Debug for InvalidBlockHooks { +impl std::fmt::Debug for InvalidBlockHooks { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("InvalidBlockHooks").field("len", &self.0.len()).finish() } } -impl InvalidBlockHook for InvalidBlockHooks { +impl InvalidBlockHook for InvalidBlockHooks { fn on_invalid_block( &self, - parent_header: &SealedHeader, - block: &SealedBlockWithSenders, - output: &BlockExecutionOutput, + parent_header: &SealedHeader, + block: &SealedBlockWithSenders, + output: &BlockExecutionOutput, trie_updates: Option<(&TrieUpdates, B256)>, ) { for hook in &self.0 { diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 44270cbfdf48..763d5d990c5d 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -1,8 +1,9 @@ use crate::{ backfill::{BackfillAction, BackfillSyncState}, chain::FromOrchestrator, - engine::{DownloadRequest, EngineApiEvent, FromEngine}, + engine::{DownloadRequest, EngineApiEvent, EngineApiKind, EngineApiRequest, FromEngine}, persistence::PersistenceHandle, + tree::metrics::EngineApiMetrics, }; use alloy_consensus::{BlockHeader, Header}; use alloy_eips::BlockNumHash; @@ -24,32 +25,30 @@ use reth_blockchain_tree::{ use reth_chain_state::{ CanonicalInMemoryState, ExecutedBlock, MemoryOverlayStateProvider, NewCanonicalChain, }; -use reth_chainspec::EthereumHardforks; -use reth_consensus::{Consensus, PostExecutionInput}; +use reth_consensus::{Consensus, FullConsensus, PostExecutionInput}; use reth_engine_primitives::{ BeaconEngineMessage, BeaconOnNewPayloadError, EngineApiMessageVersion, EngineTypes, - ForkchoiceStateTracker, OnForkChoiceUpdated, + EngineValidator, ForkchoiceStateTracker, OnForkChoiceUpdated, }; use reth_errors::{ConsensusError, ProviderResult}; use reth_evm::execute::BlockExecutorProvider; use 
reth_payload_builder::PayloadBuilderHandle; use reth_payload_builder_primitives::PayloadBuilder; -use reth_payload_primitives::{PayloadAttributes, PayloadBuilderAttributes}; -use reth_payload_validator::ExecutionPayloadValidator; +use reth_payload_primitives::PayloadBuilderAttributes; use reth_primitives::{ Block, EthPrimitives, GotExpected, NodePrimitives, SealedBlock, SealedBlockWithSenders, SealedHeader, }; use reth_provider::{ providers::ConsistentDbView, BlockReader, DatabaseProviderFactory, ExecutionOutcome, - ProviderError, StateProviderBox, StateProviderFactory, StateReader, StateRootProvider, - TransactionVariant, + HashedPostStateProvider, ProviderError, StateCommitmentProvider, StateProviderBox, + StateProviderFactory, StateReader, StateRootProvider, TransactionVariant, }; use reth_revm::database::StateProviderDatabase; use reth_stages_api::ControlFlow; use reth_trie::{updates::TrieUpdates, HashedPostState, TrieInput}; use reth_trie_parallel::root::{ParallelStateRoot, ParallelStateRootError}; -use revm_primitives::ResultAndState; +use revm_primitives::EvmState; use std::{ cmp::Ordering, collections::{btree_map, hash_map, BTreeMap, VecDeque}, @@ -72,10 +71,6 @@ pub mod config; mod invalid_block_hook; mod metrics; mod persistence_state; -use crate::{ - engine::{EngineApiKind, EngineApiRequest}, - tree::metrics::EngineApiMetrics, -}; pub use config::TreeConfig; pub use invalid_block_hook::{InvalidBlockHooks, NoopInvalidBlockHook}; pub use persistence_state::PersistenceState; @@ -472,11 +467,15 @@ pub enum TreeAction { /// /// This type is responsible for processing engine API requests, maintaining the canonical state and /// emitting events. 
-pub struct EngineApiTreeHandler { +pub struct EngineApiTreeHandler +where + N: NodePrimitives, + T: EngineTypes, +{ provider: P, executor_provider: E, - consensus: Arc, - payload_validator: ExecutionPayloadValidator, + consensus: Arc, + payload_validator: V, /// Keeps track of internals such as executed and buffered blocks. state: EngineApiTreeState, /// The half for sending messages to the engine. @@ -509,15 +508,17 @@ pub struct EngineApiTreeHandler { /// Metrics for the engine api. metrics: EngineApiMetrics, /// An invalid block hook. - invalid_block_hook: Box, + invalid_block_hook: Box>, /// The engine API variant of this handler engine_kind: EngineApiKind, /// Captures the types the engine operates on _primtives: PhantomData, } -impl std::fmt::Debug - for EngineApiTreeHandler +impl std::fmt::Debug + for EngineApiTreeHandler +where + N: NodePrimitives, { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("EngineApiTreeHandler") @@ -540,27 +541,33 @@ impl std::fmt::Debug } } -impl EngineApiTreeHandler +impl EngineApiTreeHandler where - N: NodePrimitives, + N: NodePrimitives< + Block = reth_primitives::Block, + BlockHeader = reth_primitives::Header, + Receipt = reth_primitives::Receipt, + >, P: DatabaseProviderFactory - + BlockReader + + BlockReader + StateProviderFactory + StateReader + + StateCommitmentProvider + + HashedPostStateProvider + Clone + 'static,

::Provider: BlockReader, - E: BlockExecutorProvider, + E: BlockExecutorProvider, T: EngineTypes, - Spec: Send + Sync + EthereumHardforks + 'static, + V: EngineValidator, { /// Creates a new [`EngineApiTreeHandler`]. #[allow(clippy::too_many_arguments)] pub fn new( provider: P, executor_provider: E, - consensus: Arc, - payload_validator: ExecutionPayloadValidator, + consensus: Arc, + payload_validator: V, outgoing: UnboundedSender, state: EngineApiTreeState, canonical_in_memory_state: CanonicalInMemoryState, @@ -595,7 +602,7 @@ where } /// Sets the invalid block hook. - fn set_invalid_block_hook(&mut self, invalid_block_hook: Box) { + fn set_invalid_block_hook(&mut self, invalid_block_hook: Box>) { self.invalid_block_hook = invalid_block_hook; } @@ -608,13 +615,13 @@ where pub fn spawn_new( provider: P, executor_provider: E, - consensus: Arc, - payload_validator: ExecutionPayloadValidator, + consensus: Arc, + payload_validator: V, persistence: PersistenceHandle, payload_builder: PayloadBuilderHandle, canonical_in_memory_state: CanonicalInMemoryState, config: TreeConfig, - invalid_block_hook: Box, + invalid_block_hook: Box>, kind: EngineApiKind, ) -> (Sender>>, UnboundedReceiver) { let best_block_number = provider.best_block_number().unwrap_or(0); @@ -1359,7 +1366,7 @@ where // update the tracked chain height, after backfill sync both the canonical height and // persisted height are the same self.state.tree_state.set_canonical_head(new_head.num_hash()); - self.persistence_state.finish(new_head.hash(), new_head.number); + self.persistence_state.finish(new_head.hash(), new_head.number()); // update the tracked canonical head self.canonical_in_memory_state.set_canonical_head(new_head); @@ -1563,7 +1570,7 @@ where .provider .get_state(block.number())? 
.ok_or_else(|| ProviderError::StateForNumberNotFound(block.number()))?; - let hashed_state = execution_output.hash_state_slow(); + let hashed_state = self.provider.hashed_post_state(execution_output.state()); Ok(Some(ExecutedBlock { block: Arc::new(block), @@ -1624,7 +1631,7 @@ where // the hash could belong to an unknown block or a persisted block if let Some(header) = self.provider.header(&hash)? { - debug!(target: "engine::tree", %hash, number = %header.number, "found canonical state for block in database"); + debug!(target: "engine::tree", %hash, number = %header.number(), "found canonical state for block in database"); // the block is known and persisted let historical = self.provider.state_by_block_hash(hash)?; return Ok(Some(historical)) @@ -2214,7 +2221,7 @@ where // TODO: create StateRootTask with the receiving end of a channel and // pass the sending end of the channel to the state hook. - let noop_state_hook = |_result_and_state: &ResultAndState| {}; + let noop_state_hook = |_state: &EvmState| {}; let output = self.metrics.executor.execute_metered( executor, (&block, U256::MAX).into(), @@ -2237,7 +2244,7 @@ where return Err(err.into()) } - let hashed_state = HashedPostState::from_bundle_state(&output.state.state); + let hashed_state = self.provider.hashed_post_state(&output.state); trace!(target: "engine::tree", block=?sealed_block.num_hash(), "Calculating block state root"); let root_time = Instant::now(); @@ -2534,12 +2541,10 @@ where state: ForkchoiceState, version: EngineApiMessageVersion, ) -> OnForkChoiceUpdated { - // 7. Client software MUST ensure that payloadAttributes.timestamp is greater than timestamp - // of a block referenced by forkchoiceState.headBlockHash. If this condition isn't held - // client software MUST respond with -38003: `Invalid payload attributes` and MUST NOT - // begin a payload build process. In such an event, the forkchoiceState update MUST NOT - // be rolled back. 
- if attrs.timestamp() <= head.timestamp { + if let Err(err) = + self.payload_validator.validate_payload_attributes_against_header(&attrs, head) + { + warn!(target: "engine::tree", %err, ?head, "Invalid payload attributes"); return OnForkChoiceUpdated::invalid_payload_attributes() } @@ -2629,7 +2634,7 @@ mod tests { use reth_chain_state::{test_utils::TestBlockBuilder, BlockState}; use reth_chainspec::{ChainSpec, HOLESKY, MAINNET}; use reth_engine_primitives::ForkchoiceStatus; - use reth_ethereum_engine_primitives::EthEngineTypes; + use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; use reth_evm::test_utils::MockExecutorProvider; use reth_primitives::{BlockExt, EthPrimitives}; use reth_provider::test_utils::MockEthProvider; @@ -2701,7 +2706,7 @@ mod tests { MockEthProvider, MockExecutorProvider, EthEngineTypes, - ChainSpec, + EthereumEngineValidator, >, to_tree_tx: Sender>>, from_tree_rx: UnboundedReceiver, @@ -2736,7 +2741,7 @@ mod tests { let provider = MockEthProvider::default(); let executor_provider = MockExecutorProvider::default(); - let payload_validator = ExecutionPayloadValidator::new(chain_spec.clone()); + let payload_validator = EthereumEngineValidator::new(chain_spec.clone()); let (from_tree_tx, from_tree_rx) = unbounded_channel(); diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index 7d25461c50a6..eea236fdaaec 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -3,6 +3,7 @@ use alloy_primitives::map::{HashMap, HashSet}; use reth_provider::{ providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, + StateCommitmentProvider, }; use reth_trie::{ proof::Proof, updates::TrieUpdates, HashedPostState, HashedStorage, MultiProof, Nibbles, @@ -10,7 +11,10 @@ use reth_trie::{ }; use reth_trie_db::DatabaseProof; use reth_trie_parallel::root::ParallelStateRootError; -use reth_trie_sparse::{SparseStateTrie, SparseStateTrieResult}; +use 
reth_trie_sparse::{ + errors::{SparseStateTrieResult, SparseTrieError}, + SparseStateTrie, +}; use revm_primitives::{keccak256, EvmState, B256}; use std::{ collections::BTreeMap, @@ -137,9 +141,7 @@ impl ProofSequencer { } } - if !consecutive_proofs.is_empty() { - self.next_to_deliver += consecutive_proofs.len() as u64; - } + self.next_to_deliver += consecutive_proofs.len() as u64; consecutive_proofs } @@ -160,15 +162,15 @@ impl ProofSequencer { /// Then it updates relevant leaves according to the result of the transaction. #[derive(Debug)] pub(crate) struct StateRootTask { - /// Task configuration + /// Task configuration. config: StateRootConfig, - /// Receiver for state root related messages + /// Receiver for state root related messages. rx: Receiver, - /// Sender for state root related messages + /// Sender for state root related messages. tx: Sender, - /// Proof targets that have been already fetched - fetched_proof_targets: HashSet, - /// Proof sequencing handler + /// Proof targets that have been already fetched. + fetched_proof_targets: HashMap>, + /// Proof sequencing handler. proof_sequencer: ProofSequencer, /// The sparse trie used for the state root calculation. If [`None`], then update is in /// progress. 
@@ -178,7 +180,12 @@ pub(crate) struct StateRootTask { #[allow(dead_code)] impl StateRootTask where - Factory: DatabaseProviderFactory + Clone + Send + Sync + 'static, + Factory: DatabaseProviderFactory + + StateCommitmentProvider + + Clone + + Send + + Sync + + 'static, { /// Creates a new state root task with the unified message channel pub(crate) fn new( @@ -218,38 +225,44 @@ where view: ConsistentDbView, input: Arc, update: EvmState, - fetched_proof_targets: &HashSet, + fetched_proof_targets: &mut HashMap>, proof_sequence_number: u64, state_root_message_sender: Sender, - ) -> HashMap> { + ) { let mut hashed_state_update = HashedPostState::default(); for (address, account) in update { if account.is_touched() { let hashed_address = keccak256(address); + trace!(target: "engine::root", ?address, ?hashed_address, "Adding account to state update"); let destroyed = account.is_selfdestructed(); - hashed_state_update.accounts.insert( - hashed_address, - if destroyed || account.is_empty() { None } else { Some(account.info.into()) }, - ); + let info = if account.is_empty() { None } else { Some(account.info.into()) }; + hashed_state_update.accounts.insert(hashed_address, info); - if destroyed || !account.storage.is_empty() { - let storage = account.storage.into_iter().filter_map(|(slot, value)| { + let mut changed_storage_iter = account + .storage + .into_iter() + .filter_map(|(slot, value)| { value .is_changed() .then(|| (keccak256(B256::from(slot)), value.present_value)) - }); - hashed_state_update - .storages - .insert(hashed_address, HashedStorage::from_iter(destroyed, storage)); + }) + .peekable(); + if destroyed || changed_storage_iter.peek().is_some() { + hashed_state_update.storages.insert( + hashed_address, + HashedStorage::from_iter(destroyed, changed_storage_iter), + ); } } } let proof_targets = get_proof_targets(&hashed_state_update, fetched_proof_targets); + for (address, slots) in &proof_targets { + 
fetched_proof_targets.entry(*address).or_default().extend(slots) + } // Dispatch proof gathering for this state update - let targets = proof_targets.clone(); rayon::spawn(move || { let provider = match view.provider_ro() { Ok(provider) => provider, @@ -264,7 +277,7 @@ where provider.tx_ref(), // TODO(alexey): this clone can be expensive, we should avoid it input.as_ref().clone(), - targets, + proof_targets, ); match result { Ok(proof) => { @@ -279,8 +292,6 @@ where } } }); - - proof_targets } /// Handler for new proof calculated, aggregates all the existing sequential proofs. @@ -316,7 +327,7 @@ where ); // TODO(alexey): store proof targets in `ProofSequecner` to avoid recomputing them - let targets = get_proof_targets(&state, &HashSet::default()); + let targets = get_proof_targets(&state, &HashMap::default()); let tx = self.tx.clone(); rayon::spawn(move || { @@ -355,16 +366,14 @@ where total_updates = updates_received, "Received new state update" ); - let targets = Self::on_state_update( + Self::on_state_update( self.config.consistent_view.clone(), self.config.input.clone(), update, - &self.fetched_proof_targets, + &mut self.fetched_proof_targets, self.proof_sequencer.next_sequence(), self.tx.clone(), ); - self.fetched_proof_targets.extend(targets.keys()); - self.fetched_proof_targets.extend(targets.values().flatten()); } StateRootMessage::ProofCalculated { proof, state_update, sequence_number } => { proofs_processed += 1; @@ -375,6 +384,8 @@ where "Processing calculated proof" ); + trace!(target: "engine::root", ?proof, "Proof calculated"); + if let Some((combined_proof, combined_state_update)) = self.on_proof(sequence_number, proof, state_update) { @@ -459,19 +470,37 @@ where } } +/// Returns accounts only with those storages that were not already fetched, and +/// if there are no such storages and the account itself was already fetched, the +/// account shouldn't be included. 
fn get_proof_targets( state_update: &HashedPostState, - fetched_proof_targets: &HashSet, + fetched_proof_targets: &HashMap>, ) -> HashMap> { - state_update - .accounts - .keys() - .filter(|hashed_address| !fetched_proof_targets.contains(*hashed_address)) - .map(|hashed_address| (*hashed_address, HashSet::default())) - .chain(state_update.storages.iter().map(|(hashed_address, storage)| { - (*hashed_address, storage.storage.keys().copied().collect()) - })) - .collect() + let mut targets = HashMap::default(); + + // first collect all new accounts (not previously fetched) + for &hashed_address in state_update.accounts.keys() { + if !fetched_proof_targets.contains_key(&hashed_address) { + targets.insert(hashed_address, HashSet::default()); + } + } + + // then process storage slots for all accounts in the state update + for (hashed_address, storage) in &state_update.storages { + let fetched = fetched_proof_targets.get(hashed_address); + let mut changed_slots = storage + .storage + .keys() + .filter(|slot| !fetched.is_some_and(|f| f.contains(*slot))) + .peekable(); + + if changed_slots.peek().is_some() { + targets.entry(*hashed_address).or_default().extend(changed_slots); + } + } + + targets } /// Updates the sparse trie with the given proofs and state, and returns the updated trie and the @@ -482,6 +511,7 @@ fn update_sparse_trie( targets: HashMap>, state: HashedPostState, ) -> SparseStateTrieResult<(Box, Duration)> { + trace!(target: "engine::root::sparse", "Updating sparse trie"); let started_at = Instant::now(); // Reveal new accounts and storage slots. @@ -489,29 +519,34 @@ fn update_sparse_trie( // Update storage slots with new values and calculate storage roots. 
for (address, storage) in state.storages { + trace!(target: "engine::root::sparse", ?address, "Updating storage"); + let storage_trie = trie.storage_trie_mut(&address).ok_or(SparseTrieError::Blind)?; + if storage.wiped { - trie.wipe_storage(address)?; + trace!(target: "engine::root::sparse", ?address, "Wiping storage"); + storage_trie.wipe(); } for (slot, value) in storage.storage { let slot_nibbles = Nibbles::unpack(slot); if value.is_zero() { + trace!(target: "engine::root::sparse", ?address, ?slot, "Removing storage slot"); + // TODO: handle blinded node error - trie.remove_storage_leaf(address, &slot_nibbles)?; + storage_trie.remove_leaf(&slot_nibbles)?; } else { - trie.update_storage_leaf( - address, - slot_nibbles, - alloy_rlp::encode_fixed_size(&value).to_vec(), - )?; + trace!(target: "engine::root::sparse", ?address, ?slot, "Updating storage slot"); + storage_trie + .update_leaf(slot_nibbles, alloy_rlp::encode_fixed_size(&value).to_vec())?; } } - trie.storage_root(address).unwrap(); + storage_trie.root(); } // Update accounts with new values for (address, account) in state.accounts { + trace!(target: "engine::root::sparse", ?address, "Updating account"); trie.update_account(address, account.unwrap_or_default())?; } @@ -770,4 +805,166 @@ mod tests { assert_eq!(ready.len(), 5); assert!(!sequencer.has_pending()); } + + fn create_get_proof_targets_state() -> HashedPostState { + let mut state = HashedPostState::default(); + + let addr1 = B256::random(); + let addr2 = B256::random(); + state.accounts.insert(addr1, Some(Default::default())); + state.accounts.insert(addr2, Some(Default::default())); + + let mut storage = HashedStorage::default(); + let slot1 = B256::random(); + let slot2 = B256::random(); + storage.storage.insert(slot1, U256::ZERO); + storage.storage.insert(slot2, U256::from(1)); + state.storages.insert(addr1, storage); + + state + } + + #[test] + fn test_get_proof_targets_new_account_targets() { + let state = create_get_proof_targets_state(); + 
let fetched = HashMap::default(); + + let targets = get_proof_targets(&state, &fetched); + + // should return all accounts as targets since nothing was fetched before + assert_eq!(targets.len(), state.accounts.len()); + for addr in state.accounts.keys() { + assert!(targets.contains_key(addr)); + } + } + + #[test] + fn test_get_proof_targets_new_storage_targets() { + let state = create_get_proof_targets_state(); + let fetched = HashMap::default(); + + let targets = get_proof_targets(&state, &fetched); + + // verify storage slots are included for accounts with storage + for (addr, storage) in &state.storages { + assert!(targets.contains_key(addr)); + let target_slots = &targets[addr]; + assert_eq!(target_slots.len(), storage.storage.len()); + for slot in storage.storage.keys() { + assert!(target_slots.contains(slot)); + } + } + } + + #[test] + fn test_get_proof_targets_filter_already_fetched_accounts() { + let state = create_get_proof_targets_state(); + let mut fetched = HashMap::default(); + + // select an account that has no storage updates + let fetched_addr = state + .accounts + .keys() + .find(|&&addr| !state.storages.contains_key(&addr)) + .expect("Should have an account without storage"); + + // mark the account as already fetched + fetched.insert(*fetched_addr, HashSet::default()); + + let targets = get_proof_targets(&state, &fetched); + + // should not include the already fetched account since it has no storage updates + assert!(!targets.contains_key(fetched_addr)); + // other accounts should still be included + assert_eq!(targets.len(), state.accounts.len() - 1); + } + + #[test] + fn test_get_proof_targets_filter_already_fetched_storage() { + let state = create_get_proof_targets_state(); + let mut fetched = HashMap::default(); + + // mark one storage slot as already fetched + let (addr, storage) = state.storages.iter().next().unwrap(); + let mut fetched_slots = HashSet::default(); + let fetched_slot = *storage.storage.keys().next().unwrap(); + 
fetched_slots.insert(fetched_slot); + fetched.insert(*addr, fetched_slots); + + let targets = get_proof_targets(&state, &fetched); + + // should not include the already fetched storage slot + let target_slots = &targets[addr]; + assert!(!target_slots.contains(&fetched_slot)); + assert_eq!(target_slots.len(), storage.storage.len() - 1); + } + + #[test] + fn test_get_proof_targets_empty_state() { + let state = HashedPostState::default(); + let fetched = HashMap::default(); + + let targets = get_proof_targets(&state, &fetched); + + assert!(targets.is_empty()); + } + + #[test] + fn test_get_proof_targets_mixed_fetched_state() { + let mut state = HashedPostState::default(); + let mut fetched = HashMap::default(); + + let addr1 = B256::random(); + let addr2 = B256::random(); + let slot1 = B256::random(); + let slot2 = B256::random(); + + state.accounts.insert(addr1, Some(Default::default())); + state.accounts.insert(addr2, Some(Default::default())); + + let mut storage = HashedStorage::default(); + storage.storage.insert(slot1, U256::ZERO); + storage.storage.insert(slot2, U256::from(1)); + state.storages.insert(addr1, storage); + + let mut fetched_slots = HashSet::default(); + fetched_slots.insert(slot1); + fetched.insert(addr1, fetched_slots); + + let targets = get_proof_targets(&state, &fetched); + + assert!(targets.contains_key(&addr2)); + assert!(!targets[&addr1].contains(&slot1)); + assert!(targets[&addr1].contains(&slot2)); + } + + #[test] + fn test_get_proof_targets_unmodified_account_with_storage() { + let mut state = HashedPostState::default(); + let fetched = HashMap::default(); + + let addr = B256::random(); + let slot1 = B256::random(); + let slot2 = B256::random(); + + // don't add the account to state.accounts (simulating unmodified account) + // but add storage updates for this account + let mut storage = HashedStorage::default(); + storage.storage.insert(slot1, U256::from(1)); + storage.storage.insert(slot2, U256::from(2)); + state.storages.insert(addr, 
storage); + + assert!(!state.accounts.contains_key(&addr)); + assert!(!fetched.contains_key(&addr)); + + let targets = get_proof_targets(&state, &fetched); + + // verify that we still get the storage slots for the unmodified account + assert!(targets.contains_key(&addr)); + + let target_slots = &targets[&addr]; + assert_eq!(target_slots.len(), 2); + assert!(target_slots.contains(&slot1)); + assert!(target_slots.contains(&slot2)); + } } diff --git a/crates/engine/util/Cargo.toml b/crates/engine/util/Cargo.toml index 6eb22340ec10..54f9321f239a 100644 --- a/crates/engine/util/Cargo.toml +++ b/crates/engine/util/Cargo.toml @@ -14,6 +14,7 @@ workspace = true # reth reth-primitives.workspace = true reth-errors.workspace = true +reth-consensus-common.workspace = true reth-fs-util.workspace = true reth-rpc-types-compat.workspace = true reth-engine-primitives.workspace = true diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index 20e2b21446ae..24e141622842 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -18,7 +18,10 @@ use reth_evm::{ ConfigureEvm, }; use reth_payload_validator::ExecutionPayloadValidator; -use reth_primitives::{proofs, Block, BlockBody, BlockExt, Receipt, Receipts}; +use reth_primitives::{ + proofs, transaction::SignedTransactionIntoRecoveredExt, Block, BlockBody, BlockExt, Receipt, + Receipts, +}; use reth_provider::{BlockReader, ExecutionOutcome, ProviderError, StateProviderFactory}; use reth_revm::{ database::StateProviderDatabase, @@ -26,7 +29,6 @@ use reth_revm::{ DatabaseCommit, }; use reth_rpc_types_compat::engine::payload::block_to_payload; -use reth_trie::HashedPostState; use revm_primitives::{calc_excess_blob_gas, EVMError, EnvWithHandlerCfg}; use std::{ collections::VecDeque, @@ -108,7 +110,7 @@ where S: Stream>, Engine: EngineTypes, Provider: BlockReader + StateProviderFactory, - Evm: ConfigureEvm

, + Evm: ConfigureEvm
, Spec: EthereumHardforks, { type Item = S::Item; @@ -255,7 +257,7 @@ fn create_reorg_head( ) -> RethResult<(ExecutionPayload, ExecutionPayloadSidecar)> where Provider: BlockReader + StateProviderFactory, - Evm: ConfigureEvm
, + Evm: ConfigureEvm
, Spec: EthereumHardforks, { let chain_spec = payload_validator.chain_spec(); @@ -379,7 +381,7 @@ where reorg_target.number, Default::default(), ); - let hashed_state = HashedPostState::from_bundle_state(&outcome.state().state); + let hashed_state = state_provider.hashed_post_state(outcome.state()); let (blob_gas_used, excess_blob_gas) = if chain_spec.is_cancun_active_at_timestamp(reorg_target.timestamp) { @@ -415,11 +417,12 @@ where transactions_root: proofs::calculate_transaction_root(&transactions), receipts_root: outcome.receipts_root_slow(reorg_target.header.number).unwrap(), logs_bloom: outcome.block_logs_bloom(reorg_target.header.number).unwrap(), - requests_hash: None, // TODO(prague) gas_used: cumulative_gas_used, blob_gas_used: blob_gas_used.map(Into::into), excess_blob_gas: excess_blob_gas.map(Into::into), state_root: state_provider.state_root(hashed_state)?, + requests_hash: None, // TODO(prague) + target_blobs_per_block: None, // TODO(prague) }, body: BlockBody { transactions, diff --git a/crates/ethereum-forks/Cargo.toml b/crates/ethereum-forks/Cargo.toml index 9f7ce7ee8f39..1a08498633c4 100644 --- a/crates/ethereum-forks/Cargo.toml +++ b/crates/ethereum-forks/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] # ethereum alloy-chains.workspace = true -alloy-primitives = { workspace = true, features = ["serde", "rand", "rlp"] } +alloy-primitives = { workspace = true, features = ["serde", "rlp"] } alloy-rlp = { workspace = true, features = ["arrayvec", "derive"] } once_cell.workspace = true @@ -23,7 +23,7 @@ crc = "3" # misc serde = { workspace = true, features = ["derive"], optional = true } -thiserror-no-std = { workspace = true, default-features = false } +thiserror.workspace = true dyn-clone.workspace = true rustc-hash = { workspace = true, optional = true } @@ -56,10 +56,11 @@ serde = [ std = [ "alloy-chains/std", "alloy-primitives/std", - "thiserror-no-std/std", + "thiserror/std", "rustc-hash/std", "alloy-consensus/std", "once_cell/std", 
- "serde?/std" + "serde?/std", + "alloy-rlp/std" ] rustc-hash = ["dep:rustc-hash"] diff --git a/crates/ethereum-forks/src/forkid.rs b/crates/ethereum-forks/src/forkid.rs index b612f3b0b1ae..ebc9fb106371 100644 --- a/crates/ethereum-forks/src/forkid.rs +++ b/crates/ethereum-forks/src/forkid.rs @@ -176,7 +176,7 @@ impl From for ForkId { } /// Reason for rejecting provided `ForkId`. -#[derive(Clone, Copy, Debug, thiserror_no_std::Error, PartialEq, Eq, Hash)] +#[derive(Clone, Copy, Debug, thiserror::Error, PartialEq, Eq, Hash)] pub enum ValidationError { /// Remote node is outdated and needs a software update. #[error( diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index 2c260c4a7d1c..ba737e56728c 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -11,14 +11,18 @@ use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; use alloy_primitives::U256; use reth_chainspec::{EthChainSpec, EthereumHardfork, EthereumHardforks}; -use reth_consensus::{Consensus, ConsensusError, HeaderValidator, PostExecutionInput}; +use reth_consensus::{ + Consensus, ConsensusError, FullConsensus, HeaderValidator, PostExecutionInput, +}; use reth_consensus_common::validation::{ validate_4844_header_standalone, validate_against_parent_4844, validate_against_parent_eip1559_base_fee, validate_against_parent_hash_number, validate_against_parent_timestamp, validate_block_pre_execution, validate_body_against_header, validate_header_base_fee, validate_header_extradata, validate_header_gas, }; -use reth_primitives::{BlockBody, BlockWithSenders, SealedBlock, SealedHeader}; +use reth_primitives::{ + Block, BlockBody, BlockWithSenders, NodePrimitives, Receipt, SealedBlock, SealedHeader, +}; use reth_primitives_traits::constants::MINIMUM_GAS_LIMIT; use std::{fmt::Debug, sync::Arc, time::SystemTime}; @@ -90,6 +94,25 @@ impl EthBeaconConsensus } } +impl FullConsensus for EthBeaconConsensus +where + ChainSpec: Send + Sync + 
EthChainSpec + EthereumHardforks + Debug, + N: NodePrimitives< + BlockHeader = Header, + BlockBody = BlockBody, + Block = Block, + Receipt = Receipt, + >, +{ + fn validate_block_post_execution( + &self, + block: &BlockWithSenders, + input: PostExecutionInput<'_>, + ) -> Result<(), ConsensusError> { + validate_block_post_execution(block, &self.chain_spec, input.receipts, input.requests) + } +} + impl Consensus for EthBeaconConsensus { @@ -104,14 +127,6 @@ impl Consensu fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> { validate_block_pre_execution(block, &self.chain_spec) } - - fn validate_block_post_execution( - &self, - block: &BlockWithSenders, - input: PostExecutionInput<'_>, - ) -> Result<(), ConsensusError> { - validate_block_post_execution(block, &self.chain_spec, input.receipts, input.requests) - } } impl HeaderValidator @@ -134,7 +149,7 @@ impl HeaderVa // Ensures that EIP-4844 fields are valid once cancun is active. if self.chain_spec.is_cancun_active_at_timestamp(header.timestamp) { - validate_4844_header_standalone(header)?; + validate_4844_header_standalone(header.header())?; } else if header.blob_gas_used.is_some() { return Err(ConsensusError::BlobGasUsedUnexpected) } else if header.excess_blob_gas.is_some() { @@ -159,19 +174,23 @@ impl HeaderVa header: &SealedHeader, parent: &SealedHeader, ) -> Result<(), ConsensusError> { - validate_against_parent_hash_number(header, parent)?; + validate_against_parent_hash_number(header.header(), parent)?; - validate_against_parent_timestamp(header, parent)?; + validate_against_parent_timestamp(header.header(), parent.header())?; // TODO Check difficulty increment between parent and self // Ace age did increment it by some formula that we need to follow. 
self.validate_against_parent_gas_limit(header, parent)?; - validate_against_parent_eip1559_base_fee(header, parent, &self.chain_spec)?; + validate_against_parent_eip1559_base_fee( + header.header(), + parent.header(), + &self.chain_spec, + )?; // ensure that the blob gas fields for this block if self.chain_spec.is_cancun_active_at_timestamp(header.timestamp) { - validate_against_parent_4844(header, parent)?; + validate_against_parent_4844(header.header(), parent.header())?; } Ok(()) diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index 094a1df26577..ff07856f1ca6 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -332,6 +332,8 @@ mod tests { .unwrap(), withdrawals: None, parent_beacon_block_root: None, + target_blobs_per_block: None, + max_blobs_per_block: None, }; // Verify that the generated payload ID matches the expected value @@ -369,6 +371,8 @@ mod tests { }, ]), parent_beacon_block_root: None, + target_blobs_per_block: None, + max_blobs_per_block: None, }; // Verify that the generated payload ID matches the expected value @@ -401,6 +405,8 @@ mod tests { ) .unwrap(), ), + target_blobs_per_block: None, + max_blobs_per_block: None, }; // Verify that the generated payload ID matches the expected value diff --git a/crates/ethereum/evm/Cargo.toml b/crates/ethereum/evm/Cargo.toml index 17e870e61113..4ee072599188 100644 --- a/crates/ethereum/evm/Cargo.toml +++ b/crates/ethereum/evm/Cargo.toml @@ -50,5 +50,6 @@ std = [ "alloy-genesis/std", "alloy-primitives/std", "revm-primitives/std", - "secp256k1/std" + "secp256k1/std", + "reth-ethereum-forks/std" ] diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 8642df89698d..6cbbb69c906b 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -4,7 +4,7 @@ use crate::{ dao_fork::{DAO_HARDFORK_BENEFICIARY, 
DAO_HARDKFORK_ACCOUNTS}, EthEvmConfig, }; -use alloc::{boxed::Box, sync::Arc, vec, vec::Vec}; +use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_consensus::Transaction as _; use alloy_eips::eip7685::Requests; use core::fmt::Display; @@ -13,14 +13,15 @@ use reth_consensus::ConsensusError; use reth_ethereum_consensus::validate_block_post_execution; use reth_evm::{ execute::{ - BasicBlockExecutorProvider, BlockExecutionError, BlockExecutionStrategy, - BlockExecutionStrategyFactory, BlockValidationError, ExecuteOutput, ProviderError, + balance_increment_state, BasicBlockExecutorProvider, BlockExecutionError, + BlockExecutionStrategy, BlockExecutionStrategyFactory, BlockValidationError, ExecuteOutput, + ProviderError, }, state_change::post_block_balance_increments, system_calls::{OnStateHook, SystemCaller}, ConfigureEvm, TxEnvOverrides, }; -use reth_primitives::{BlockWithSenders, Receipt}; +use reth_primitives::{BlockWithSenders, EthPrimitives, Receipt}; use reth_revm::db::State; use revm_primitives::{ db::{Database, DatabaseCommit}, @@ -57,9 +58,18 @@ impl EthExecutionStrategyFactory { impl BlockExecutionStrategyFactory for EthExecutionStrategyFactory where - EvmConfig: - Clone + Unpin + Sync + Send + 'static + ConfigureEvm
, + EvmConfig: Clone + + Unpin + + Sync + + Send + + 'static + + ConfigureEvm< + Header = alloy_consensus::Header, + Transaction = reth_primitives::TransactionSigned, + >, { + type Primitives = EthPrimitives; + type Strategy + Display>> = EthExecutionStrategy; @@ -122,13 +132,19 @@ where } } -impl BlockExecutionStrategy for EthExecutionStrategy +impl BlockExecutionStrategy for EthExecutionStrategy where DB: Database + Display>, - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm< + Header = alloy_consensus::Header, + Transaction = reth_primitives::TransactionSigned, + >, { + type DB = DB; type Error = BlockExecutionError; + type Primitives = EthPrimitives; + fn init(&mut self, tx_env_overrides: Box) { self.tx_env_overrides = Some(tx_env_overrides); } @@ -146,7 +162,7 @@ where let env = self.evm_env_for_block(&block.header, total_difficulty); let mut evm = self.evm_config.evm_with_env(&mut self.state, env); - self.system_caller.apply_pre_execution_changes(block, &mut evm)?; + self.system_caller.apply_pre_execution_changes(&block.block, &mut evm)?; Ok(()) } @@ -155,7 +171,7 @@ where &mut self, block: &BlockWithSenders, total_difficulty: U256, - ) -> Result { + ) -> Result, Self::Error> { let env = self.evm_env_for_block(&block.header, total_difficulty); let mut evm = self.evm_config.evm_with_env(&mut self.state, env); @@ -188,7 +204,7 @@ where error: Box::new(new_err), } })?; - self.system_caller.on_state(&result_and_state); + self.system_caller.on_state(&result_and_state.state); let ResultAndState { result, state } = result_and_state; evm.db_mut().commit(state); @@ -227,7 +243,12 @@ where let deposit_requests = crate::eip6110::parse_deposits_from_receipts(&self.chain_spec, receipts)?; - let mut requests = Requests::new(vec![deposit_requests]); + let mut requests = Requests::default(); + + if !deposit_requests.is_empty() { + requests.push_request(core::iter::once(0).chain(deposit_requests).collect()); + } + requests.extend(self.system_caller.apply_post_execution_changes(&mut evm)?); requests } else { @@ -236,7 +257,7 @@ where drop(evm); let mut balance_increments = - post_block_balance_increments(&self.chain_spec, block, total_difficulty); + post_block_balance_increments(&self.chain_spec, &block.block, total_difficulty); // Irregular state change at Ethereum DAO hardfork if self.chain_spec.fork(EthereumHardfork::Dao).transitions_at_block(block.number) { @@ -253,8 +274,11 @@ where } // increment balances self.state 
- .increment_balances(balance_increments) + .increment_balances(balance_increments.clone()) .map_err(|_| BlockValidationError::IncrementBalanceFailed)?; + // call state hook with changes due to balance increments. + let balance_state = balance_increment_state(&balance_increments, &mut self.state)?; + self.system_caller.on_state(&balance_state); Ok(requests) } @@ -307,6 +331,7 @@ mod tests { use alloy_eips::{ eip2935::{HISTORY_STORAGE_ADDRESS, HISTORY_STORAGE_CODE}, eip4788::{BEACON_ROOTS_ADDRESS, BEACON_ROOTS_CODE, SYSTEM_ADDRESS}, + eip4895::Withdrawal, eip7002::{WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS, WITHDRAWAL_REQUEST_PREDEPLOY_CODE}, eip7685::EMPTY_REQUESTS_HASH, }; @@ -323,9 +348,9 @@ mod tests { database::StateProviderDatabase, test_utils::StateProviderTest, TransitionState, }; use reth_testing_utils::generators::{self, sign_tx_with_key_pair}; - use revm_primitives::BLOCKHASH_SERVE_WINDOW; + use revm_primitives::{address, EvmState, BLOCKHASH_SERVE_WINDOW}; use secp256k1::{Keypair, Secp256k1}; - use std::collections::HashMap; + use std::{collections::HashMap, sync::mpsc}; fn create_state_provider_with_beacon_root_contract() -> StateProviderTest { let mut db = StateProviderTest::default(); @@ -1122,9 +1147,9 @@ mod tests { let receipt = receipts.first().unwrap(); assert!(receipt.success); - assert!(requests[0].is_empty(), "there should be no deposits"); - assert!(!requests[1].is_empty(), "there should be a withdrawal"); - assert!(requests[2].is_empty(), "there should be no consolidations"); + // There should be exactly one entry with withdrawal requests + assert_eq!(requests.len(), 1); + assert_eq!(requests[0][0], 1); } #[test] @@ -1210,4 +1235,67 @@ mod tests { ), } } + + #[test] + fn test_balance_increment_not_duplicated() { + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(0)) + .build(), + ); + + let withdrawal_recipient = 
address!("1000000000000000000000000000000000000000"); + + let mut db = StateProviderTest::default(); + let initial_balance = 100; + db.insert_account( + withdrawal_recipient, + Account { balance: U256::from(initial_balance), nonce: 1, bytecode_hash: None }, + None, + HashMap::default(), + ); + + let withdrawal = + Withdrawal { index: 0, validator_index: 0, address: withdrawal_recipient, amount: 1 }; + + let header = Header { timestamp: 1, number: 1, ..Header::default() }; + + let block = BlockWithSenders { + block: Block { + header, + body: BlockBody { + transactions: vec![], + ommers: vec![], + withdrawals: Some(vec![withdrawal].into()), + }, + }, + senders: vec![], + }; + + let provider = executor_provider(chain_spec); + let executor = provider.executor(StateProviderDatabase::new(&db)); + + let (tx, rx) = mpsc::channel(); + let tx_clone = tx.clone(); + + let _output = executor + .execute_with_state_hook((&block, U256::ZERO).into(), move |state: &EvmState| { + if let Some(account) = state.get(&withdrawal_recipient) { + let _ = tx_clone.send(account.info.balance); + } + }) + .expect("Block execution should succeed"); + + drop(tx); + let balance_changes: Vec = rx.try_iter().collect(); + + if let Some(final_balance) = balance_changes.last() { + let expected_final_balance = U256::from(initial_balance) + U256::from(1_000_000_000); // initial + 1 Gwei in Wei + assert_eq!( + *final_balance, expected_final_balance, + "Final balance should match expected value after withdrawal" + ); + } + } } diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index 8042562357f4..509b61cb2ece 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -62,6 +62,7 @@ impl EthEvmConfig { impl ConfigureEvmEnv for EthEvmConfig { type Header = Header; + type Transaction = TransactionSigned; type Error = Infallible; fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { diff --git 
a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index e6f47483b586..f5fe1dac234c 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -32,8 +32,6 @@ reth-primitives.workspace = true reth-revm = { workspace = true, features = ["std"] } reth-trie-db.workspace = true -alloy-consensus.workspace = true - # revm with required ethereum features revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } @@ -70,6 +68,9 @@ rand.workspace = true [features] default = [] +js-tracer = [ + "reth-node-builder/js-tracer" +] test-utils = [ "reth-node-builder/test-utils", "reth-chainspec/test-utils", diff --git a/crates/ethereum/node/src/lib.rs b/crates/ethereum/node/src/lib.rs index 421cee37fb03..8dae6031577c 100644 --- a/crates/ethereum/node/src/lib.rs +++ b/crates/ethereum/node/src/lib.rs @@ -1,4 +1,7 @@ //! Standalone crate for ethereum-specific Reth configuration and builder types. +//! +//! # features +//! - `js-tracer`: Enable the `JavaScript` tracer for the `debug_trace` endpoints #![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index a536b9dff907..dd4f1e5802c8 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -2,7 +2,6 @@ use std::sync::Arc; -use alloy_consensus::Header; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::ChainSpec; @@ -13,7 +12,8 @@ use reth_evm::execute::BasicBlockExecutorProvider; use reth_evm_ethereum::execute::EthExecutionStrategyFactory; use reth_network::{NetworkHandle, PeersInfo}; use reth_node_api::{ - AddOnsContext, ConfigureEvm, EngineValidator, FullNodeComponents, NodeTypesWithDB, + AddOnsContext, ConfigureEvm, EngineValidator, FullNodeComponents, HeaderTy, NodeTypesWithDB, + TxTy, }; use 
reth_node_builder::{ components::{ @@ -30,7 +30,7 @@ use reth_provider::{CanonStateSubscriptions, EthStorage}; use reth_rpc::EthApi; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::{ - blobstore::DiskFileBlobStore, EthTransactionPool, TransactionPool, + blobstore::DiskFileBlobStore, EthTransactionPool, PoolTransaction, TransactionPool, TransactionValidationTaskExecutor, }; use reth_trie_db::MerklePatriciaTrie; @@ -133,7 +133,7 @@ pub struct EthereumExecutorBuilder; impl ExecutorBuilder for EthereumExecutorBuilder where - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine, Node: FullNodeTypes, { type EVM = EthEvmConfig; @@ -242,8 +242,10 @@ impl EthereumPayloadBuilder { where Types: NodeTypesWithEngine, Node: FullNodeTypes, - Evm: ConfigureEvm
, - Pool: TransactionPool + Unpin + 'static, + Evm: ConfigureEvm
, Transaction = TxTy>, + Pool: TransactionPool>> + + Unpin + + 'static, Types::Engine: PayloadTypes< BuiltPayload = EthBuiltPayload, PayloadAttributes = EthPayloadAttributes, @@ -280,7 +282,9 @@ impl PayloadServiceBuilder for EthereumPayloadBui where Types: NodeTypesWithEngine, Node: FullNodeTypes, - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool>> + + Unpin + + 'static, Types::Engine: PayloadTypes< BuiltPayload = EthBuiltPayload, PayloadAttributes = EthPayloadAttributes, @@ -305,7 +309,9 @@ pub struct EthereumNetworkBuilder { impl NetworkBuilder for EthereumNetworkBuilder where Node: FullNodeTypes>, - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool>> + + Unpin + + 'static, { async fn build_network( self, @@ -327,9 +333,9 @@ pub struct EthereumConsensusBuilder { impl ConsensusBuilder for EthereumConsensusBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes>, { - type Consensus = Arc; + type Consensus = Arc; async fn build_consensus(self, ctx: &BuilderContext) -> eyre::Result { Ok(Arc::new(EthBeaconConsensus::new(ctx.chain_spec()))) diff --git a/crates/ethereum/node/tests/e2e/p2p.rs b/crates/ethereum/node/tests/e2e/p2p.rs index f8680f47ae3e..343521ef8eb7 100644 --- a/crates/ethereum/node/tests/e2e/p2p.rs +++ b/crates/ethereum/node/tests/e2e/p2p.rs @@ -1,19 +1,9 @@ -use crate::utils::eth_payload_attributes; -use alloy_consensus::TxType; -use alloy_primitives::bytes; -use alloy_provider::{ - network::{ - Ethereum, EthereumWallet, NetworkWallet, TransactionBuilder, TransactionBuilder7702, - }, - Provider, ProviderBuilder, SendableTx, -}; -use alloy_rpc_types_eth::TransactionRequest; -use alloy_signer::SignerSync; -use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng}; +use crate::utils::{advance_with_random_transactions, eth_payload_attributes}; +use alloy_provider::{Provider, ProviderBuilder}; +use rand::{rngs::StdRng, Rng, SeedableRng}; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_e2e_test_utils::{setup, 
setup_engine, transaction::TransactionTestContext}; use reth_node_ethereum::EthereumNode; -use revm::primitives::{AccessListItem, Authorization}; use std::sync::Arc; #[tokio::test] @@ -76,80 +66,12 @@ async fn e2e_test_send_transactions() -> eyre::Result<()> { .build(), ); - let (mut nodes, _tasks, wallet) = + let (mut nodes, _tasks, _) = setup_engine::(2, chain_spec.clone(), false, eth_payload_attributes).await?; let mut node = nodes.pop().unwrap(); - let signers = wallet.gen(); let provider = ProviderBuilder::new().with_recommended_fillers().on_http(node.rpc_url()); - // simple contract which writes to storage on any call - let dummy_bytecode = bytes!("6080604052348015600f57600080fd5b50602880601d6000396000f3fe4360a09081523360c0526040608081905260e08152902080805500fea164736f6c6343000810000a"); - let mut call_destinations = signers.iter().map(|s| s.address()).collect::>(); - - // Produce 100 random blocks with random transactions - for _ in 0..100 { - let tx_count = rng.gen_range(1..20); - - let mut pending = vec![]; - for _ in 0..tx_count { - let signer = signers.choose(&mut rng).unwrap(); - let tx_type = TxType::try_from(rng.gen_range(0..=4)).unwrap(); - - let mut tx = TransactionRequest::default().with_from(signer.address()); - - let should_create = - rng.gen::() && tx_type != TxType::Eip4844 && tx_type != TxType::Eip7702; - if should_create { - tx = tx.into_create().with_input(dummy_bytecode.clone()); - } else { - tx = tx.with_to(*call_destinations.choose(&mut rng).unwrap()).with_input( - (0..rng.gen_range(0..10000)).map(|_| rng.gen()).collect::>(), - ); - } - - if matches!(tx_type, TxType::Legacy | TxType::Eip2930) { - tx = tx.with_gas_price(provider.get_gas_price().await?); - } - - if rng.gen::() || tx_type == TxType::Eip2930 { - tx = tx.with_access_list( - vec![AccessListItem { - address: *call_destinations.choose(&mut rng).unwrap(), - storage_keys: (0..rng.gen_range(0..100)).map(|_| rng.gen()).collect(), - }] - .into(), - ); - } - - if tx_type == 
TxType::Eip7702 { - let signer = signers.choose(&mut rng).unwrap(); - let auth = Authorization { - chain_id: provider.get_chain_id().await?, - address: *call_destinations.choose(&mut rng).unwrap(), - nonce: provider.get_transaction_count(signer.address()).await?, - }; - let sig = signer.sign_hash_sync(&auth.signature_hash())?; - tx = tx.with_authorization_list(vec![auth.into_signed(sig)]) - } - - let SendableTx::Builder(tx) = provider.fill(tx).await? else { unreachable!() }; - let tx = - NetworkWallet::::sign_request(&EthereumWallet::new(signer.clone()), tx) - .await?; - - pending.push(provider.send_tx_envelope(tx).await?); - } - - let (payload, _) = node.advance_block().await?; - assert!(payload.block().raw_transactions().len() == tx_count); - - for pending in pending { - let receipt = pending.get_receipt().await?; - if let Some(address) = receipt.contract_address { - call_destinations.push(address); - } - } - } + advance_with_random_transactions(&mut node, 100, &mut rng, true).await?; let second_node = nodes.pop().unwrap(); let second_provider = @@ -159,15 +81,58 @@ async fn e2e_test_send_transactions() -> eyre::Result<()> { let head = provider.get_block_by_number(Default::default(), false.into()).await?.unwrap().header.hash; - second_node.engine_api.update_forkchoice(head, head).await?; - let start = std::time::Instant::now(); + second_node.sync_to(head).await?; - while provider.get_block_number().await? != second_provider.get_block_number().await? 
{ - tokio::time::sleep(std::time::Duration::from_millis(100)).await; + Ok(()) +} + +#[tokio::test] +async fn test_long_reorg() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let seed: [u8; 32] = rand::thread_rng().gen(); + let mut rng = StdRng::from_seed(seed); + println!("Seed: {:?}", seed); + + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .cancun_activated() + .prague_activated() + .build(), + ); + + let (mut nodes, _tasks, _) = + setup_engine::(2, chain_spec.clone(), false, eth_payload_attributes).await?; + + let mut first_node = nodes.pop().unwrap(); + let mut second_node = nodes.pop().unwrap(); + + let first_provider = ProviderBuilder::new().on_http(first_node.rpc_url()); + + // Advance first node 100 blocks. + advance_with_random_transactions(&mut first_node, 100, &mut rng, false).await?; + + // Sync second node to 20th block. + let head = first_provider.get_block_by_number(20.into(), false.into()).await?.unwrap(); + second_node.sync_to(head.header.hash).await?; + + // Produce a fork chain with blocks 21.60 + second_node.payload.timestamp = head.header.timestamp; + advance_with_random_transactions(&mut second_node, 40, &mut rng, true).await?; + + // Reorg first node from 100th block to new 60th block. + first_node.sync_to(second_node.block_hash(60)).await?; + + // Advance second node 20 blocks and ensure that first node is able to follow it. + advance_with_random_transactions(&mut second_node, 20, &mut rng, true).await?; + first_node.sync_to(second_node.block_hash(80)).await?; - assert!(start.elapsed() <= std::time::Duration::from_secs(10), "timed out"); - } + // Ensure that it works the other way around too. 
+ advance_with_random_transactions(&mut first_node, 20, &mut rng, true).await?; + second_node.sync_to(first_node.block_hash(100)).await?; Ok(()) } diff --git a/crates/ethereum/node/tests/e2e/rpc.rs b/crates/ethereum/node/tests/e2e/rpc.rs index 54bfbc8205e5..664f447cf25c 100644 --- a/crates/ethereum/node/tests/e2e/rpc.rs +++ b/crates/ethereum/node/tests/e2e/rpc.rs @@ -1,5 +1,5 @@ use crate::utils::eth_payload_attributes; -use alloy_eips::{calc_next_block_base_fee, eip2718::Encodable2718}; +use alloy_eips::{calc_next_block_base_fee, eip2718::Encodable2718, eip4844}; use alloy_primitives::{Address, B256, U256}; use alloy_provider::{network::EthereumWallet, Provider, ProviderBuilder, SendableTx}; use alloy_rpc_types_beacon::relay::{ @@ -240,6 +240,7 @@ async fn test_flashbots_validate_v4() -> eyre::Result<()> { execution_payload: block_to_payload_v3(payload.block().clone()), blobs_bundle: BlobsBundleV1::new([]), execution_requests: payload.requests().unwrap_or_default().to_vec(), + target_blobs_per_block: eip4844::TARGET_BLOBS_PER_BLOCK, signature: Default::default(), }, parent_beacon_block_root: attrs.parent_beacon_block_root.unwrap(), diff --git a/crates/ethereum/node/tests/e2e/utils.rs b/crates/ethereum/node/tests/e2e/utils.rs index c3743de185f5..84741a46aa69 100644 --- a/crates/ethereum/node/tests/e2e/utils.rs +++ b/crates/ethereum/node/tests/e2e/utils.rs @@ -1,6 +1,22 @@ -use alloy_primitives::{Address, B256}; +use alloy_eips::{BlockId, BlockNumberOrTag}; +use alloy_primitives::{bytes, Address, B256}; +use alloy_provider::{ + network::{ + Ethereum, EthereumWallet, NetworkWallet, TransactionBuilder, TransactionBuilder7702, + }, + Provider, ProviderBuilder, SendableTx, +}; use alloy_rpc_types_engine::PayloadAttributes; +use alloy_rpc_types_eth::TransactionRequest; +use alloy_signer::SignerSync; +use rand::{seq::SliceRandom, Rng}; +use reth_e2e_test_utils::{wallet::Wallet, NodeHelperType, TmpDB}; +use reth_node_api::NodeTypesWithDBAdapter; +use 
reth_node_ethereum::EthereumNode; use reth_payload_builder::EthPayloadBuilderAttributes; +use reth_primitives::TxType; +use reth_provider::FullProvider; +use revm::primitives::{AccessListItem, Authorization}; /// Helper function to create a new eth payload attributes pub(crate) fn eth_payload_attributes(timestamp: u64) -> EthPayloadBuilderAttributes { @@ -10,6 +26,121 @@ pub(crate) fn eth_payload_attributes(timestamp: u64) -> EthPayloadBuilderAttribu suggested_fee_recipient: Address::ZERO, withdrawals: Some(vec![]), parent_beacon_block_root: Some(B256::ZERO), + target_blobs_per_block: None, + max_blobs_per_block: None, }; EthPayloadBuilderAttributes::new(B256::ZERO, attributes) } + +/// Advances node by producing blocks with random transactions. +pub(crate) async fn advance_with_random_transactions( + node: &mut NodeHelperType, + num_blocks: usize, + rng: &mut impl Rng, + finalize: bool, +) -> eyre::Result<()> +where + Provider: FullProvider>, +{ + let provider = ProviderBuilder::new().with_recommended_fillers().on_http(node.rpc_url()); + let signers = Wallet::new(1).with_chain_id(provider.get_chain_id().await?).gen(); + + // simple contract which writes to storage on any call + let dummy_bytecode = bytes!("6080604052348015600f57600080fd5b50602880601d6000396000f3fe4360a09081523360c0526040608081905260e08152902080805500fea164736f6c6343000810000a"); + let mut call_destinations = signers.iter().map(|s| s.address()).collect::>(); + + for _ in 0..num_blocks { + let tx_count = rng.gen_range(1..20); + + let mut pending = vec![]; + for _ in 0..tx_count { + let signer = signers.choose(rng).unwrap(); + let tx_type = TxType::try_from(rng.gen_range(0..=4) as u64).unwrap(); + + let nonce = provider + .get_transaction_count(signer.address()) + .block_id(BlockId::Number(BlockNumberOrTag::Pending)) + .await?; + + let mut tx = + TransactionRequest::default().with_from(signer.address()).with_nonce(nonce); + + let should_create = + rng.gen::() && tx_type != TxType::Eip4844 && tx_type 
!= TxType::Eip7702; + if should_create { + tx = tx.into_create().with_input(dummy_bytecode.clone()); + } else { + tx = tx.with_to(*call_destinations.choose(rng).unwrap()).with_input( + (0..rng.gen_range(0..10000)).map(|_| rng.gen()).collect::>(), + ); + } + + if matches!(tx_type, TxType::Legacy | TxType::Eip2930) { + tx = tx.with_gas_price(provider.get_gas_price().await?); + } + + if rng.gen::() || tx_type == TxType::Eip2930 { + tx = tx.with_access_list( + vec![AccessListItem { + address: *call_destinations.choose(rng).unwrap(), + storage_keys: (0..rng.gen_range(0..100)).map(|_| rng.gen()).collect(), + }] + .into(), + ); + } + + if tx_type == TxType::Eip7702 { + let signer = signers.choose(rng).unwrap(); + let auth = Authorization { + chain_id: provider.get_chain_id().await?, + address: *call_destinations.choose(rng).unwrap(), + nonce: provider + .get_transaction_count(signer.address()) + .block_id(BlockId::Number(BlockNumberOrTag::Pending)) + .await?, + }; + let sig = signer.sign_hash_sync(&auth.signature_hash())?; + tx = tx.with_authorization_list(vec![auth.into_signed(sig)]) + } + + let gas = provider + .estimate_gas(&tx) + .block(BlockId::Number(BlockNumberOrTag::Pending)) + .await + .unwrap_or(1_000_000); + + tx.set_gas_limit(gas); + + let SendableTx::Builder(tx) = provider.fill(tx).await? else { unreachable!() }; + let tx = + NetworkWallet::::sign_request(&EthereumWallet::new(signer.clone()), tx) + .await?; + + pending.push(provider.send_tx_envelope(tx).await?); + } + + let (payload, _) = node.build_and_submit_payload().await?; + if finalize { + node.engine_api + .update_forkchoice(payload.block().hash(), payload.block().hash()) + .await?; + } else { + let last_safe = provider + .get_block_by_number(BlockNumberOrTag::Safe, false.into()) + .await? 
+ .unwrap() + .header + .hash; + node.engine_api.update_forkchoice(last_safe, payload.block().hash()).await?; + } + + for pending in pending { + let receipt = pending.get_receipt().await?; + if let Some(address) = receipt.contract_address { + call_destinations.push(address); + } + } + } + + Ok(()) +} diff --git a/crates/ethereum/payload/Cargo.toml b/crates/ethereum/payload/Cargo.toml index 4e0880d1d153..b01f4c5bc74f 100644 --- a/crates/ethereum/payload/Cargo.toml +++ b/crates/ethereum/payload/Cargo.toml @@ -25,7 +25,6 @@ reth-basic-payload-builder.workspace = true reth-evm.workspace = true reth-evm-ethereum.workspace = true reth-errors.workspace = true -reth-trie.workspace = true reth-chain-state.workspace = true reth-chainspec.workspace = true diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 49065ec0d8a8..f909d3840e22 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -10,7 +10,10 @@ #![allow(clippy::useless_let_if_seq)] use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; -use alloy_eips::{eip4844::MAX_DATA_GAS_PER_BLOCK, eip7685::Requests, merge::BEACON_NONCE}; +use alloy_eips::{ + eip4844::MAX_DATA_GAS_PER_BLOCK, eip7002::WITHDRAWAL_REQUEST_TYPE, + eip7251::CONSOLIDATION_REQUEST_TYPE, eip7685::Requests, merge::BEACON_NONCE, +}; use alloy_primitives::U256; use reth_basic_payload_builder::{ commit_withdrawals, is_better_payload, BuildArguments, BuildOutcome, PayloadBuilder, @@ -27,15 +30,15 @@ use reth_payload_builder_primitives::PayloadBuilderError; use reth_payload_primitives::PayloadBuilderAttributes; use reth_primitives::{ proofs::{self}, - Block, BlockBody, BlockExt, EthereumHardforks, Receipt, + Block, BlockBody, BlockExt, EthereumHardforks, InvalidTransactionError, Receipt, + TransactionSigned, }; use reth_provider::{ChainSpecProvider, StateProviderFactory}; use reth_revm::database::StateProviderDatabase; use reth_transaction_pool::{ - noop::NoopTransactionPool, 
BestTransactions, BestTransactionsAttributes, TransactionPool, - ValidPoolTransaction, + error::InvalidPoolTransactionError, noop::NoopTransactionPool, BestTransactions, + BestTransactionsAttributes, PoolTransaction, TransactionPool, ValidPoolTransaction, }; -use reth_trie::HashedPostState; use revm::{ db::{states::bundle_state::BundleRetention, State}, primitives::{ @@ -88,9 +91,9 @@ where // Default implementation of [PayloadBuilder] for unit type impl PayloadBuilder for EthereumPayloadBuilder where - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm
, Client: StateProviderFactory + ChainSpecProvider, - Pool: TransactionPool, + Pool: TransactionPool>, { type Attributes = EthPayloadBuilderAttributes; type BuiltPayload = EthBuiltPayload; @@ -152,9 +155,9 @@ pub fn default_ethereum_payload( best_txs: F, ) -> Result, PayloadBuilderError> where - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm
, Client: StateProviderFactory + ChainSpecProvider, - Pool: TransactionPool, + Pool: TransactionPool>, F: FnOnce(BestTransactionsAttributes) -> BestTransactionsIter, { let BuildArguments { client, pool, mut cached_reads, config, cancel, best_payload } = args; @@ -228,7 +231,10 @@ where // we can't fit this transaction into the block, so we need to mark it as invalid // which also removes all dependent transaction from the iterator before we can // continue - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid( + &pool_tx, + InvalidPoolTransactionError::ExceedsGasLimit(pool_tx.gas_limit(), block_gas_limit), + ); continue } @@ -238,7 +244,7 @@ where } // convert tx to a signed transaction - let tx = pool_tx.to_recovered_transaction(); + let tx = pool_tx.to_consensus(); // There's only limited amount of blob space available per block, so we need to check if // the EIP-4844 can still fit in the block @@ -250,7 +256,13 @@ where // the iterator. This is similar to the gas limit condition // for regular transactions above. 
trace!(target: "payload_builder", tx=?tx.hash, ?sum_blob_gas_used, ?tx_blob_gas, "skipping blob transaction because it would exceed the max data gas per block"); - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid( + &pool_tx, + InvalidPoolTransactionError::ExceedsGasLimit( + tx_blob_gas, + MAX_DATA_GAS_PER_BLOCK, + ), + ); continue } } @@ -270,7 +282,12 @@ where // if the transaction is invalid, we can skip it and all of its // descendants trace!(target: "payload_builder", %err, ?tx, "skipping invalid transaction and its descendants"); - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid( + &pool_tx, + InvalidPoolTransactionError::Consensus( + InvalidTransactionError::TxTypeNotSupported, + ), + ); } continue @@ -351,7 +368,27 @@ where ) .map_err(|err| PayloadBuilderError::Internal(err.into()))?; - Some(Requests::new(vec![deposit_requests, withdrawal_requests, consolidation_requests])) + let mut requests = Requests::default(); + + if !deposit_requests.is_empty() { + requests.push_request(core::iter::once(0).chain(deposit_requests).collect()); + } + + if !withdrawal_requests.is_empty() { + requests.push_request( + core::iter::once(WITHDRAWAL_REQUEST_TYPE).chain(withdrawal_requests).collect(), + ); + } + + if !consolidation_requests.is_empty() { + requests.push_request( + core::iter::once(CONSOLIDATION_REQUEST_TYPE) + .chain(consolidation_requests) + .collect(), + ); + } + + Some(requests) } else { None }; @@ -375,7 +412,7 @@ where let logs_bloom = execution_outcome.block_logs_bloom(block_number).expect("Number is in range"); // calculate the state root - let hashed_state = HashedPostState::from_bundle_state(&execution_outcome.state().state); + let hashed_state = db.database.db.hashed_post_state(execution_outcome.state()); let (state_root, trie_output) = { db.database.inner().state_root_with_updates(hashed_state.clone()).inspect_err(|err| { warn!(target: "payload_builder", @@ -438,6 +475,7 @@ where blob_gas_used: blob_gas_used.map(Into::into), 
excess_blob_gas: excess_blob_gas.map(Into::into), requests_hash, + target_blobs_per_block: None, }; let withdrawals = chain_spec diff --git a/crates/evm/Cargo.toml b/crates/evm/Cargo.toml index 9d6a616af983..fe5505b52bd0 100644 --- a/crates/evm/Cargo.toml +++ b/crates/evm/Cargo.toml @@ -46,17 +46,18 @@ metrics-util = { workspace = true, features = ["debugging"] } [features] default = ["std"] std = [ - "dep:metrics", - "dep:reth-metrics", - "reth-consensus/std", - "reth-primitives/std", - "reth-primitives-traits/std", - "reth-revm/std", - "alloy-eips/std", - "alloy-primitives/std", - "alloy-consensus/std", - "revm-primitives/std", - "revm/std", + "dep:metrics", + "dep:reth-metrics", + "reth-consensus/std", + "reth-primitives/std", + "reth-primitives-traits/std", + "reth-revm/std", + "alloy-eips/std", + "alloy-primitives/std", + "alloy-consensus/std", + "revm-primitives/std", + "revm/std", + "reth-ethereum-forks/std" ] test-utils = [ "dep:parking_lot", diff --git a/crates/evm/execution-errors/Cargo.toml b/crates/evm/execution-errors/Cargo.toml index 721c8055110d..b4b9992a979a 100644 --- a/crates/evm/execution-errors/Cargo.toml +++ b/crates/evm/execution-errors/Cargo.toml @@ -22,7 +22,7 @@ alloy-eips.workspace = true revm-primitives.workspace = true nybbles.workspace = true -derive_more.workspace = true +thiserror.workspace = true [features] default = ["std"] @@ -30,5 +30,8 @@ std = [ "reth-consensus/std", "alloy-eips/std", "alloy-primitives/std", - "revm-primitives/std" + "revm-primitives/std", + "alloy-rlp/std", + "thiserror/std", + "nybbles/std" ] diff --git a/crates/evm/execution-errors/src/lib.rs b/crates/evm/execution-errors/src/lib.rs index 4dbbfb7abdce..db7887d1b8d2 100644 --- a/crates/evm/execution-errors/src/lib.rs +++ b/crates/evm/execution-errors/src/lib.rs @@ -14,20 +14,20 @@ extern crate alloc; use alloc::{boxed::Box, string::String}; use alloy_eips::BlockNumHash; use alloy_primitives::B256; -use derive_more::{Display, From}; use 
reth_consensus::ConsensusError; use reth_prune_types::PruneSegmentError; use reth_storage_errors::provider::ProviderError; use revm_primitives::EVMError; +use thiserror::Error; pub mod trie; pub use trie::*; /// Transaction validation errors -#[derive(Clone, Debug, Display, Eq, PartialEq)] +#[derive(Error, PartialEq, Eq, Clone, Debug)] pub enum BlockValidationError { /// EVM error with transaction hash and message - #[display("EVM reported invalid transaction ({hash}): {error}")] + #[error("EVM reported invalid transaction ({hash}): {error}")] EVM { /// The hash of the transaction hash: B256, @@ -35,16 +35,16 @@ pub enum BlockValidationError { error: Box>, }, /// Error when recovering the sender for a transaction - #[display("failed to recover sender for transaction")] + #[error("failed to recover sender for transaction")] SenderRecoveryError, /// Error when incrementing balance in post execution - #[display("incrementing balance in post execution failed")] + #[error("incrementing balance in post execution failed")] IncrementBalanceFailed, /// Error when the state root does not match the expected value. 
- // #[from(ignore)] - StateRoot(StateRootError), + #[error(transparent)] + StateRoot(#[from] StateRootError), /// Error when transaction gas limit exceeds available block gas - #[display( + #[error( "transaction gas limit {transaction_gas_limit} is more than blocks available gas {block_available_gas}" )] TransactionGasLimitMoreThanAvailableBlockGas { @@ -54,22 +54,22 @@ pub enum BlockValidationError { block_available_gas: u64, }, /// Error for pre-merge block - #[display("block {hash} is pre merge")] + #[error("block {hash} is pre merge")] BlockPreMerge { /// The hash of the block hash: B256, }, /// Error for missing total difficulty - #[display("missing total difficulty for block {hash}")] + #[error("missing total difficulty for block {hash}")] MissingTotalDifficulty { /// The hash of the block hash: B256, }, /// Error for EIP-4788 when parent beacon block root is missing - #[display("EIP-4788 parent beacon block root missing for active Cancun block")] + #[error("EIP-4788 parent beacon block root missing for active Cancun block")] MissingParentBeaconBlockRoot, /// Error for Cancun genesis block when parent beacon block root is not zero - #[display( + #[error( "the parent beacon block root is not zero for Cancun genesis block: {parent_beacon_block_root}" )] CancunGenesisParentBeaconBlockRootNotZero { @@ -79,9 +79,7 @@ pub enum BlockValidationError { /// EVM error during [EIP-4788] beacon root contract call. /// /// [EIP-4788]: https://eips.ethereum.org/EIPS/eip-4788 - #[display( - "failed to apply beacon root contract call at {parent_beacon_block_root}: {message}" - )] + #[error("failed to apply beacon root contract call at {parent_beacon_block_root}: {message}")] BeaconRootContractCall { /// The beacon block root parent_beacon_block_root: Box, @@ -91,7 +89,7 @@ pub enum BlockValidationError { /// EVM error during [EIP-2935] blockhash contract call. 
/// /// [EIP-2935]: https://eips.ethereum.org/EIPS/eip-2935 - #[display("failed to apply blockhash contract call: {message}")] + #[error("failed to apply blockhash contract call: {message}")] BlockHashContractCall { /// The error message. message: String, @@ -99,7 +97,7 @@ pub enum BlockValidationError { /// EVM error during withdrawal requests contract call [EIP-7002] /// /// [EIP-7002]: https://eips.ethereum.org/EIPS/eip-7002 - #[display("failed to apply withdrawal requests contract call: {message}")] + #[error("failed to apply withdrawal requests contract call: {message}")] WithdrawalRequestsContractCall { /// The error message. message: String, @@ -107,7 +105,7 @@ pub enum BlockValidationError { /// EVM error during consolidation requests contract call [EIP-7251] /// /// [EIP-7251]: https://eips.ethereum.org/EIPS/eip-7251 - #[display("failed to apply consolidation requests contract call: {message}")] + #[error("failed to apply consolidation requests contract call: {message}")] ConsolidationRequestsContractCall { /// The error message. message: String, @@ -115,35 +113,22 @@ pub enum BlockValidationError { /// Error when decoding deposit requests from receipts [EIP-6110] /// /// [EIP-6110]: https://eips.ethereum.org/EIPS/eip-6110 - #[display("failed to decode deposit requests from receipts: {_0}")] + #[error("failed to decode deposit requests from receipts: {_0}")] DepositRequestDecode(String), } -impl From for BlockValidationError { - fn from(error: StateRootError) -> Self { - Self::StateRoot(error) - } -} - -impl core::error::Error for BlockValidationError { - fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { - match self { - Self::EVM { error, .. 
} => core::error::Error::source(error), - Self::StateRoot(source) => core::error::Error::source(source), - _ => Option::None, - } - } -} - /// `BlockExecutor` Errors -#[derive(Debug, From, Display)] +#[derive(Error, Debug)] pub enum BlockExecutionError { /// Validation error, transparently wrapping [`BlockValidationError`] - Validation(BlockValidationError), + #[error(transparent)] + Validation(#[from] BlockValidationError), /// Consensus error, transparently wrapping [`ConsensusError`] - Consensus(ConsensusError), + #[error(transparent)] + Consensus(#[from] ConsensusError), /// Internal, i.e. non consensus or validation related Block Executor Errors - Internal(InternalBlockExecutionError), + #[error(transparent)] + Internal(#[from] InternalBlockExecutionError), } impl BlockExecutionError { @@ -184,24 +169,14 @@ impl From for BlockExecutionError { } } -impl core::error::Error for BlockExecutionError { - fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { - match self { - Self::Validation(source) => core::error::Error::source(source), - Self::Consensus(source) => core::error::Error::source(source), - Self::Internal(source) => core::error::Error::source(source), - } - } -} - /// Internal (i.e., not validation or consensus related) `BlockExecutor` Errors -#[derive(Display, Debug, From)] +#[derive(Error, Debug)] pub enum InternalBlockExecutionError { /// Pruning error, transparently wrapping [`PruneSegmentError`] - #[from] - Pruning(PruneSegmentError), + #[error(transparent)] + Pruning(#[from] PruneSegmentError), /// Error when appending chain on fork is not possible - #[display( + #[error( "appending chain on fork (other_chain_fork:?) is not possible as the tip is {chain_tip:?}" )] AppendChainDoesntConnect { @@ -211,9 +186,10 @@ pub enum InternalBlockExecutionError { other_chain_fork: Box, }, /// Error when fetching latest block state. 
- #[from] - LatestBlock(ProviderError), + #[error(transparent)] + LatestBlock(#[from] ProviderError), /// Arbitrary Block Executor Errors + #[error(transparent)] Other(Box), } @@ -233,13 +209,3 @@ impl InternalBlockExecutionError { Self::Other(msg.to_string().into()) } } - -impl core::error::Error for InternalBlockExecutionError { - fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { - match self { - Self::Pruning(source) => core::error::Error::source(source), - Self::LatestBlock(source) => core::error::Error::source(source), - _ => Option::None, - } - } -} diff --git a/crates/evm/execution-errors/src/trie.rs b/crates/evm/execution-errors/src/trie.rs index 9e4b16d8d0c2..83210faab52f 100644 --- a/crates/evm/execution-errors/src/trie.rs +++ b/crates/evm/execution-errors/src/trie.rs @@ -1,27 +1,20 @@ //! Errors when computing the state root. -use alloc::string::ToString; -use alloy_primitives::B256; -use derive_more::{Display, From}; +use alloc::{boxed::Box, string::ToString}; +use alloy_primitives::{Bytes, B256}; use nybbles::Nibbles; use reth_storage_errors::{db::DatabaseError, provider::ProviderError}; +use thiserror::Error; /// State root errors. -#[derive(Display, Debug, From, PartialEq, Eq, Clone)] +#[derive(Error, PartialEq, Eq, Clone, Debug)] pub enum StateRootError { /// Internal database error. - Database(DatabaseError), + #[error(transparent)] + Database(#[from] DatabaseError), /// Storage root error. - StorageRootError(StorageRootError), -} - -impl core::error::Error for StateRootError { - fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { - match self { - Self::Database(source) => core::error::Error::source(source), - Self::StorageRootError(source) => core::error::Error::source(source), - } - } + #[error(transparent)] + StorageRootError(#[from] StorageRootError), } impl From for DatabaseError { @@ -34,10 +27,11 @@ impl From for DatabaseError { } /// Storage root error. 
-#[derive(Display, From, PartialEq, Eq, Clone, Debug)] +#[derive(Error, PartialEq, Eq, Clone, Debug)] pub enum StorageRootError { /// Internal database error. - Database(DatabaseError), + #[error(transparent)] + Database(#[from] DatabaseError), } impl From for DatabaseError { @@ -48,21 +42,15 @@ impl From for DatabaseError { } } -impl core::error::Error for StorageRootError { - fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { - match self { - Self::Database(source) => core::error::Error::source(source), - } - } -} - /// State proof errors. -#[derive(Display, From, Debug, PartialEq, Eq, Clone)] +#[derive(Error, PartialEq, Eq, Clone, Debug)] pub enum StateProofError { /// Internal database error. - Database(DatabaseError), + #[error(transparent)] + Database(#[from] DatabaseError), /// RLP decoding error. - Rlp(alloy_rlp::Error), + #[error(transparent)] + Rlp(#[from] alloy_rlp::Error), } impl From for ProviderError { @@ -74,32 +62,78 @@ impl From for ProviderError { } } -impl core::error::Error for StateProofError { - fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { - match self { - Self::Database(source) => core::error::Error::source(source), - Self::Rlp(source) => core::error::Error::source(source), - } - } +/// Result type with [`SparseStateTrieError`] as error. +pub type SparseStateTrieResult = Result; + +/// Error encountered in `SparseStateTrie`. +#[derive(Error, Debug)] +pub enum SparseStateTrieError { + /// Encountered invalid root node. + #[error("invalid root node at {path:?}: {node:?}")] + InvalidRootNode { + /// Path to first proof node. + path: Nibbles, + /// Encoded first proof node. + node: Bytes, + }, + /// Sparse trie error. + #[error(transparent)] + Sparse(#[from] SparseTrieError), + /// RLP error. + #[error(transparent)] + Rlp(#[from] alloy_rlp::Error), +} + +/// Result type with [`SparseTrieError`] as error. +pub type SparseTrieResult = Result; + +/// Error encountered in `SparseTrie`. 
+#[derive(Error, Debug)] +pub enum SparseTrieError { + /// Sparse trie is still blind. Thrown on attempt to update it. + #[error("sparse trie is blind")] + Blind, + /// Encountered blinded node on update. + #[error("attempted to update blind node at {path:?}: {hash}")] + BlindedNode { + /// Blind node path. + path: Nibbles, + /// Node hash + hash: B256, + }, + /// Encountered unexpected node at path when revealing. + #[error("encountered an invalid node at path {path:?} when revealing: {node:?}")] + Reveal { + /// Path to the node. + path: Nibbles, + /// Node that was at the path when revealing. + node: Box, + }, + /// RLP error. + #[error(transparent)] + Rlp(#[from] alloy_rlp::Error), + /// Other. + #[error(transparent)] + Other(#[from] Box), } /// Trie witness errors. -#[derive(Display, From, Debug, PartialEq, Eq, Clone)] +#[derive(Error, PartialEq, Eq, Clone, Debug)] pub enum TrieWitnessError { /// Error gather proofs. - #[from] - Proof(StateProofError), + #[error(transparent)] + Proof(#[from] StateProofError), /// RLP decoding error. - #[from] - Rlp(alloy_rlp::Error), + #[error(transparent)] + Rlp(#[from] alloy_rlp::Error), /// Missing account. - #[display("missing account {_0}")] + #[error("missing account {_0}")] MissingAccount(B256), /// Missing target node. - #[display("target node missing from proof {_0:?}")] + #[error("target node missing from proof {_0:?}")] MissingTargetNode(Nibbles), /// Unexpected empty root. 
- #[display("unexpected empty root: {_0:?}")] + #[error("unexpected empty root: {_0:?}")] UnexpectedEmptyRoot(Nibbles), } @@ -108,13 +142,3 @@ impl From for ProviderError { Self::TrieWitnessError(error.to_string()) } } - -impl core::error::Error for TrieWitnessError { - fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { - match self { - Self::Proof(source) => core::error::Error::source(source), - Self::Rlp(source) => core::error::Error::source(source), - _ => Option::None, - } - } -} diff --git a/crates/evm/execution-types/Cargo.toml b/crates/evm/execution-types/Cargo.toml index c7fbad673db1..c0ef2c5a694d 100644 --- a/crates/evm/execution-types/Cargo.toml +++ b/crates/evm/execution-types/Cargo.toml @@ -63,5 +63,6 @@ std = [ "revm/std", "serde?/std", "reth-primitives-traits/std", - "alloy-consensus/std", + "alloy-consensus/std", + "serde_with?/std" ] diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs index 1767a7f43f65..cbdb2296bf62 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -8,8 +8,8 @@ use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash}; use core::{fmt, ops::RangeInclusive}; use reth_execution_errors::{BlockExecutionError, InternalBlockExecutionError}; use reth_primitives::{ - transaction::SignedTransactionIntoRecoveredExt, SealedBlockFor, SealedBlockWithSenders, - SealedHeader, TransactionSignedEcRecovered, + transaction::SignedTransactionIntoRecoveredExt, RecoveredTx, SealedBlockFor, + SealedBlockWithSenders, SealedHeader, }; use reth_primitives_traits::{Block, BlockBody, NodePrimitives, SignedTransaction}; use reth_trie::updates::TrieUpdates; @@ -436,14 +436,13 @@ impl>> ChainBlocks<'_, self.blocks.values().flat_map(|block| block.transactions_with_sender()) } - /// Returns an iterator over all [`TransactionSignedEcRecovered`] in the blocks + /// Returns an iterator over all [`RecoveredTx`] in the blocks /// /// Note: This clones the 
transactions since it is assumed this is part of a shared [Chain]. #[inline] pub fn transactions_ecrecovered( &self, - ) -> impl Iterator::Transaction>> + '_ - { + ) -> impl Iterator::Transaction>> + '_ { self.transactions_with_sender().map(|(signer, tx)| tx.clone().with_signer(*signer)) } @@ -525,7 +524,9 @@ pub(super) mod serde_bincode_compat { use crate::ExecutionOutcome; use alloc::borrow::Cow; use alloy_primitives::BlockNumber; - use reth_primitives::serde_bincode_compat::SealedBlockWithSenders; + use reth_primitives::{ + serde_bincode_compat::SealedBlockWithSenders, EthPrimitives, NodePrimitives, + }; use reth_trie_common::serde_bincode_compat::updates::TrieUpdates; use serde::{ser::SerializeMap, Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; @@ -547,18 +548,24 @@ pub(super) mod serde_bincode_compat { /// } /// ``` #[derive(Debug, Serialize, Deserialize)] - pub struct Chain<'a> { - blocks: SealedBlocksWithSenders<'a>, - execution_outcome: Cow<'a, ExecutionOutcome>, + pub struct Chain<'a, N = EthPrimitives> + where + N: NodePrimitives, + { + blocks: SealedBlocksWithSenders<'a, N::Block>, + execution_outcome: Cow<'a, ExecutionOutcome>, trie_updates: Option>, } #[derive(Debug)] - struct SealedBlocksWithSenders<'a>( - Cow<'a, BTreeMap>, + struct SealedBlocksWithSenders<'a, B: reth_primitives_traits::Block>( + Cow<'a, BTreeMap>>, ); - impl Serialize for SealedBlocksWithSenders<'_> { + impl Serialize for SealedBlocksWithSenders<'_, B> + where + B: reth_primitives_traits::Block, + { fn serialize(&self, serializer: S) -> Result where S: Serializer, @@ -573,20 +580,26 @@ pub(super) mod serde_bincode_compat { } } - impl<'de> Deserialize<'de> for SealedBlocksWithSenders<'_> { + impl<'de, B> Deserialize<'de> for SealedBlocksWithSenders<'_, B> + where + B: reth_primitives_traits::Block, + { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { Ok(Self(Cow::Owned( - BTreeMap::>::deserialize(deserializer) + 
BTreeMap::>::deserialize(deserializer) .map(|blocks| blocks.into_iter().map(|(n, b)| (n, b.into())).collect())?, ))) } } - impl<'a> From<&'a super::Chain> for Chain<'a> { - fn from(value: &'a super::Chain) -> Self { + impl<'a, N> From<&'a super::Chain> for Chain<'a, N> + where + N: NodePrimitives, + { + fn from(value: &'a super::Chain) -> Self { Self { blocks: SealedBlocksWithSenders(Cow::Borrowed(&value.blocks)), execution_outcome: Cow::Borrowed(&value.execution_outcome), @@ -595,8 +608,11 @@ pub(super) mod serde_bincode_compat { } } - impl<'a> From> for super::Chain { - fn from(value: Chain<'a>) -> Self { + impl<'a, N> From> for super::Chain + where + N: NodePrimitives, + { + fn from(value: Chain<'a, N>) -> Self { Self { blocks: value.blocks.0.into_owned(), execution_outcome: value.execution_outcome.into_owned(), diff --git a/crates/evm/execution-types/src/execution_outcome.rs b/crates/evm/execution-types/src/execution_outcome.rs index 412269ace9cd..1dca5f2fc9e9 100644 --- a/crates/evm/execution-types/src/execution_outcome.rs +++ b/crates/evm/execution-types/src/execution_outcome.rs @@ -1,16 +1,14 @@ -use std::collections::HashMap; - +use crate::BlockExecutionOutput; use alloy_eips::eip7685::Requests; -use alloy_primitives::{Address, BlockNumber, Bloom, Log, B256, U256}; -use reth_primitives::{logs_bloom, Account, Bytecode, Receipts, StorageEntry}; -use reth_primitives_traits::{receipt::ReceiptExt, Receipt}; -use reth_trie::HashedPostState; +use alloy_primitives::{logs_bloom, Address, BlockNumber, Bloom, Log, B256, U256}; +use reth_primitives::Receipts; +use reth_primitives_traits::{receipt::ReceiptExt, Account, Bytecode, Receipt, StorageEntry}; +use reth_trie::{HashedPostState, KeyHasher}; use revm::{ db::{states::BundleState, BundleAccount}, primitives::AccountInfo, }; - -use crate::BlockExecutionOutput; +use std::collections::HashMap; /// Represents a changed account #[derive(Clone, Copy, Debug, PartialEq, Eq)] @@ -166,8 +164,8 @@ impl ExecutionOutcome { /// 
Returns [`HashedPostState`] for this execution outcome. /// See [`HashedPostState::from_bundle_state`] for more info. - pub fn hash_state_slow(&self) -> HashedPostState { - HashedPostState::from_bundle_state(&self.bundle.state) + pub fn hash_state_slow(&self) -> HashedPostState { + HashedPostState::from_bundle_state::(&self.bundle.state) } /// Transform block number to the index of block. @@ -334,7 +332,7 @@ impl ExecutionOutcome { } } -impl ExecutionOutcome { +impl> ExecutionOutcome { /// Returns an iterator over all block logs. pub fn logs(&self, block_number: BlockNumber) -> Option> { let index = self.block_number_to_index(block_number)?; @@ -376,10 +374,12 @@ mod tests { use super::*; #[cfg(not(feature = "optimism"))] use alloy_primitives::bytes; + #[cfg(not(feature = "optimism"))] + use alloy_primitives::LogData; use alloy_primitives::{Address, B256}; use reth_primitives::Receipts; #[cfg(not(feature = "optimism"))] - use reth_primitives::{LogData, TxType}; + use reth_primitives::TxType; #[test] #[cfg(not(feature = "optimism"))] diff --git a/crates/evm/src/either.rs b/crates/evm/src/either.rs index 85bc7e7f9a79..4faeb1a72030 100644 --- a/crates/evm/src/either.rs +++ b/crates/evm/src/either.rs @@ -8,9 +8,6 @@ use crate::{ }; use alloc::boxed::Box; use alloy_primitives::BlockNumber; -use reth_execution_errors::BlockExecutionError; -use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, ExecutionOutcome}; -use reth_primitives::{BlockWithSenders, Receipt}; use reth_prune_types::PruneModes; use reth_storage_errors::provider::ProviderError; use revm_primitives::db::Database; @@ -22,8 +19,10 @@ use revm::State; impl BlockExecutorProvider for Either where A: BlockExecutorProvider, - B: BlockExecutorProvider, + B: BlockExecutorProvider, { + type Primitives = A::Primitives; + type Executor + Display>> = Either, B::Executor>; @@ -53,23 +52,13 @@ where impl Executor for Either where - A: for<'a> Executor< - DB, - Input<'a> = BlockExecutionInput<'a, 
BlockWithSenders>, - Output = BlockExecutionOutput, - Error = BlockExecutionError, - >, - B: for<'a> Executor< - DB, - Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, - Output = BlockExecutionOutput, - Error = BlockExecutionError, - >, + A: Executor, + B: for<'a> Executor = A::Input<'a>, Output = A::Output, Error = A::Error>, DB: Database + Display>, { - type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; - type Output = BlockExecutionOutput; - type Error = BlockExecutionError; + type Input<'a> = A::Input<'a>; + type Output = A::Output; + type Error = A::Error; fn init(&mut self, tx_env_overrides: Box) { match self { @@ -116,23 +105,13 @@ where impl BatchExecutor for Either where - A: for<'a> BatchExecutor< - DB, - Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, - Output = ExecutionOutcome, - Error = BlockExecutionError, - >, - B: for<'a> BatchExecutor< - DB, - Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, - Output = ExecutionOutcome, - Error = BlockExecutionError, - >, + A: BatchExecutor, + B: for<'a> BatchExecutor = A::Input<'a>, Output = A::Output, Error = A::Error>, DB: Database + Display>, { - type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; - type Output = ExecutionOutcome; - type Error = BlockExecutionError; + type Input<'a> = A::Input<'a>; + type Output = A::Output; + type Error = A::Error; fn execute_and_verify_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { match self { diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index 42c756f4d93f..8c3e0108fcc3 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -1,26 +1,31 @@ //! Traits for execution. 
+use alloy_consensus::BlockHeader; // Re-export execution types pub use reth_execution_errors::{ BlockExecutionError, BlockValidationError, InternalBlockExecutionError, }; pub use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, ExecutionOutcome}; +use reth_primitives_traits::Block as _; pub use reth_storage_errors::provider::ProviderError; use crate::{system_calls::OnStateHook, TxEnvOverrides}; use alloc::{boxed::Box, vec::Vec}; use alloy_eips::eip7685::Requests; -use alloy_primitives::BlockNumber; -use core::{fmt::Display, marker::PhantomData}; +use alloy_primitives::{ + map::{DefaultHashBuilder, HashMap}, + Address, BlockNumber, +}; +use core::fmt::Display; use reth_consensus::ConsensusError; -use reth_primitives::{BlockWithSenders, Receipt}; +use reth_primitives::{BlockWithSenders, NodePrimitives, Receipt}; use reth_prune_types::PruneModes; use reth_revm::batch::BlockBatchRecord; use revm::{ db::{states::bundle_state::BundleRetention, BundleState}, State, }; -use revm_primitives::{db::Database, U256}; +use revm_primitives::{db::Database, Account, AccountStatus, EvmState, U256}; /// A general purpose executor trait that executes an input (e.g. block) and produces an output /// (e.g. state changes and receipts). @@ -130,6 +135,9 @@ pub trait BatchExecutor { /// A type that can create a new executor for block execution. pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { + /// Receipt type. + type Primitives: NodePrimitives; + /// An executor that can execute a single block given a database. /// /// # Verification @@ -143,16 +151,22 @@ pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { /// the returned state. 
type Executor + Display>>: for<'a> Executor< DB, - Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, - Output = BlockExecutionOutput, + Input<'a> = BlockExecutionInput< + 'a, + BlockWithSenders<::Block>, + >, + Output = BlockExecutionOutput<::Receipt>, Error = BlockExecutionError, >; /// An executor that can execute a batch of blocks given a database. type BatchExecutor + Display>>: for<'a> BatchExecutor< DB, - Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, - Output = ExecutionOutcome, + Input<'a> = BlockExecutionInput< + 'a, + BlockWithSenders<::Block>, + >, + Output = ExecutionOutcome<::Receipt>, Error = BlockExecutionError, >; @@ -174,18 +188,21 @@ pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { /// Helper type for the output of executing a block. #[derive(Debug, Clone)] -pub struct ExecuteOutput { +pub struct ExecuteOutput { /// Receipts obtained after executing a block. - pub receipts: Vec, + pub receipts: Vec, /// Cumulative gas used in the block execution. pub gas_used: u64, } /// Defines the strategy for executing a single block. -pub trait BlockExecutionStrategy -where - DB: Database, -{ +pub trait BlockExecutionStrategy { + /// Database this strategy operates on. + type DB: Database; + + /// Primitive types used by the strategy. + type Primitives: NodePrimitives; + /// The error type returned by this strategy's methods. type Error: From + core::error::Error; @@ -195,30 +212,30 @@ where /// Applies any necessary changes before executing the block's transactions. fn apply_pre_execution_changes( &mut self, - block: &BlockWithSenders, + block: &BlockWithSenders<::Block>, total_difficulty: U256, ) -> Result<(), Self::Error>; /// Executes all transactions in the block. fn execute_transactions( &mut self, - block: &BlockWithSenders, + block: &BlockWithSenders<::Block>, total_difficulty: U256, - ) -> Result; + ) -> Result::Receipt>, Self::Error>; /// Applies any necessary changes after executing the block's transactions. 
fn apply_post_execution_changes( &mut self, - block: &BlockWithSenders, + block: &BlockWithSenders<::Block>, total_difficulty: U256, - receipts: &[Receipt], + receipts: &[::Receipt], ) -> Result; /// Returns a reference to the current state. - fn state_ref(&self) -> &State; + fn state_ref(&self) -> &State; /// Returns a mutable reference to the current state. - fn state_mut(&mut self) -> &mut State; + fn state_mut(&mut self) -> &mut State; /// Sets a hook to be called after each state change during execution. fn with_state_hook(&mut self, _hook: Option>) {} @@ -232,8 +249,8 @@ where /// Validate a block with regard to execution results. fn validate_block_post_execution( &self, - _block: &BlockWithSenders, - _receipts: &[Receipt], + _block: &BlockWithSenders<::Block>, + _receipts: &[::Receipt], _requests: &Requests, ) -> Result<(), ConsensusError> { Ok(()) @@ -242,9 +259,13 @@ where /// A strategy factory that can create block execution strategies. pub trait BlockExecutionStrategyFactory: Send + Sync + Clone + Unpin + 'static { + /// Primitive types used by the strategy. + type Primitives: NodePrimitives; + /// Associated strategy type. type Strategy + Display>>: BlockExecutionStrategy< - DB, + DB = DB, + Primitives = Self::Primitives, Error = BlockExecutionError, >; @@ -280,11 +301,13 @@ impl BlockExecutorProvider for BasicBlockExecutorProvider where F: BlockExecutionStrategyFactory, { + type Primitives = F::Primitives; + type Executor + Display>> = - BasicBlockExecutor, DB>; + BasicBlockExecutor>; type BatchExecutor + Display>> = - BasicBatchExecutor, DB>; + BasicBatchExecutor>; fn executor(&self, db: DB) -> Self::Executor where @@ -307,34 +330,26 @@ where /// A generic block executor that uses a [`BlockExecutionStrategy`] to /// execute blocks. #[allow(missing_debug_implementations, dead_code)] -pub struct BasicBlockExecutor -where - S: BlockExecutionStrategy, - DB: Database, -{ +pub struct BasicBlockExecutor { /// Block execution strategy. 
pub(crate) strategy: S, - _phantom: PhantomData, } -impl BasicBlockExecutor -where - S: BlockExecutionStrategy, - DB: Database, -{ +impl BasicBlockExecutor { /// Creates a new `BasicBlockExecutor` with the given strategy. pub const fn new(strategy: S) -> Self { - Self { strategy, _phantom: PhantomData } + Self { strategy } } } -impl Executor for BasicBlockExecutor +impl Executor for BasicBlockExecutor where - S: BlockExecutionStrategy, + S: BlockExecutionStrategy, DB: Database + Display>, { - type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; - type Output = BlockExecutionOutput; + type Input<'a> = + BlockExecutionInput<'a, BlockWithSenders<::Block>>; + type Output = BlockExecutionOutput<::Receipt>; type Error = S::Error; fn init(&mut self, env_overrides: Box) { @@ -404,43 +419,44 @@ where /// A generic batch executor that uses a [`BlockExecutionStrategy`] to /// execute batches. #[allow(missing_debug_implementations)] -pub struct BasicBatchExecutor +pub struct BasicBatchExecutor where - S: BlockExecutionStrategy, - DB: Database, + S: BlockExecutionStrategy, { /// Batch execution strategy. pub(crate) strategy: S, /// Keeps track of batch execution receipts and requests. - pub(crate) batch_record: BlockBatchRecord, - _phantom: PhantomData, + pub(crate) batch_record: BlockBatchRecord<::Receipt>, } -impl BasicBatchExecutor +impl BasicBatchExecutor where - S: BlockExecutionStrategy, - DB: Database, + S: BlockExecutionStrategy, { /// Creates a new `BasicBatchExecutor` with the given strategy. 
- pub const fn new(strategy: S, batch_record: BlockBatchRecord) -> Self { - Self { strategy, batch_record, _phantom: PhantomData } + pub const fn new( + strategy: S, + batch_record: BlockBatchRecord<::Receipt>, + ) -> Self { + Self { strategy, batch_record } } } -impl BatchExecutor for BasicBatchExecutor +impl BatchExecutor for BasicBatchExecutor where - S: BlockExecutionStrategy, + S: BlockExecutionStrategy, DB: Database + Display>, { - type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; - type Output = ExecutionOutcome; + type Input<'a> = + BlockExecutionInput<'a, BlockWithSenders<::Block>>; + type Output = ExecutionOutcome<::Receipt>; type Error = BlockExecutionError; fn execute_and_verify_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { let BlockExecutionInput { block, total_difficulty } = input; if self.batch_record.first_block().is_none() { - self.batch_record.set_first_block(block.number); + self.batch_record.set_first_block(block.header().number()); } self.strategy.apply_pre_execution_changes(block, total_difficulty)?; @@ -452,7 +468,7 @@ where self.strategy.validate_block_post_execution(block, &receipts, &requests)?; // prepare the state according to the prune mode - let retention = self.batch_record.bundle_retention(block.number); + let retention = self.batch_record.bundle_retention(block.header().number()); self.strategy.state_mut().merge_transitions(retention); // store receipts in the set @@ -486,19 +502,58 @@ where } } +/// Creates an `EvmState` from a map of balance increments and the current state +/// to load accounts from. No balance increment is done in the function. +/// Zero balance increments are ignored and won't create state entries. 
+pub fn balance_increment_state( + balance_increments: &HashMap, + state: &mut State, +) -> Result +where + DB: Database, +{ + let mut load_account = |address: &Address| -> Result<(Address, Account), BlockExecutionError> { + let cache_account = state.load_cache_account(*address).map_err(|_| { + BlockExecutionError::msg("could not load account for balance increment") + })?; + + let account = cache_account.account.as_ref().ok_or_else(|| { + BlockExecutionError::msg("could not load account for balance increment") + })?; + + Ok(( + *address, + Account { + info: account.info.clone(), + storage: Default::default(), + status: AccountStatus::Touched, + }, + )) + }; + + balance_increments + .iter() + .filter(|(_, &balance)| balance != 0) + .map(|(addr, _)| load_account(addr)) + .collect::>() +} + #[cfg(test)] mod tests { use super::*; use alloy_primitives::U256; + use core::marker::PhantomData; use reth_chainspec::{ChainSpec, MAINNET}; + use reth_primitives::EthPrimitives; use revm::db::{CacheDB, EmptyDBTyped}; - use revm_primitives::{bytes, TxEnv}; + use revm_primitives::{address, bytes, AccountInfo, TxEnv, KECCAK_EMPTY}; use std::sync::Arc; #[derive(Clone, Default)] struct TestExecutorProvider; impl BlockExecutorProvider for TestExecutorProvider { + type Primitives = EthPrimitives; type Executor + Display>> = TestExecutor; type BatchExecutor + Display>> = TestExecutor; @@ -583,19 +638,20 @@ mod tests { _chain_spec: Arc, _evm_config: EvmConfig, state: State, - execute_transactions_result: ExecuteOutput, + execute_transactions_result: ExecuteOutput, apply_post_execution_changes_result: Requests, finish_result: BundleState, } #[derive(Clone)] struct TestExecutorStrategyFactory { - execute_transactions_result: ExecuteOutput, + execute_transactions_result: ExecuteOutput, apply_post_execution_changes_result: Requests, finish_result: BundleState, } impl BlockExecutionStrategyFactory for TestExecutorStrategyFactory { + type Primitives = EthPrimitives; type Strategy + Display>> = 
TestExecutorStrategy; @@ -622,10 +678,12 @@ mod tests { } } - impl BlockExecutionStrategy for TestExecutorStrategy + impl BlockExecutionStrategy for TestExecutorStrategy where DB: Database, { + type DB = DB; + type Primitives = EthPrimitives; type Error = BlockExecutionError; fn apply_pre_execution_changes( @@ -640,7 +698,7 @@ mod tests { &mut self, _block: &BlockWithSenders, _total_difficulty: U256, - ) -> Result { + ) -> Result, Self::Error> { Ok(self.execute_transactions_result.clone()) } @@ -692,8 +750,10 @@ mod tests { fn test_strategy() { let expected_gas_used = 10; let expected_receipts = vec![Receipt::default()]; - let expected_execute_transactions_result = - ExecuteOutput { receipts: expected_receipts.clone(), gas_used: expected_gas_used }; + let expected_execute_transactions_result = ExecuteOutput:: { + receipts: expected_receipts.clone(), + gas_used: expected_gas_used, + }; let expected_apply_post_execution_changes_result = Requests::new(vec![bytes!("deadbeef")]); let expected_finish_result = BundleState::default(); @@ -739,4 +799,90 @@ mod tests { let result = executor.execute(BlockExecutionInput::new(&Default::default(), U256::ZERO)); assert!(result.is_ok()); } + + fn setup_state_with_account( + addr: Address, + balance: u128, + nonce: u64, + ) -> State>> { + let db = CacheDB::>::default(); + let mut state = State::builder().with_database(db).with_bundle_update().build(); + + let account_info = AccountInfo { + balance: U256::from(balance), + nonce, + code_hash: KECCAK_EMPTY, + code: None, + }; + state.insert_account(addr, account_info); + state + } + + #[test] + fn test_balance_increment_state_zero() { + let addr = address!("1000000000000000000000000000000000000000"); + let mut state = setup_state_with_account(addr, 100, 1); + + let mut increments = HashMap::::default(); + increments.insert(addr, 0); + + let result = balance_increment_state(&increments, &mut state).unwrap(); + assert!(result.is_empty(), "Zero increments should be ignored"); + } + + 
#[test] + fn test_balance_increment_state_empty_increments_map() { + let mut state = State::builder() + .with_database(CacheDB::>::default()) + .with_bundle_update() + .build(); + + let increments = HashMap::::default(); + let result = balance_increment_state(&increments, &mut state).unwrap(); + assert!(result.is_empty(), "Empty increments map should return empty state"); + } + + #[test] + fn test_balance_increment_state_multiple_valid_increments() { + let addr1 = address!("1000000000000000000000000000000000000000"); + let addr2 = address!("2000000000000000000000000000000000000000"); + + let mut state = setup_state_with_account(addr1, 100, 1); + + let account2 = + AccountInfo { balance: U256::from(200), nonce: 1, code_hash: KECCAK_EMPTY, code: None }; + state.insert_account(addr2, account2); + + let mut increments = HashMap::::default(); + increments.insert(addr1, 50); + increments.insert(addr2, 100); + + let result = balance_increment_state(&increments, &mut state).unwrap(); + + assert_eq!(result.len(), 2); + assert_eq!(result.get(&addr1).unwrap().info.balance, U256::from(100)); + assert_eq!(result.get(&addr2).unwrap().info.balance, U256::from(200)); + } + + #[test] + fn test_balance_increment_state_mixed_zero_and_nonzero_increments() { + let addr1 = address!("1000000000000000000000000000000000000000"); + let addr2 = address!("2000000000000000000000000000000000000000"); + + let mut state = setup_state_with_account(addr1, 100, 1); + + let account2 = + AccountInfo { balance: U256::from(200), nonce: 1, code_hash: KECCAK_EMPTY, code: None }; + state.insert_account(addr2, account2); + + let mut increments = HashMap::::default(); + increments.insert(addr1, 0); + increments.insert(addr2, 100); + + let result = balance_increment_state(&increments, &mut state).unwrap(); + + assert_eq!(result.len(), 1, "Only non-zero increments should be included"); + assert!(!result.contains_key(&addr1), "Zero increment account should not be included"); + 
assert_eq!(result.get(&addr2).unwrap().info.balance, U256::from(200)); + } } diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index ae884bdd5f86..29f6d7c65817 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -20,7 +20,6 @@ extern crate alloc; use crate::builder::RethEvmBuilder; use alloy_consensus::BlockHeader as _; use alloy_primitives::{Address, Bytes, B256, U256}; -use reth_primitives::TransactionSigned; use reth_primitives_traits::BlockHeader; use revm::{Database, Evm, GetInspector}; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, Env, EnvWithHandlerCfg, SpecId, TxEnv}; @@ -116,18 +115,21 @@ pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { /// The header type used by the EVM. type Header: BlockHeader; + /// The transaction type. + type Transaction; + /// The error type that is returned by [`Self::next_cfg_and_block_env`]. type Error: core::error::Error + Send + Sync; - /// Returns a [`TxEnv`] from a [`TransactionSigned`] and [`Address`]. - fn tx_env(&self, transaction: &TransactionSigned, signer: Address) -> TxEnv { + /// Returns a [`TxEnv`] from a transaction and [`Address`]. + fn tx_env(&self, transaction: &Self::Transaction, signer: Address) -> TxEnv { let mut tx_env = TxEnv::default(); self.fill_tx_env(&mut tx_env, transaction, signer); tx_env } - /// Fill transaction environment from a [`TransactionSigned`] and the given sender address. - fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address); + /// Fill transaction environment from a transaction and the given sender address. + fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &Self::Transaction, sender: Address); /// Fill transaction environment with a system contract call. fn fill_tx_env_system_contract_call( diff --git a/crates/evm/src/metrics.rs b/crates/evm/src/metrics.rs index 3464bb96f4c7..1f21cb4d3a41 100644 --- a/crates/evm/src/metrics.rs +++ b/crates/evm/src/metrics.rs @@ -3,11 +3,12 @@ //! 
Block processing related to syncing should take care to update the metrics by using either //! [`ExecutorMetrics::execute_metered`] or [`ExecutorMetrics::metered_one`]. use crate::{execute::Executor, system_calls::OnStateHook}; +use alloy_consensus::BlockHeader; use metrics::{Counter, Gauge, Histogram}; use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput}; use reth_metrics::Metrics; use reth_primitives::BlockWithSenders; -use revm_primitives::ResultAndState; +use revm_primitives::EvmState; use std::time::Instant; /// Wrapper struct that combines metrics and state hook @@ -17,13 +18,11 @@ struct MeteredStateHook { } impl OnStateHook for MeteredStateHook { - fn on_state(&mut self, result_and_state: &ResultAndState) { + fn on_state(&mut self, state: &EvmState) { // Update the metrics for the number of accounts, storage slots and bytecodes loaded - let accounts = result_and_state.state.keys().len(); - let storage_slots = - result_and_state.state.values().map(|account| account.storage.len()).sum::(); - let bytecodes = result_and_state - .state + let accounts = state.keys().len(); + let storage_slots = state.values().map(|account| account.storage.len()).sum::(); + let bytecodes = state .values() .filter(|account| !account.info.is_empty_code_hash()) .collect::>() @@ -34,7 +33,7 @@ impl OnStateHook for MeteredStateHook { self.metrics.bytecodes_loaded_histogram.record(bytecodes as f64); // Call the original state hook - self.inner_hook.on_state(result_and_state); + self.inner_hook.on_state(state); } } @@ -69,9 +68,10 @@ pub struct ExecutorMetrics { } impl ExecutorMetrics { - fn metered(&self, block: &BlockWithSenders, f: F) -> R + fn metered(&self, block: &BlockWithSenders, f: F) -> R where F: FnOnce() -> R, + B: reth_primitives_traits::Block, { // Execute the block and record the elapsed time. let execute_start = Instant::now(); @@ -79,8 +79,8 @@ impl ExecutorMetrics { let execution_duration = execute_start.elapsed().as_secs_f64(); // Update gas metrics. 
- self.gas_processed_total.increment(block.gas_used); - self.gas_per_second.set(block.gas_used as f64 / execution_duration); + self.gas_processed_total.increment(block.header().gas_used()); + self.gas_per_second.set(block.header().gas_used() as f64 / execution_duration); self.execution_histogram.record(execution_duration); self.execution_duration.set(execution_duration); @@ -94,19 +94,20 @@ impl ExecutorMetrics { /// of accounts, storage slots and bytecodes loaded and updated. /// Execute the given block using the provided [`Executor`] and update metrics for the /// execution. - pub fn execute_metered<'a, E, DB, O, Error>( + pub fn execute_metered<'a, E, DB, O, Error, B>( &self, executor: E, - input: BlockExecutionInput<'a, BlockWithSenders>, + input: BlockExecutionInput<'a, BlockWithSenders>, state_hook: Box, ) -> Result, Error> where E: Executor< DB, - Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, + Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, Output = BlockExecutionOutput, Error = Error, >, + B: reth_primitives_traits::Block, { // clone here is cheap, all the metrics are Option>. additionally // they are gloally registered so that the data recorded in the hook will @@ -133,9 +134,14 @@ impl ExecutorMetrics { } /// Execute the given block and update metrics for the execution. 
- pub fn metered_one(&self, input: BlockExecutionInput<'_, BlockWithSenders>, f: F) -> R + pub fn metered_one( + &self, + input: BlockExecutionInput<'_, BlockWithSenders>, + f: F, + ) -> R where - F: FnOnce(BlockExecutionInput<'_, BlockWithSenders>) -> R, + F: FnOnce(BlockExecutionInput<'_, BlockWithSenders>) -> R, + B: reth_primitives_traits::Block, { self.metered(input.block, || f(input)) } @@ -148,14 +154,13 @@ mod tests { use metrics_util::debugging::{DebugValue, DebuggingRecorder, Snapshotter}; use revm::db::BundleState; use revm_primitives::{ - Account, AccountInfo, AccountStatus, Bytes, EvmState, EvmStorage, EvmStorageSlot, - ExecutionResult, Output, SuccessReason, B256, U256, + Account, AccountInfo, AccountStatus, EvmState, EvmStorage, EvmStorageSlot, B256, U256, }; use std::sync::mpsc; /// A mock executor that simulates state changes struct MockExecutor { - result_and_state: ResultAndState, + state: EvmState, } impl Executor<()> for MockExecutor { @@ -198,7 +203,7 @@ mod tests { F: OnStateHook + 'static, { // Call hook with our mock state - hook.on_state(&self.result_and_state); + hook.on_state(&self.state); Ok(BlockExecutionOutput { state: BundleState::default(), @@ -215,7 +220,7 @@ mod tests { } impl OnStateHook for ChannelStateHook { - fn on_state(&mut self, _result_and_state: &ResultAndState) { + fn on_state(&mut self, _state: &EvmState) { let _ = self.sender.send(self.output); } } @@ -241,35 +246,26 @@ mod tests { let expected_output = 42; let state_hook = Box::new(ChannelStateHook { sender: tx, output: expected_output }); - let result_and_state = ResultAndState { - result: ExecutionResult::Success { - reason: SuccessReason::Stop, - gas_used: 100, - output: Output::Call(Bytes::default()), - logs: vec![], - gas_refunded: 0, - }, - state: { - let mut state = EvmState::default(); - let storage = - EvmStorage::from_iter([(U256::from(1), EvmStorageSlot::new(U256::from(2)))]); - state.insert( - Default::default(), - Account { - info: AccountInfo { - 
balance: U256::from(100), - nonce: 10, - code_hash: B256::random(), - code: Default::default(), - }, - storage, - status: AccountStatus::Loaded, + let state = { + let mut state = EvmState::default(); + let storage = + EvmStorage::from_iter([(U256::from(1), EvmStorageSlot::new(U256::from(2)))]); + state.insert( + Default::default(), + Account { + info: AccountInfo { + balance: U256::from(100), + nonce: 10, + code_hash: B256::random(), + code: Default::default(), }, - ); - state - }, + storage, + status: AccountStatus::Loaded, + }, + ); + state }; - let executor = MockExecutor { result_and_state }; + let executor = MockExecutor { state }; let _result = metrics.execute_metered(executor, input, state_hook).unwrap(); let snapshot = snapshotter.snapshot().into_vec(); @@ -303,11 +299,9 @@ mod tests { let expected_output = 42; let state_hook = Box::new(ChannelStateHook { sender: tx, output: expected_output }); - let result_and_state = ResultAndState { - result: ExecutionResult::Revert { gas_used: 0, output: Default::default() }, - state: EvmState::default(), - }; - let executor = MockExecutor { result_and_state }; + let state = EvmState::default(); + + let executor = MockExecutor { state }; let _result = metrics.execute_metered(executor, input, state_hook).unwrap(); let actual_output = rx.try_recv().unwrap(); diff --git a/crates/evm/src/noop.rs b/crates/evm/src/noop.rs index 4fdc6d367a27..7b1063533da3 100644 --- a/crates/evm/src/noop.rs +++ b/crates/evm/src/noop.rs @@ -4,7 +4,7 @@ use alloy_primitives::BlockNumber; use core::fmt::Display; use reth_execution_errors::BlockExecutionError; use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, ExecutionOutcome}; -use reth_primitives::{BlockWithSenders, Receipt}; +use reth_primitives::{BlockWithSenders, EthPrimitives, Receipt}; use reth_prune_types::PruneModes; use reth_storage_errors::provider::ProviderError; use revm::State; @@ -23,6 +23,8 @@ const UNAVAILABLE_FOR_NOOP: &str = "execution unavailable for noop"; 
pub struct NoopBlockExecutorProvider; impl BlockExecutorProvider for NoopBlockExecutorProvider { + type Primitives = EthPrimitives; + type Executor + Display>> = Self; type BatchExecutor + Display>> = Self; diff --git a/crates/evm/src/provider.rs b/crates/evm/src/provider.rs index 0d4f45c4d9d7..ec2f1803da01 100644 --- a/crates/evm/src/provider.rs +++ b/crates/evm/src/provider.rs @@ -12,7 +12,7 @@ use revm::primitives::{BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; /// This type is mainly used to provide required data to configure the EVM environment that is /// usually stored on disk. #[auto_impl::auto_impl(&, Arc)] -pub trait EvmEnvProvider: Send + Sync { +pub trait EvmEnvProvider: Send + Sync { /// Fills the [`CfgEnvWithHandlerCfg`] and [BlockEnv] fields with values specific to the given /// [BlockHashOrNumber]. fn fill_env_at( @@ -23,17 +23,17 @@ pub trait EvmEnvProvider: Send + Sync { evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
; + EvmConfig: ConfigureEvmEnv
; /// Fills the default [`CfgEnvWithHandlerCfg`] and [BlockEnv] fields with values specific to the - /// given [Header]. + /// given block header. fn env_with_header( &self, - header: &Header, + header: &H, evm_config: EvmConfig, ) -> ProviderResult<(CfgEnvWithHandlerCfg, BlockEnv)> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
, { let mut cfg = CfgEnvWithHandlerCfg::new_with_spec_id(CfgEnv::default(), SpecId::LATEST); let mut block_env = BlockEnv::default(); @@ -42,16 +42,16 @@ pub trait EvmEnvProvider: Send + Sync { } /// Fills the [`CfgEnvWithHandlerCfg`] and [BlockEnv] fields with values specific to the given - /// [Header]. + /// block header. fn fill_env_with_header( &self, cfg: &mut CfgEnvWithHandlerCfg, block_env: &mut BlockEnv, - header: &Header, + header: &H, evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
; + EvmConfig: ConfigureEvmEnv
; /// Fills the [`CfgEnvWithHandlerCfg`] fields with values specific to the given /// [BlockHashOrNumber]. @@ -62,15 +62,15 @@ pub trait EvmEnvProvider: Send + Sync { evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
; + EvmConfig: ConfigureEvmEnv
; - /// Fills the [`CfgEnvWithHandlerCfg`] fields with values specific to the given [Header]. + /// Fills the [`CfgEnvWithHandlerCfg`] fields with values specific to the given block header. fn fill_cfg_env_with_header( &self, cfg: &mut CfgEnvWithHandlerCfg, - header: &Header, + header: &H, evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
; + EvmConfig: ConfigureEvmEnv
; } diff --git a/crates/evm/src/state_change.rs b/crates/evm/src/state_change.rs index 0e207fc2dbeb..5104c466399b 100644 --- a/crates/evm/src/state_change.rs +++ b/crates/evm/src/state_change.rs @@ -1,43 +1,55 @@ //! State changes that are not related to transactions. +use alloy_consensus::BlockHeader; use alloy_eips::eip4895::Withdrawal; use alloy_primitives::{map::HashMap, Address, U256}; use reth_chainspec::EthereumHardforks; use reth_consensus_common::calc; -use reth_primitives::Block; +use reth_primitives_traits::BlockBody; /// Collect all balance changes at the end of the block. /// /// Balance changes might include the block reward, uncle rewards, withdrawals, or irregular /// state changes (DAO fork). #[inline] -pub fn post_block_balance_increments( +pub fn post_block_balance_increments( chain_spec: &ChainSpec, block: &Block, total_difficulty: U256, -) -> HashMap { +) -> HashMap +where + ChainSpec: EthereumHardforks, + Block: reth_primitives_traits::Block, +{ let mut balance_increments = HashMap::default(); // Add block rewards if they are enabled. 
- if let Some(base_block_reward) = - calc::base_block_reward(chain_spec, block.number, block.difficulty, total_difficulty) - { + if let Some(base_block_reward) = calc::base_block_reward( + chain_spec, + block.header().number(), + block.header().difficulty(), + total_difficulty, + ) { // Ommer rewards - for ommer in &block.body.ommers { - *balance_increments.entry(ommer.beneficiary).or_default() += - calc::ommer_reward(base_block_reward, block.number, ommer.number); + if let Some(ommers) = block.body().ommers() { + for ommer in ommers { + *balance_increments.entry(ommer.beneficiary()).or_default() += + calc::ommer_reward(base_block_reward, block.header().number(), ommer.number()); + } } // Full block reward - *balance_increments.entry(block.beneficiary).or_default() += - calc::block_reward(base_block_reward, block.body.ommers.len()); + *balance_increments.entry(block.header().beneficiary()).or_default() += calc::block_reward( + base_block_reward, + block.body().ommers().map(|s| s.len()).unwrap_or(0), + ); } // process withdrawals insert_post_block_withdrawals_balance_increments( chain_spec, - block.timestamp, - block.body.withdrawals.as_ref().map(|w| w.as_slice()), + block.header().timestamp(), + block.body().withdrawals().as_ref().map(|w| w.as_slice()), &mut balance_increments, ); diff --git a/crates/evm/src/system_calls/eip2935.rs b/crates/evm/src/system_calls/eip2935.rs index 4848feb7281c..0cc2b83a3ca5 100644 --- a/crates/evm/src/system_calls/eip2935.rs +++ b/crates/evm/src/system_calls/eip2935.rs @@ -4,7 +4,6 @@ use alloc::{boxed::Box, string::ToString}; use alloy_eips::eip2935::HISTORY_STORAGE_ADDRESS; use crate::ConfigureEvm; -use alloy_consensus::Header; use alloy_primitives::B256; use reth_chainspec::EthereumHardforks; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; @@ -35,7 +34,7 @@ pub(crate) fn transact_blockhashes_contract_call( where DB: Database, DB::Error: core::fmt::Display, - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm, { if !chain_spec.is_prague_active_at_timestamp(block_timestamp) { return Ok(None) diff --git a/crates/evm/src/system_calls/eip4788.rs b/crates/evm/src/system_calls/eip4788.rs index 2ad02c26eb90..bfd5797214e5 100644 --- a/crates/evm/src/system_calls/eip4788.rs +++ b/crates/evm/src/system_calls/eip4788.rs @@ -2,7 +2,6 @@ use alloc::{boxed::Box, string::ToString}; use crate::ConfigureEvm; -use alloy_consensus::Header; use alloy_eips::eip4788::BEACON_ROOTS_ADDRESS; use alloy_primitives::B256; use reth_chainspec::EthereumHardforks; @@ -31,7 +30,7 @@ pub(crate) fn transact_beacon_root_contract_call( where DB: Database, DB::Error: core::fmt::Display, - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm, Spec: EthereumHardforks, { if !chain_spec.is_cancun_active_at_timestamp(block_timestamp) { diff --git a/crates/evm/src/system_calls/eip7002.rs b/crates/evm/src/system_calls/eip7002.rs index f20b7a54c089..d3c6d84903ed 100644 --- a/crates/evm/src/system_calls/eip7002.rs +++ b/crates/evm/src/system_calls/eip7002.rs @@ -1,7 +1,6 @@ //! [EIP-7002](https://eips.ethereum.org/EIPS/eip-7002) system call implementation. use crate::ConfigureEvm; use alloc::{boxed::Box, format}; -use alloy_consensus::Header; use alloy_eips::eip7002::WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS; use alloy_primitives::Bytes; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; @@ -21,7 +20,7 @@ pub(crate) fn transact_withdrawal_requests_contract_call( where DB: Database, DB::Error: core::fmt::Display, - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm, { // get previous env let previous_env = Box::new(evm.context.env().clone()); diff --git a/crates/evm/src/system_calls/eip7251.rs b/crates/evm/src/system_calls/eip7251.rs index 112f724df764..28ae0160cdf6 100644 --- a/crates/evm/src/system_calls/eip7251.rs +++ b/crates/evm/src/system_calls/eip7251.rs @@ -1,7 +1,6 @@ //! [EIP-7251](https://eips.ethereum.org/EIPS/eip-7251) system call implementation. use crate::ConfigureEvm; use alloc::{boxed::Box, format}; -use alloy_consensus::Header; use alloy_eips::eip7251::CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS; use alloy_primitives::Bytes; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; @@ -22,7 +21,7 @@ pub(crate) fn transact_consolidation_requests_contract_call( where DB: Database, DB::Error: core::fmt::Display, - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm, { // get previous env let previous_env = Box::new(evm.context.env().clone()); diff --git a/crates/evm/src/system_calls/mod.rs b/crates/evm/src/system_calls/mod.rs index 47fd59d735fc..8af72094be47 100644 --- a/crates/evm/src/system_calls/mod.rs +++ b/crates/evm/src/system_calls/mod.rs @@ -1,16 +1,17 @@ //! System contract call functions. use crate::ConfigureEvm; -use alloc::{boxed::Box, sync::Arc, vec}; -use alloy_consensus::Header; -use alloy_eips::eip7685::Requests; +use alloc::{boxed::Box, sync::Arc}; +use alloy_consensus::BlockHeader; +use alloy_eips::{ + eip7002::WITHDRAWAL_REQUEST_TYPE, eip7251::CONSOLIDATION_REQUEST_TYPE, eip7685::Requests, +}; use alloy_primitives::Bytes; use core::fmt::Display; use reth_chainspec::EthereumHardforks; use reth_execution_errors::BlockExecutionError; -use reth_primitives::Block; use revm::{Database, DatabaseCommit, Evm}; -use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, B256}; +use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, EvmState, B256}; mod eip2935; mod eip4788; @@ -19,15 +20,15 @@ mod eip7251; /// A hook that is called after each state change. pub trait OnStateHook { - /// Invoked with the result and state after each system call. - fn on_state(&mut self, state: &ResultAndState); + /// Invoked with the state after each system call. + fn on_state(&mut self, state: &EvmState); } impl OnStateHook for F where - F: FnMut(&ResultAndState), + F: FnMut(&EvmState), { - fn on_state(&mut self, state: &ResultAndState) { + fn on_state(&mut self, state: &EvmState) { self(state) } } @@ -38,7 +39,7 @@ where pub struct NoopHook; impl OnStateHook for NoopHook { - fn on_state(&mut self, _state: &ResultAndState) {} + fn on_state(&mut self, _state: &EvmState) {} } /// An ephemeral helper type for executing system calls. @@ -89,11 +90,11 @@ where impl SystemCaller where - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm, Chainspec: EthereumHardforks, { /// Apply pre execution changes. - pub fn apply_pre_execution_changes( + pub fn apply_pre_execution_changes( &mut self, block: &Block, evm: &mut Evm<'_, Ext, DB>, @@ -101,17 +102,18 @@ where where DB: Database + DatabaseCommit, DB::Error: Display, + Block: reth_primitives_traits::Block
, { self.apply_blockhashes_contract_call( - block.timestamp, - block.number, - block.parent_hash, + block.header().timestamp(), + block.header().number(), + block.header().parent_hash(), evm, )?; self.apply_beacon_root_contract_call( - block.timestamp, - block.number, - block.parent_beacon_block_root, + block.header().timestamp(), + block.header().number(), + block.header().parent_beacon_block_root(), evm, )?; @@ -127,13 +129,27 @@ where DB: Database + DatabaseCommit, DB::Error: Display, { - // todo + let mut requests = Requests::default(); + // Collect all EIP-7685 requests let withdrawal_requests = self.apply_withdrawal_requests_contract_call(evm)?; + if !withdrawal_requests.is_empty() { + requests.push_request( + core::iter::once(WITHDRAWAL_REQUEST_TYPE).chain(withdrawal_requests).collect(), + ); + } // Collect all EIP-7251 requests let consolidation_requests = self.apply_consolidation_requests_contract_call(evm)?; - Ok(Requests::new(vec![withdrawal_requests, consolidation_requests])) + if !consolidation_requests.is_empty() { + requests.push_request( + core::iter::once(CONSOLIDATION_REQUEST_TYPE) + .chain(consolidation_requests) + .collect(), + ); + } + + Ok(requests) } /// Applies the pre-block call to the EIP-2935 blockhashes contract. 
@@ -182,7 +198,7 @@ where if let Some(res) = result_and_state { if let Some(ref mut hook) = self.hook { - hook.on_state(&res); + hook.on_state(&res.state); } evm.context.evm.db.commit(res.state); } @@ -237,7 +253,7 @@ where if let Some(res) = result_and_state { if let Some(ref mut hook) = self.hook { - hook.on_state(&res); + hook.on_state(&res.state); } evm.context.evm.db.commit(res.state); } @@ -276,7 +292,7 @@ where eip7002::transact_withdrawal_requests_contract_call(&self.evm_config.clone(), evm)?; if let Some(ref mut hook) = self.hook { - hook.on_state(&result_and_state); + hook.on_state(&result_and_state.state); } evm.context.evm.db.commit(result_and_state.state); @@ -314,7 +330,7 @@ where eip7251::transact_consolidation_requests_contract_call(&self.evm_config.clone(), evm)?; if let Some(ref mut hook) = self.hook { - hook.on_state(&result_and_state); + hook.on_state(&result_and_state.state); } evm.context.evm.db.commit(result_and_state.state); @@ -322,7 +338,7 @@ where } /// Delegate to stored `OnStateHook`, noop if hook is `None`. 
- pub fn on_state(&mut self, state: &ResultAndState) { + pub fn on_state(&mut self, state: &EvmState) { if let Some(ref mut hook) = &mut self.hook { hook.on_state(state); } diff --git a/crates/evm/src/test_utils.rs b/crates/evm/src/test_utils.rs index a4dc906494ce..22ba4a316e2e 100644 --- a/crates/evm/src/test_utils.rs +++ b/crates/evm/src/test_utils.rs @@ -12,7 +12,7 @@ use alloy_primitives::BlockNumber; use parking_lot::Mutex; use reth_execution_errors::BlockExecutionError; use reth_execution_types::ExecutionOutcome; -use reth_primitives::{BlockWithSenders, Receipt, Receipts}; +use reth_primitives::{BlockWithSenders, EthPrimitives, NodePrimitives, Receipt, Receipts}; use reth_prune_types::PruneModes; use reth_storage_errors::provider::ProviderError; use revm::State; @@ -33,6 +33,8 @@ impl MockExecutorProvider { } impl BlockExecutorProvider for MockExecutorProvider { + type Primitives = EthPrimitives; + type Executor + Display>> = Self; type BatchExecutor + Display>> = Self; @@ -116,15 +118,14 @@ impl BatchExecutor for MockExecutorProvider { } } -impl BasicBlockExecutor +impl BasicBlockExecutor where - S: BlockExecutionStrategy, - DB: Database, + S: BlockExecutionStrategy, { /// Provides safe read access to the state pub fn with_state(&self, f: F) -> R where - F: FnOnce(&State) -> R, + F: FnOnce(&State) -> R, { f(self.strategy.state_ref()) } @@ -132,21 +133,20 @@ where /// Provides safe write access to the state pub fn with_state_mut(&mut self, f: F) -> R where - F: FnOnce(&mut State) -> R, + F: FnOnce(&mut State) -> R, { f(self.strategy.state_mut()) } } -impl BasicBatchExecutor +impl BasicBatchExecutor where - S: BlockExecutionStrategy, - DB: Database, + S: BlockExecutionStrategy, { /// Provides safe read access to the state pub fn with_state(&self, f: F) -> R where - F: FnOnce(&State) -> R, + F: FnOnce(&State) -> R, { f(self.strategy.state_ref()) } @@ -154,13 +154,13 @@ where /// Provides safe write access to the state pub fn with_state_mut(&mut self, f: F) -> R 
where - F: FnOnce(&mut State) -> R, + F: FnOnce(&mut State) -> R, { f(self.strategy.state_mut()) } /// Accessor for batch executor receipts. - pub const fn receipts(&self) -> &Receipts { + pub const fn receipts(&self) -> &Receipts<::Receipt> { self.batch_record.receipts() } } diff --git a/crates/exex/exex/src/backfill/job.rs b/crates/exex/exex/src/backfill/job.rs index 7e670620472c..f93c5efa7212 100644 --- a/crates/exex/exex/src/backfill/job.rs +++ b/crates/exex/exex/src/backfill/job.rs @@ -9,7 +9,7 @@ use alloy_primitives::BlockNumber; use reth_evm::execute::{ BatchExecutor, BlockExecutionError, BlockExecutionOutput, BlockExecutorProvider, Executor, }; -use reth_node_api::{Block as _, BlockBody as _}; +use reth_node_api::{Block as _, BlockBody as _, NodePrimitives}; use reth_primitives::{BlockExt, BlockWithSenders, Receipt}; use reth_primitives_traits::{format_gas_throughput, SignedTransaction}; use reth_provider::{ @@ -38,12 +38,10 @@ pub struct BackfillJob { impl Iterator for BackfillJob where - E: BlockExecutorProvider, - P: HeaderProvider - + BlockReader - + StateProviderFactory, + E: BlockExecutorProvider>, + P: HeaderProvider + BlockReader + StateProviderFactory, { - type Item = BackfillJobResult; + type Item = BackfillJobResult>; fn next(&mut self) -> Option { if self.range.is_empty() { @@ -56,10 +54,8 @@ where impl BackfillJob where - E: BlockExecutorProvider, - P: BlockReader - + HeaderProvider - + StateProviderFactory, + E: BlockExecutorProvider>, + P: BlockReader + HeaderProvider + StateProviderFactory, { /// Converts the backfill job into a single block backfill job. pub fn into_single_blocks(self) -> SingleBlockBackfillJob { @@ -67,11 +63,11 @@ where } /// Converts the backfill job into a stream. 
- pub fn into_stream(self) -> StreamBackfillJob { + pub fn into_stream(self) -> StreamBackfillJob> { self.into() } - fn execute_range(&mut self) -> BackfillJobResult { + fn execute_range(&mut self) -> BackfillJobResult> { debug!( target: "exex::backfill", range = ?self.range, @@ -169,10 +165,13 @@ pub struct SingleBlockBackfillJob { impl Iterator for SingleBlockBackfillJob where - E: BlockExecutorProvider, - P: HeaderProvider + BlockReader + StateProviderFactory, + E: BlockExecutorProvider>, + P: HeaderProvider + BlockReader + StateProviderFactory, { - type Item = BackfillJobResult<(BlockWithSenders, BlockExecutionOutput)>; + type Item = BackfillJobResult<( + BlockWithSenders, + BlockExecutionOutput<::Receipt>, + )>; fn next(&mut self) -> Option { self.range.next().map(|block_number| self.execute_block(block_number)) @@ -181,8 +180,8 @@ where impl SingleBlockBackfillJob where - E: BlockExecutorProvider, - P: HeaderProvider + BlockReader + StateProviderFactory, + E: BlockExecutorProvider>, + P: HeaderProvider + BlockReader + StateProviderFactory, { /// Converts the single block backfill job into a stream. pub fn into_stream( @@ -191,10 +190,14 @@ where self.into() } + #[expect(clippy::type_complexity)] pub(crate) fn execute_block( &self, block_number: u64, - ) -> BackfillJobResult<(BlockWithSenders, BlockExecutionOutput)> { + ) -> BackfillJobResult<( + BlockWithSenders, + BlockExecutionOutput<::Receipt>, + )> { let td = self .provider .header_td_by_number(block_number)? 
diff --git a/crates/exex/exex/src/backfill/stream.rs b/crates/exex/exex/src/backfill/stream.rs index 46177ceda122..95da076c7c87 100644 --- a/crates/exex/exex/src/backfill/stream.rs +++ b/crates/exex/exex/src/backfill/stream.rs @@ -11,7 +11,8 @@ use futures::{ StreamExt, }; use reth_evm::execute::{BlockExecutionError, BlockExecutionOutput, BlockExecutorProvider}; -use reth_primitives::{BlockWithSenders, Receipt}; +use reth_node_api::NodePrimitives; +use reth_primitives::{BlockWithSenders, EthPrimitives}; use reth_provider::{BlockReader, Chain, HeaderProvider, StateProviderFactory}; use reth_prune_types::PruneModes; use reth_stages_api::ExecutionStageThresholds; @@ -38,8 +39,11 @@ struct BackfillTaskOutput { /// Ordered queue of [`JoinHandle`]s that yield [`BackfillTaskOutput`]s. type BackfillTasks = FuturesOrdered>>; -type SingleBlockStreamItem = (BlockWithSenders, BlockExecutionOutput); -type BatchBlockStreamItem = Chain; +type SingleBlockStreamItem = ( + BlockWithSenders<::Block>, + BlockExecutionOutput<::Receipt>, +); +type BatchBlockStreamItem = Chain; /// Stream for processing backfill jobs asynchronously. 
/// @@ -100,18 +104,12 @@ where } } -impl Stream for StreamBackfillJob +impl Stream for StreamBackfillJob> where - E: BlockExecutorProvider + Clone + Send + 'static, - P: HeaderProvider - + BlockReader - + StateProviderFactory - + Clone - + Send - + Unpin - + 'static, + E: BlockExecutorProvider> + Clone + Send + 'static, + P: HeaderProvider + BlockReader + StateProviderFactory + Clone + Send + Unpin + 'static, { - type Item = BackfillJobResult; + type Item = BackfillJobResult>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -139,18 +137,12 @@ where } } -impl Stream for StreamBackfillJob +impl Stream for StreamBackfillJob> where - E: BlockExecutorProvider + Clone + Send + 'static, - P: HeaderProvider - + BlockReader - + StateProviderFactory - + Clone - + Send - + Unpin - + 'static, + E: BlockExecutorProvider> + Clone + Send + 'static, + P: HeaderProvider + BlockReader + StateProviderFactory + Clone + Send + Unpin + 'static, { - type Item = BackfillJobResult; + type Item = BackfillJobResult>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -200,7 +192,10 @@ impl From> for StreamBackfillJob From> for StreamBackfillJob { +impl From> for StreamBackfillJob> +where + E: BlockExecutorProvider, +{ fn from(job: BackfillJob) -> Self { let batch_size = job.thresholds.max_blocks.map_or(DEFAULT_BATCH_SIZE, |max| max as usize); Self { diff --git a/crates/exex/exex/src/context.rs b/crates/exex/exex/src/context.rs index 3d303c9bbac0..f536ed515f9e 100644 --- a/crates/exex/exex/src/context.rs +++ b/crates/exex/exex/src/context.rs @@ -1,6 +1,6 @@ use crate::{ExExContextDyn, ExExEvent, ExExNotifications, ExExNotificationsStream}; use reth_exex_types::ExExHead; -use reth_node_api::{FullNodeComponents, NodeTypes}; +use reth_node_api::{FullNodeComponents, NodePrimitives, NodeTypes}; use reth_node_core::node_config::NodeConfig; use reth_primitives::Head; use reth_provider::BlockReader; @@ 
-57,11 +57,12 @@ where impl ExExContext where Node: FullNodeComponents, - Node::Provider: Debug + BlockReader, + Node::Provider: Debug + BlockReader, Node::Executor: Debug, + Node::Types: NodeTypes, { /// Returns dynamic version of the context - pub fn into_dyn(self) -> ExExContextDyn { + pub fn into_dyn(self) -> ExExContextDyn<::Primitives> { ExExContextDyn::from(self) } } @@ -69,6 +70,7 @@ where impl ExExContext where Node: FullNodeComponents, + Node::Types: NodeTypes, { /// Returns the transaction pool of the node. pub fn pool(&self) -> &Node::Pool { @@ -107,19 +109,13 @@ where /// Sets notifications stream to [`crate::ExExNotificationsWithoutHead`], a stream of /// notifications without a head. - pub fn set_notifications_without_head(&mut self) - where - Node::Provider: BlockReader, - { + pub fn set_notifications_without_head(&mut self) { self.notifications.set_without_head(); } /// Sets notifications stream to [`crate::ExExNotificationsWithHead`], a stream of notifications /// with the provided head. 
- pub fn set_notifications_with_head(&mut self, head: ExExHead) - where - Node::Provider: BlockReader, - { + pub fn set_notifications_with_head(&mut self, head: ExExHead) { self.notifications.set_with_head(head); } } @@ -142,7 +138,7 @@ mod tests { impl ExEx where - Node::Provider: BlockReader, + Node::Provider: BlockReader, { async fn _test_bounds(mut self) -> eyre::Result<()> { self.ctx.pool(); diff --git a/crates/exex/exex/src/dyn_context.rs b/crates/exex/exex/src/dyn_context.rs index 3ce0f488f40c..8bda75cac45d 100644 --- a/crates/exex/exex/src/dyn_context.rs +++ b/crates/exex/exex/src/dyn_context.rs @@ -4,8 +4,9 @@ use std::fmt::Debug; use reth_chainspec::{EthChainSpec, Head}; -use reth_node_api::FullNodeComponents; +use reth_node_api::{FullNodeComponents, HeaderTy, NodePrimitives, NodeTypes}; use reth_node_core::node_config::NodeConfig; +use reth_primitives::EthPrimitives; use reth_provider::BlockReader; use tokio::sync::mpsc; @@ -13,11 +14,11 @@ use crate::{ExExContext, ExExEvent, ExExNotificationsStream}; // TODO(0xurb) - add `node` after abstractions /// Captures the context that an `ExEx` has access to. -pub struct ExExContextDyn { +pub struct ExExContextDyn { /// The current head of the blockchain at launch. pub head: Head, /// The config of the node - pub config: NodeConfig>, + pub config: NodeConfig + 'static>>, /// The loaded node config pub reth_config: reth_config::Config, /// Channel used to send [`ExExEvent`]s to the rest of the node. @@ -34,10 +35,10 @@ pub struct ExExContextDyn { /// /// Once an [`ExExNotification`](crate::ExExNotification) is sent over the channel, it is /// considered delivered by the node. 
- pub notifications: Box, + pub notifications: Box>, } -impl Debug for ExExContextDyn { +impl Debug for ExExContextDyn { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("ExExContext") .field("head", &self.head) @@ -49,16 +50,17 @@ impl Debug for ExExContextDyn { } } -impl From> for ExExContextDyn +impl From> for ExExContextDyn<::Primitives> where - Node: FullNodeComponents, - Node::Provider: Debug + BlockReader, + Node: FullNodeComponents>, + Node::Provider: Debug + BlockReader, Node::Executor: Debug, { fn from(ctx: ExExContext) -> Self { - let config = - ctx.config.map_chainspec(|chainspec| Box::new(chainspec) as Box); - let notifications = Box::new(ctx.notifications) as Box; + let config = ctx.config.map_chainspec(|chainspec| { + Box::new(chainspec) as Box>> + }); + let notifications = Box::new(ctx.notifications) as Box<_>; Self { head: ctx.head, diff --git a/crates/exex/exex/src/manager.rs b/crates/exex/exex/src/manager.rs index ea5ddf2e8c62..16a930526141 100644 --- a/crates/exex/exex/src/manager.rs +++ b/crates/exex/exex/src/manager.rs @@ -1,14 +1,17 @@ use crate::{ wal::Wal, ExExEvent, ExExNotification, ExExNotifications, FinishedExExHeight, WalHandle, }; +use alloy_consensus::BlockHeader; use alloy_eips::BlockNumHash; use futures::StreamExt; use itertools::Itertools; use metrics::Gauge; use reth_chain_state::ForkChoiceStream; use reth_chainspec::Head; +use reth_evm::execute::BlockExecutorProvider; use reth_metrics::{metrics::Counter, Metrics}; -use reth_primitives::SealedHeader; +use reth_node_api::NodePrimitives; +use reth_primitives::{EthPrimitives, SealedHeader}; use reth_provider::HeaderProvider; use reth_tracing::tracing::{debug, warn}; use std::{ @@ -69,13 +72,13 @@ struct ExExMetrics { /// [`ExExHandle::new`] should be given to the `ExEx`, while the handle itself should be given to /// the manager in [`ExExManager::new`]. 
#[derive(Debug)] -pub struct ExExHandle { +pub struct ExExHandle { /// The execution extension's ID. id: String, /// Metrics for an `ExEx`. metrics: ExExMetrics, /// Channel to send [`ExExNotification`]s to the `ExEx`. - sender: PollSender, + sender: PollSender>, /// Channel to receive [`ExExEvent`]s from the `ExEx`. receiver: UnboundedReceiver, /// The ID of the next notification to send to this `ExEx`. @@ -86,17 +89,17 @@ pub struct ExExHandle { finished_height: Option, } -impl ExExHandle { +impl ExExHandle { /// Create a new handle for the given `ExEx`. /// /// Returns the handle, as well as a [`UnboundedSender`] for [`ExExEvent`]s and a /// [`mpsc::Receiver`] for [`ExExNotification`]s that should be given to the `ExEx`. - pub fn new( + pub fn new>( id: String, node_head: Head, provider: P, executor: E, - wal_handle: WalHandle, + wal_handle: WalHandle, ) -> (Self, UnboundedSender, ExExNotifications) { let (notification_tx, notification_rx) = mpsc::channel(1); let (event_tx, event_rx) = mpsc::unbounded_channel(); @@ -124,21 +127,21 @@ impl ExExHandle { fn send( &mut self, cx: &mut Context<'_>, - (notification_id, notification): &(usize, ExExNotification), - ) -> Poll>> { + (notification_id, notification): &(usize, ExExNotification), + ) -> Poll>>> { if let Some(finished_height) = self.finished_height { match notification { ExExNotification::ChainCommitted { new } => { // Skip the chain commit notification if the finished height of the ExEx is // higher than or equal to the tip of the new notification. // I.e., the ExEx has already processed the notification. 
- if finished_height.number >= new.tip().number { + if finished_height.number >= new.tip().number() { debug!( target: "exex::manager", exex_id = %self.id, %notification_id, ?finished_height, - new_tip = %new.tip().number, + new_tip = %new.tip().number(), "Skipping notification" ); @@ -208,15 +211,15 @@ pub struct ExExManagerMetrics { /// - Error handling /// - Monitoring #[derive(Debug)] -pub struct ExExManager

{ +pub struct ExExManager { /// Provider for querying headers. provider: P, /// Handles to communicate with the `ExEx`'s. - exex_handles: Vec, + exex_handles: Vec>, /// [`ExExNotification`] channel from the [`ExExManagerHandle`]s. - handle_rx: UnboundedReceiver<(ExExNotificationSource, ExExNotification)>, + handle_rx: UnboundedReceiver<(ExExNotificationSource, ExExNotification)>, /// The minimum notification ID currently present in the buffer. min_id: usize, @@ -226,7 +229,7 @@ pub struct ExExManager

{ /// /// The first element of the tuple is a monotonically increasing ID unique to the notification /// (the second element of the tuple). - buffer: VecDeque<(usize, ExExNotification)>, + buffer: VecDeque<(usize, ExExNotification)>, /// Max size of the internal state notifications buffer. max_capacity: usize, /// Current state notifications buffer capacity. @@ -241,17 +244,20 @@ pub struct ExExManager

{ finished_height: watch::Sender, /// Write-Ahead Log for the [`ExExNotification`]s. - wal: Wal, + wal: Wal, /// A stream of finalized headers. - finalized_header_stream: ForkChoiceStream, + finalized_header_stream: ForkChoiceStream>, /// A handle to the `ExEx` manager. - handle: ExExManagerHandle, + handle: ExExManagerHandle, /// Metrics for the `ExEx` manager. metrics: ExExManagerMetrics, } -impl

ExExManager

{ +impl ExExManager +where + N: NodePrimitives, +{ /// Create a new [`ExExManager`]. /// /// You must provide an [`ExExHandle`] for each `ExEx` and the maximum capacity of the @@ -261,10 +267,10 @@ impl

ExExManager

{ /// notifications over [`ExExManagerHandle`]s until there is capacity again. pub fn new( provider: P, - handles: Vec, + handles: Vec>, max_capacity: usize, - wal: Wal, - finalized_header_stream: ForkChoiceStream, + wal: Wal, + finalized_header_stream: ForkChoiceStream>, ) -> Self { let num_exexs = handles.len(); @@ -314,7 +320,7 @@ impl

ExExManager

{ } /// Returns the handle to the manager. - pub fn handle(&self) -> ExExManagerHandle { + pub fn handle(&self) -> ExExManagerHandle { self.handle.clone() } @@ -333,22 +339,23 @@ impl

ExExManager

{ /// Pushes a new notification into the managers internal buffer, assigning the notification a /// unique ID. - fn push_notification(&mut self, notification: ExExNotification) { + fn push_notification(&mut self, notification: ExExNotification) { let next_id = self.next_id; self.buffer.push_back((next_id, notification)); self.next_id += 1; } } -impl

ExExManager

+impl ExExManager where P: HeaderProvider, + N: NodePrimitives, { /// Finalizes the WAL according to the passed finalized header. /// /// This function checks if all ExExes are on the canonical chain and finalizes the WAL if /// necessary. - fn finalize_wal(&self, finalized_header: SealedHeader) -> eyre::Result<()> { + fn finalize_wal(&self, finalized_header: SealedHeader) -> eyre::Result<()> { debug!(target: "exex::manager", header = ?finalized_header.num_hash(), "Received finalized header"); // Check if all ExExes are on the canonical chain @@ -413,9 +420,10 @@ where } } -impl

Future for ExExManager

+impl Future for ExExManager where P: HeaderProvider + Unpin + 'static, + N: NodePrimitives, { type Output = eyre::Result<()>; @@ -456,8 +464,9 @@ where // Drain handle notifications while this.buffer.len() < this.max_capacity { if let Poll::Ready(Some((source, notification))) = this.handle_rx.poll_recv(cx) { - let committed_tip = notification.committed_chain().map(|chain| chain.tip().number); - let reverted_tip = notification.reverted_chain().map(|chain| chain.tip().number); + let committed_tip = + notification.committed_chain().map(|chain| chain.tip().number()); + let reverted_tip = notification.reverted_chain().map(|chain| chain.tip().number()); debug!(target: "exex::manager", ?committed_tip, ?reverted_tip, "Received new notification"); // Commit to WAL only notifications from blockchain tree. Pipeline notifications @@ -524,9 +533,9 @@ where /// A handle to communicate with the [`ExExManager`]. #[derive(Debug)] -pub struct ExExManagerHandle { +pub struct ExExManagerHandle { /// Channel to send notifications to the `ExEx` manager. - exex_tx: UnboundedSender<(ExExNotificationSource, ExExNotification)>, + exex_tx: UnboundedSender<(ExExNotificationSource, ExExNotification)>, /// The number of `ExEx`'s running on the node. num_exexs: usize, /// A watch channel denoting whether the manager is ready for new notifications or not. @@ -544,7 +553,7 @@ pub struct ExExManagerHandle { finished_height: watch::Receiver, } -impl ExExManagerHandle { +impl ExExManagerHandle { /// Creates an empty manager handle. /// /// Use this if there is no manager present. 
@@ -571,8 +580,8 @@ impl ExExManagerHandle { pub fn send( &self, source: ExExNotificationSource, - notification: ExExNotification, - ) -> Result<(), SendError<(ExExNotificationSource, ExExNotification)>> { + notification: ExExNotification, + ) -> Result<(), SendError<(ExExNotificationSource, ExExNotification)>> { self.exex_tx.send((source, notification)) } @@ -583,8 +592,8 @@ impl ExExManagerHandle { pub async fn send_async( &mut self, source: ExExNotificationSource, - notification: ExExNotification, - ) -> Result<(), SendError<(ExExNotificationSource, ExExNotification)>> { + notification: ExExNotification, + ) -> Result<(), SendError<(ExExNotificationSource, ExExNotification)>> { self.ready().await; self.exex_tx.send((source, notification)) } @@ -633,7 +642,7 @@ async fn make_wait_future(mut rx: watch::Receiver) -> watch::Receiver Clone for ExExManagerHandle { fn clone(&self) -> Self { Self { exex_tx: self.exex_tx.clone(), @@ -653,6 +662,7 @@ mod tests { use futures::{StreamExt, TryStreamExt}; use rand::Rng; use reth_db_common::init::init_genesis; + use reth_evm::test_utils::MockExecutorProvider; use reth_evm_ethereum::execute::EthExecutorProvider; use reth_primitives::SealedBlockWithSenders; use reth_provider::{ @@ -673,8 +683,13 @@ mod tests { let temp_dir = tempfile::tempdir().unwrap(); let wal = Wal::new(temp_dir.path()).unwrap(); - let (mut exex_handle, event_tx, mut _notification_rx) = - ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); + let (mut exex_handle, event_tx, mut _notification_rx) = ExExHandle::new( + "test_exex".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + wal.handle(), + ); // Send an event and check that it's delivered correctly let event = ExExEvent::FinishedHeight(BlockNumHash::new(42, B256::random())); @@ -688,8 +703,13 @@ mod tests { let temp_dir = tempfile::tempdir().unwrap(); let wal = Wal::new(temp_dir.path()).unwrap(); - let (exex_handle_1, _, _) = - 
ExExHandle::new("test_exex_1".to_string(), Head::default(), (), (), wal.handle()); + let (exex_handle_1, _, _) = ExExHandle::new( + "test_exex_1".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + wal.handle(), + ); assert!(!ExExManager::new((), vec![], 0, wal.clone(), empty_finalized_header_stream()) .handle @@ -705,8 +725,13 @@ mod tests { let temp_dir = tempfile::tempdir().unwrap(); let wal = Wal::new(temp_dir.path()).unwrap(); - let (exex_handle_1, _, _) = - ExExHandle::new("test_exex_1".to_string(), Head::default(), (), (), wal.handle()); + let (exex_handle_1, _, _) = ExExHandle::new( + "test_exex_1".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + wal.handle(), + ); assert!(!ExExManager::new((), vec![], 0, wal.clone(), empty_finalized_header_stream()) .handle @@ -728,8 +753,13 @@ mod tests { let temp_dir = tempfile::tempdir().unwrap(); let wal = Wal::new(temp_dir.path()).unwrap(); - let (exex_handle, _, _) = - ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); + let (exex_handle, _, _) = ExExHandle::new( + "test_exex".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + wal.handle(), + ); // Create a mock ExExManager and add the exex_handle to it let mut exex_manager = @@ -778,8 +808,13 @@ mod tests { let temp_dir = tempfile::tempdir().unwrap(); let wal = Wal::new(temp_dir.path()).unwrap(); - let (exex_handle, _, _) = - ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); + let (exex_handle, _, _) = ExExHandle::new( + "test_exex".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + wal.handle(), + ); // Create a mock ExExManager and add the exex_handle to it let max_capacity = 5; @@ -824,8 +859,13 @@ mod tests { let provider_factory = create_test_provider_factory(); - let (exex_handle, event_tx, mut _notification_rx) = - ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); + let 
(exex_handle, event_tx, mut _notification_rx) = ExExHandle::new( + "test_exex".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + wal.handle(), + ); // Check initial block height assert!(exex_handle.finished_height.is_none()); @@ -874,10 +914,20 @@ mod tests { let provider_factory = create_test_provider_factory(); // Create two `ExExHandle` instances - let (exex_handle1, event_tx1, _) = - ExExHandle::new("test_exex1".to_string(), Head::default(), (), (), wal.handle()); - let (exex_handle2, event_tx2, _) = - ExExHandle::new("test_exex2".to_string(), Head::default(), (), (), wal.handle()); + let (exex_handle1, event_tx1, _) = ExExHandle::new( + "test_exex1".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + wal.handle(), + ); + let (exex_handle2, event_tx2, _) = ExExHandle::new( + "test_exex2".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + wal.handle(), + ); let block1 = BlockNumHash::new(42, B256::random()); let block2 = BlockNumHash::new(10, B256::random()); @@ -921,10 +971,20 @@ mod tests { let provider_factory = create_test_provider_factory(); // Create two `ExExHandle` instances - let (exex_handle1, event_tx1, _) = - ExExHandle::new("test_exex1".to_string(), Head::default(), (), (), wal.handle()); - let (exex_handle2, event_tx2, _) = - ExExHandle::new("test_exex2".to_string(), Head::default(), (), (), wal.handle()); + let (exex_handle1, event_tx1, _) = ExExHandle::new( + "test_exex1".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + wal.handle(), + ); + let (exex_handle2, event_tx2, _) = ExExHandle::new( + "test_exex2".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + wal.handle(), + ); // Assert that the initial block height is `None` for the first `ExExHandle`. 
assert!(exex_handle1.finished_height.is_none()); @@ -974,8 +1034,13 @@ mod tests { let provider_factory = create_test_provider_factory(); - let (exex_handle_1, _, _) = - ExExHandle::new("test_exex_1".to_string(), Head::default(), (), (), wal.handle()); + let (exex_handle_1, _, _) = ExExHandle::new( + "test_exex_1".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + wal.handle(), + ); // Create an ExExManager with a small max capacity let max_capacity = 2; diff --git a/crates/exex/exex/src/notifications.rs b/crates/exex/exex/src/notifications.rs index 954a057fc09c..05892e2f90d5 100644 --- a/crates/exex/exex/src/notifications.rs +++ b/crates/exex/exex/src/notifications.rs @@ -1,8 +1,11 @@ use crate::{BackfillJobFactory, ExExNotification, StreamBackfillJob, WalHandle}; +use alloy_consensus::BlockHeader; use futures::{Stream, StreamExt}; use reth_chainspec::Head; use reth_evm::execute::BlockExecutorProvider; use reth_exex_types::ExExHead; +use reth_node_api::NodePrimitives; +use reth_primitives::EthPrimitives; use reth_provider::{BlockReader, Chain, HeaderProvider, StateProviderFactory}; use reth_tracing::tracing::debug; use std::{ @@ -17,14 +20,19 @@ use tokio::sync::mpsc::Receiver; /// stream is configured with a head via [`ExExNotifications::set_with_head`] or /// [`ExExNotifications::with_head`], it will run backfill jobs to catch up to the node head. #[derive(Debug)] -pub struct ExExNotifications { +pub struct ExExNotifications +where + E: BlockExecutorProvider, +{ inner: ExExNotificationsInner, } /// A trait, that represents a stream of [`ExExNotification`]s. The stream will emit notifications /// for all blocks. If the stream is configured with a head via [`ExExNotifications::set_with_head`] /// or [`ExExNotifications::with_head`], it will run backfill jobs to catch up to the node head. 
-pub trait ExExNotificationsStream: Stream> + Unpin { +pub trait ExExNotificationsStream: + Stream>> + Unpin +{ /// Sets [`ExExNotificationsStream`] to a stream of [`ExExNotification`]s without a head. /// /// It's a no-op if the stream has already been configured without a head. @@ -56,7 +64,10 @@ pub trait ExExNotificationsStream: Stream> } #[derive(Debug)] -enum ExExNotificationsInner { +enum ExExNotificationsInner +where + E: BlockExecutorProvider, +{ /// A stream of [`ExExNotification`]s. The stream will emit notifications for all blocks. WithoutHead(ExExNotificationsWithoutHead), /// A stream of [`ExExNotification`]s. The stream will only emit notifications for blocks that @@ -67,14 +78,17 @@ enum ExExNotificationsInner { Invalid, } -impl ExExNotifications { +impl ExExNotifications +where + E: BlockExecutorProvider, +{ /// Creates a new stream of [`ExExNotifications`] without a head. pub const fn new( node_head: Head, provider: P, executor: E, - notifications: Receiver, - wal_handle: WalHandle, + notifications: Receiver>, + wal_handle: WalHandle, ) -> Self { Self { inner: ExExNotificationsInner::WithoutHead(ExExNotificationsWithoutHead::new( @@ -88,15 +102,13 @@ impl ExExNotifications { } } -impl ExExNotificationsStream for ExExNotifications +impl ExExNotificationsStream for ExExNotifications where - P: BlockReader - + HeaderProvider - + StateProviderFactory + P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, + E: BlockExecutorProvider> + Clone + Unpin + 'static, - E: BlockExecutorProvider + Clone + Unpin + 'static, { fn set_without_head(&mut self) { let current = std::mem::replace(&mut self.inner, ExExNotificationsInner::Invalid); @@ -144,15 +156,13 @@ where impl Stream for ExExNotifications where - P: BlockReader - + HeaderProvider - + StateProviderFactory + P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, + E: BlockExecutorProvider> + Clone + Unpin + 'static, - E: BlockExecutorProvider + 
Clone + Unpin + 'static, { - type Item = eyre::Result; + type Item = eyre::Result>; fn poll_next( self: std::pin::Pin<&mut Self>, @@ -169,15 +179,21 @@ where } /// A stream of [`ExExNotification`]s. The stream will emit notifications for all blocks. -pub struct ExExNotificationsWithoutHead { +pub struct ExExNotificationsWithoutHead +where + E: BlockExecutorProvider, +{ node_head: Head, provider: P, executor: E, - notifications: Receiver, - wal_handle: WalHandle, + notifications: Receiver>, + wal_handle: WalHandle, } -impl Debug for ExExNotificationsWithoutHead { +impl Debug for ExExNotificationsWithoutHead +where + E: Debug + BlockExecutorProvider, +{ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("ExExNotifications") .field("provider", &self.provider) @@ -187,14 +203,17 @@ impl Debug for ExExNotificationsWithoutHead { } } -impl ExExNotificationsWithoutHead { +impl ExExNotificationsWithoutHead +where + E: BlockExecutorProvider, +{ /// Creates a new instance of [`ExExNotificationsWithoutHead`]. const fn new( node_head: Head, provider: P, executor: E, - notifications: Receiver, - wal_handle: WalHandle, + notifications: Receiver>, + wal_handle: WalHandle, ) -> Self { Self { node_head, provider, executor, notifications, wal_handle } } @@ -212,8 +231,11 @@ impl ExExNotificationsWithoutHead { } } -impl Stream for ExExNotificationsWithoutHead { - type Item = ExExNotification; +impl Stream for ExExNotificationsWithoutHead +where + E: Unpin + BlockExecutorProvider, +{ + type Item = ExExNotification; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { self.get_mut().notifications.poll_recv(cx) @@ -229,12 +251,15 @@ impl Stream for ExExNotificationsWithoutHead { /// `exex_head.number` of 10 indicates that the ExEx has processed up to block 10, and is ready to /// process block 11. 
#[derive(Debug)] -pub struct ExExNotificationsWithHead { +pub struct ExExNotificationsWithHead +where + E: BlockExecutorProvider, +{ node_head: Head, provider: P, executor: E, - notifications: Receiver, - wal_handle: WalHandle, + notifications: Receiver>, + wal_handle: WalHandle, exex_head: ExExHead, /// If true, then we need to check if the ExEx head is on the canonical chain and if not, /// revert its head. @@ -243,17 +268,20 @@ pub struct ExExNotificationsWithHead { /// the missing blocks. pending_check_backfill: bool, /// The backfill job to run before consuming any notifications. - backfill_job: Option>, + backfill_job: Option>>, } -impl ExExNotificationsWithHead { +impl ExExNotificationsWithHead +where + E: BlockExecutorProvider, +{ /// Creates a new [`ExExNotificationsWithHead`]. const fn new( node_head: Head, provider: P, executor: E, - notifications: Receiver, - wal_handle: WalHandle, + notifications: Receiver>, + wal_handle: WalHandle, exex_head: ExExHead, ) -> Self { Self { @@ -272,20 +300,18 @@ impl ExExNotificationsWithHead { impl ExExNotificationsWithHead where - P: BlockReader - + HeaderProvider - + StateProviderFactory + P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, + E: BlockExecutorProvider> + Clone + Unpin + 'static, - E: BlockExecutorProvider + Clone + Unpin + 'static, { /// Checks if the ExEx head is on the canonical chain. /// /// If the head block is not found in the database or it's ahead of the node head, it means /// we're not on the canonical chain and we need to revert the notification with the ExEx /// head block. - fn check_canonical(&mut self) -> eyre::Result> { + fn check_canonical(&mut self) -> eyre::Result>> { if self.provider.is_known(&self.exex_head.block.hash)? && self.exex_head.block.number <= self.node_head.number { @@ -309,7 +335,7 @@ where // Update the head block hash to the parent hash of the first committed block. 
let committed_chain = notification.committed_chain().unwrap(); let new_exex_head = - (committed_chain.first().parent_hash, committed_chain.first().number - 1).into(); + (committed_chain.first().parent_hash(), committed_chain.first().number() - 1).into(); debug!(target: "exex::notifications", old_exex_head = ?self.exex_head.block, new_exex_head = ?new_exex_head, "ExEx head updated"); self.exex_head.block = new_exex_head; @@ -354,15 +380,13 @@ where impl Stream for ExExNotificationsWithHead where - P: BlockReader - + HeaderProvider - + StateProviderFactory + P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, + E: BlockExecutorProvider> + Clone + Unpin + 'static, - E: BlockExecutorProvider + Clone + Unpin + 'static, { - type Item = eyre::Result; + type Item = eyre::Result>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -402,7 +426,7 @@ where this.exex_head.block = committed_chain.tip().num_hash(); } else if let Some(reverted_chain) = notification.reverted_chain() { let first_block = reverted_chain.first(); - this.exex_head.block = (first_block.parent_hash, first_block.number - 1).into(); + this.exex_head.block = (first_block.parent_hash(), first_block.number() - 1).into(); } Poll::Ready(Some(Ok(notification))) diff --git a/crates/exex/exex/src/wal/cache.rs b/crates/exex/exex/src/wal/cache.rs index 86943f33cfa0..b5e0f2034e8d 100644 --- a/crates/exex/exex/src/wal/cache.rs +++ b/crates/exex/exex/src/wal/cache.rs @@ -3,9 +3,11 @@ use std::{ collections::{BinaryHeap, HashSet}, }; +use alloy_consensus::BlockHeader; use alloy_eips::BlockNumHash; use alloy_primitives::{map::FbHashMap, BlockNumber, B256}; use reth_exex_types::ExExNotification; +use reth_node_api::NodePrimitives; /// The block cache of the WAL. /// @@ -91,16 +93,16 @@ impl BlockCache { } /// Inserts the blocks from the notification into the cache with the given file ID. 
- pub(super) fn insert_notification_blocks_with_file_id( + pub(super) fn insert_notification_blocks_with_file_id( &mut self, file_id: u32, - notification: &ExExNotification, + notification: &ExExNotification, ) { let reverted_chain = notification.reverted_chain(); let committed_chain = notification.committed_chain(); let max_block = - reverted_chain.iter().chain(&committed_chain).map(|chain| chain.tip().number).max(); + reverted_chain.iter().chain(&committed_chain).map(|chain| chain.tip().number()).max(); if let Some(max_block) = max_block { self.notification_max_blocks.push(Reverse((max_block, file_id))); } @@ -108,13 +110,13 @@ impl BlockCache { if let Some(committed_chain) = &committed_chain { for block in committed_chain.blocks().values() { let cached_block = CachedBlock { - block: (block.number, block.hash()).into(), - parent_hash: block.parent_hash, + block: (block.number(), block.hash()).into(), + parent_hash: block.parent_hash(), }; self.committed_blocks.insert(block.hash(), (file_id, cached_block)); } - self.highest_committed_block_height = Some(committed_chain.tip().number); + self.highest_committed_block_height = Some(committed_chain.tip().number()); } } diff --git a/crates/exex/exex/src/wal/mod.rs b/crates/exex/exex/src/wal/mod.rs index 066fbe1b58c1..fb6be6e8c852 100644 --- a/crates/exex/exex/src/wal/mod.rs +++ b/crates/exex/exex/src/wal/mod.rs @@ -3,6 +3,8 @@ mod cache; pub use cache::BlockCache; mod storage; +use reth_node_api::NodePrimitives; +use reth_primitives::EthPrimitives; pub use storage::Storage; mod metrics; use metrics::Metrics; @@ -32,23 +34,26 @@ use reth_tracing::tracing::{debug, instrument}; /// 2. When the chain is finalized, call [`Wal::finalize`] to prevent the infinite growth of the /// WAL. #[derive(Debug, Clone)] -pub struct Wal { - inner: Arc, +pub struct Wal { + inner: Arc>, } -impl Wal { +impl Wal +where + N: NodePrimitives, +{ /// Creates a new instance of [`Wal`]. 
pub fn new(directory: impl AsRef) -> eyre::Result { Ok(Self { inner: Arc::new(WalInner::new(directory)?) }) } /// Returns a read-only handle to the WAL. - pub fn handle(&self) -> WalHandle { + pub fn handle(&self) -> WalHandle { WalHandle { wal: self.inner.clone() } } /// Commits the notification to WAL. - pub fn commit(&self, notification: &ExExNotification) -> eyre::Result<()> { + pub fn commit(&self, notification: &ExExNotification) -> eyre::Result<()> { self.inner.commit(notification) } @@ -63,7 +68,7 @@ impl Wal { /// Returns an iterator over all notifications in the WAL. pub fn iter_notifications( &self, - ) -> eyre::Result> + '_>> { + ) -> eyre::Result>> + '_>> { self.inner.iter_notifications() } @@ -75,16 +80,19 @@ impl Wal { /// Inner type for the WAL. #[derive(Debug)] -struct WalInner { +struct WalInner { next_file_id: AtomicU32, /// The underlying WAL storage backed by a file. - storage: Storage, + storage: Storage, /// WAL block cache. See [`cache::BlockCache`] docs for more details. block_cache: RwLock, metrics: Metrics, } -impl WalInner { +impl WalInner +where + N: NodePrimitives, +{ fn new(directory: impl AsRef) -> eyre::Result { let mut wal = Self { next_file_id: AtomicU32::new(0), @@ -137,7 +145,7 @@ impl WalInner { reverted_block_range = ?notification.reverted_chain().as_ref().map(|chain| chain.range()), committed_block_range = ?notification.committed_chain().as_ref().map(|chain| chain.range()) ))] - fn commit(&self, notification: &ExExNotification) -> eyre::Result<()> { + fn commit(&self, notification: &ExExNotification) -> eyre::Result<()> { let mut block_cache = self.block_cache.write(); let file_id = self.next_file_id.fetch_add(1, Ordering::Relaxed); @@ -187,7 +195,7 @@ impl WalInner { /// Returns an iterator over all notifications in the WAL. fn iter_notifications( &self, - ) -> eyre::Result> + '_>> { + ) -> eyre::Result>> + '_>> { let Some(range) = self.storage.files_range()? 
else { return Ok(Box::new(std::iter::empty())) }; @@ -198,16 +206,19 @@ impl WalInner { /// A read-only handle to the WAL that can be shared. #[derive(Debug)] -pub struct WalHandle { - wal: Arc, +pub struct WalHandle { + wal: Arc>, } -impl WalHandle { +impl WalHandle +where + N: NodePrimitives, +{ /// Returns the notification for the given committed block hash if it exists. pub fn get_committed_notification_by_block_hash( &self, block_hash: &B256, - ) -> eyre::Result> { + ) -> eyre::Result>> { let Some(file_id) = self.wal.block_cache().get_file_id_by_committed_block_hash(block_hash) else { return Ok(None) diff --git a/crates/exex/exex/src/wal/storage.rs b/crates/exex/exex/src/wal/storage.rs index aaa4398fd0b6..699d88ba2a74 100644 --- a/crates/exex/exex/src/wal/storage.rs +++ b/crates/exex/exex/src/wal/storage.rs @@ -6,6 +6,8 @@ use std::{ use eyre::OptionExt; use reth_exex_types::ExExNotification; +use reth_node_api::NodePrimitives; +use reth_primitives::EthPrimitives; use reth_tracing::tracing::debug; use tracing::instrument; @@ -16,18 +18,22 @@ static FILE_EXTENSION: &str = "wal"; /// Each notification is represented by a single file that contains a MessagePack-encoded /// notification. #[derive(Debug, Clone)] -pub struct Storage { +pub struct Storage { /// The path to the WAL file. path: PathBuf, + _pd: std::marker::PhantomData, } -impl Storage { +impl Storage +where + N: NodePrimitives, +{ /// Creates a new instance of [`Storage`] backed by the file at the given path and creates /// it doesn't exist. 
pub(super) fn new(path: impl AsRef) -> eyre::Result { reth_fs_util::create_dir_all(&path)?; - Ok(Self { path: path.as_ref().to_path_buf() }) + Ok(Self { path: path.as_ref().to_path_buf(), _pd: std::marker::PhantomData }) } fn file_path(&self, id: u32) -> PathBuf { @@ -110,7 +116,7 @@ impl Storage { pub(super) fn iter_notifications( &self, range: RangeInclusive, - ) -> impl Iterator> + '_ { + ) -> impl Iterator)>> + '_ { range.map(move |id| { let (notification, size) = self.read_notification(id)?.ok_or_eyre("notification {id} not found")?; @@ -124,7 +130,7 @@ impl Storage { pub(super) fn read_notification( &self, file_id: u32, - ) -> eyre::Result> { + ) -> eyre::Result, u64)>> { let file_path = self.file_path(file_id); debug!(target: "exex::wal::storage", ?file_path, "Reading notification from WAL"); @@ -136,7 +142,7 @@ impl Storage { let size = file.metadata()?.len(); // Deserialize using the bincode- and msgpack-compatible serde wrapper - let notification: reth_exex_types::serde_bincode_compat::ExExNotification<'_> = + let notification: reth_exex_types::serde_bincode_compat::ExExNotification<'_, N> = rmp_serde::decode::from_read(&mut file).map_err(|err| { eyre::eyre!("failed to decode notification from {file_path:?}: {err:?}") })?; @@ -153,14 +159,14 @@ impl Storage { pub(super) fn write_notification( &self, file_id: u32, - notification: &ExExNotification, + notification: &ExExNotification, ) -> eyre::Result { let file_path = self.file_path(file_id); debug!(target: "exex::wal::storage", ?file_path, "Writing notification to WAL"); // Serialize using the bincode- and msgpack-compatible serde wrapper let notification = - reth_exex_types::serde_bincode_compat::ExExNotification::from(notification); + reth_exex_types::serde_bincode_compat::ExExNotification::::from(notification); reth_fs_util::atomic_write_file(&file_path, |file| { rmp_serde::encode::write(file, ¬ification) @@ -186,7 +192,7 @@ mod tests { let mut rng = generators::rng(); let temp_dir = 
tempfile::tempdir()?; - let storage = Storage::new(&temp_dir)?; + let storage: Storage = Storage::new(&temp_dir)?; let old_block = random_block(&mut rng, 0, Default::default()) .seal_with_senders() @@ -215,7 +221,7 @@ mod tests { #[test] fn test_files_range() -> eyre::Result<()> { let temp_dir = tempfile::tempdir()?; - let storage = Storage::new(&temp_dir)?; + let storage: Storage = Storage::new(&temp_dir)?; // Create WAL files File::create(storage.file_path(1))?; diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index ca0ea46551c5..939bf21c0223 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -30,7 +30,8 @@ use reth_execution_types::Chain; use reth_exex::{ExExContext, ExExEvent, ExExNotification, ExExNotifications, Wal}; use reth_network::{config::SecretKey, NetworkConfigBuilder, NetworkManager}; use reth_node_api::{ - FullNodeTypes, FullNodeTypesAdapter, NodeTypes, NodeTypesWithDBAdapter, NodeTypesWithEngine, + FullNodeTypes, FullNodeTypesAdapter, NodePrimitives, NodeTypes, NodeTypesWithDBAdapter, + NodeTypesWithEngine, }; use reth_node_builder::{ components::{ @@ -45,7 +46,7 @@ use reth_node_ethereum::{ EthEngineTypes, EthEvmConfig, }; use reth_payload_builder::noop::NoopPayloadBuilderService; -use reth_primitives::{BlockExt, EthPrimitives, Head, SealedBlockWithSenders}; +use reth_primitives::{BlockExt, EthPrimitives, Head, SealedBlockWithSenders, TransactionSigned}; use reth_provider::{ providers::{BlockchainProvider, StaticFileProvider}, BlockReader, EthStorage, ProviderFactory, @@ -64,7 +65,7 @@ pub struct TestPoolBuilder; impl PoolBuilder for TestPoolBuilder where - Node: FullNodeTypes, + Node: FullNodeTypes>>, { type Pool = TestPool; @@ -80,7 +81,7 @@ pub struct TestExecutorBuilder; impl ExecutorBuilder for TestExecutorBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes>, { type EVM = EthEvmConfig; type Executor = MockExecutorProvider; diff --git a/crates/exex/types/Cargo.toml 
b/crates/exex/types/Cargo.toml index 3b67fd5aa500..b7e659d80a81 100644 --- a/crates/exex/types/Cargo.toml +++ b/crates/exex/types/Cargo.toml @@ -15,6 +15,7 @@ workspace = true # reth reth-chain-state.workspace = true reth-execution-types.workspace = true +reth-primitives = { workspace = true, optional = true } reth-primitives-traits.workspace = true # reth diff --git a/crates/exex/types/src/notification.rs b/crates/exex/types/src/notification.rs index fb0762f04b3e..19e47c0a1da8 100644 --- a/crates/exex/types/src/notification.rs +++ b/crates/exex/types/src/notification.rs @@ -7,30 +7,30 @@ use reth_primitives_traits::NodePrimitives; /// Notifications sent to an `ExEx`. #[derive(Debug, Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub enum ExExNotification { +pub enum ExExNotification { /// Chain got committed without a reorg, and only the new chain is returned. ChainCommitted { /// The new chain after commit. - new: Arc>, + new: Arc>, }, /// Chain got reorged, and both the old and the new chains are returned. ChainReorged { /// The old chain before reorg. - old: Arc>, + old: Arc>, /// The new chain after reorg. - new: Arc>, + new: Arc>, }, /// Chain got reverted, and only the old chain is returned. ChainReverted { /// The old chain before reversion. - old: Arc>, + old: Arc>, }, } -impl ExExNotification { +impl ExExNotification { /// Returns the committed chain from the [`Self::ChainCommitted`] and [`Self::ChainReorged`] /// variants, if any. - pub fn committed_chain(&self) -> Option> { + pub fn committed_chain(&self) -> Option>> { match self { Self::ChainCommitted { new } | Self::ChainReorged { old: _, new } => Some(new.clone()), Self::ChainReverted { .. } => None, @@ -39,7 +39,7 @@ impl ExExNotification { /// Returns the reverted chain from the [`Self::ChainReorged`] and [`Self::ChainReverted`] /// variants, if any. 
- pub fn reverted_chain(&self) -> Option> { + pub fn reverted_chain(&self) -> Option>> { match self { Self::ChainReorged { old, new: _ } | Self::ChainReverted { old } => Some(old.clone()), Self::ChainCommitted { .. } => None, @@ -73,11 +73,11 @@ impl From> for ExExNotification

/// Bincode-compatible [`ExExNotification`] serde implementation. #[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] pub(super) mod serde_bincode_compat { - use std::sync::Arc; - use reth_execution_types::serde_bincode_compat::Chain; + use reth_primitives::{EthPrimitives, NodePrimitives}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; + use std::sync::Arc; /// Bincode-compatible [`super::ExExNotification`] serde implementation. /// @@ -96,14 +96,21 @@ pub(super) mod serde_bincode_compat { /// ``` #[derive(Debug, Serialize, Deserialize)] #[allow(missing_docs)] - pub enum ExExNotification<'a> { - ChainCommitted { new: Chain<'a> }, - ChainReorged { old: Chain<'a>, new: Chain<'a> }, - ChainReverted { old: Chain<'a> }, + #[serde(bound = "")] + pub enum ExExNotification<'a, N = EthPrimitives> + where + N: NodePrimitives, + { + ChainCommitted { new: Chain<'a, N> }, + ChainReorged { old: Chain<'a, N>, new: Chain<'a, N> }, + ChainReverted { old: Chain<'a, N> }, } - impl<'a> From<&'a super::ExExNotification> for ExExNotification<'a> { - fn from(value: &'a super::ExExNotification) -> Self { + impl<'a, N> From<&'a super::ExExNotification> for ExExNotification<'a, N> + where + N: NodePrimitives, + { + fn from(value: &'a super::ExExNotification) -> Self { match value { super::ExExNotification::ChainCommitted { new } => { ExExNotification::ChainCommitted { new: Chain::from(new.as_ref()) } @@ -121,8 +128,11 @@ pub(super) mod serde_bincode_compat { } } - impl<'a> From> for super::ExExNotification { - fn from(value: ExExNotification<'a>) -> Self { + impl<'a, N> From> for super::ExExNotification + where + N: NodePrimitives, + { + fn from(value: ExExNotification<'a, N>) -> Self { match value { ExExNotification::ChainCommitted { new } => { Self::ChainCommitted { new: Arc::new(new.into()) } @@ -160,16 +170,14 @@ pub(super) mod serde_bincode_compat { #[cfg(test)] mod tests { - use std::sync::Arc; - + use 
super::super::{serde_bincode_compat, ExExNotification}; use arbitrary::Arbitrary; use rand::Rng; use reth_execution_types::Chain; use reth_primitives::SealedBlockWithSenders; use serde::{Deserialize, Serialize}; use serde_with::serde_as; - - use super::super::{serde_bincode_compat, ExExNotification}; + use std::sync::Arc; #[test] fn test_exex_notification_bincode_roundtrip() { diff --git a/crates/net/discv4/Cargo.toml b/crates/net/discv4/Cargo.toml index f1c8410eeba9..70946c6dce87 100644 --- a/crates/net/discv4/Cargo.toml +++ b/crates/net/discv4/Cargo.toml @@ -19,7 +19,7 @@ reth-net-nat.workspace = true reth-network-peers = { workspace = true, features = ["secp256k1"] } # ethereum -alloy-primitives.workspace = true +alloy-primitives = { workspace = true, features = ["rand"] } alloy-rlp = { workspace = true, features = ["derive"] } discv5.workspace = true secp256k1 = { workspace = true, features = [ @@ -60,6 +60,7 @@ serde = [ "generic-array/serde", "parking_lot/serde", "rand?/serde", - "secp256k1/serde" + "secp256k1/serde", + "reth-ethereum-forks/serde" ] test-utils = ["dep:rand"] diff --git a/crates/net/dns/Cargo.toml b/crates/net/dns/Cargo.toml index a52f65057443..2f71354a7dd8 100644 --- a/crates/net/dns/Cargo.toml +++ b/crates/net/dns/Cargo.toml @@ -58,5 +58,6 @@ serde = [ "parking_lot/serde", "rand/serde", "secp256k1/serde", - "trust-dns-resolver/serde" + "trust-dns-resolver/serde", + "reth-ethereum-forks/serde" ] diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index 82f45dd23bfe..682995e7eb3a 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -1,5 +1,6 @@ use super::queue::BodiesRequestQueue; use crate::{bodies::task::TaskDownloader, metrics::BodyDownloaderMetrics}; +use alloy_consensus::BlockHeader; use alloy_primitives::BlockNumber; use futures::Stream; use futures_util::StreamExt; @@ -14,7 +15,7 @@ use reth_network_p2p::{ error::{DownloadError, 
DownloadResult}, }; use reth_primitives::SealedHeader; -use reth_primitives_traits::size::InMemorySize; +use reth_primitives_traits::{size::InMemorySize, BlockHeader as _}; use reth_storage_api::HeaderProvider; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use std::{ @@ -71,7 +72,7 @@ where Provider: HeaderProvider + Unpin + 'static, { /// Returns the next contiguous request. - fn next_headers_request(&self) -> DownloadResult>> { + fn next_headers_request(&self) -> DownloadResult>>> { let start_at = match self.in_progress_queue.last_requested_block_number { Some(num) => num + 1, None => *self.download_range.start(), @@ -96,7 +97,7 @@ where &self, range: RangeInclusive, max_non_empty: u64, - ) -> DownloadResult>> { + ) -> DownloadResult>>> { if range.is_empty() || max_non_empty == 0 { return Ok(None) } @@ -109,7 +110,7 @@ where let mut collected = 0; let mut non_empty_headers = 0; let headers = self.provider.sealed_headers_while(range.clone(), |header| { - let should_take = range.contains(&header.number) && + let should_take = range.contains(&header.number()) && non_empty_headers < max_non_empty && collected < self.stream_batch_size; @@ -300,7 +301,7 @@ where impl BodyDownloader for BodiesDownloader where B: BodiesClient + 'static, - Provider: HeaderProvider + Unpin + 'static, + Provider: HeaderProvider

+ Unpin + 'static, { type Body = B::Body; @@ -350,7 +351,7 @@ where impl Stream for BodiesDownloader where B: BodiesClient + 'static, - Provider: HeaderProvider + Unpin + 'static, + Provider: HeaderProvider
+ Unpin + 'static, { type Item = BodyDownloaderResult; diff --git a/crates/net/downloaders/src/bodies/task.rs b/crates/net/downloaders/src/bodies/task.rs index a2b63c8ed186..89af9813e3cc 100644 --- a/crates/net/downloaders/src/bodies/task.rs +++ b/crates/net/downloaders/src/bodies/task.rs @@ -52,10 +52,10 @@ impl TaskDownloader { /// /// fn t< /// B: BodiesClient + 'static, - /// Provider: HeaderProvider + Unpin + 'static, + /// Provider: HeaderProvider
+ Unpin + 'static, /// >( /// client: Arc, - /// consensus: Arc>, + /// consensus: Arc>, /// provider: Provider, /// ) { /// let downloader = BodiesDownloaderBuilder::default().build(client, consensus, provider); diff --git a/crates/net/downloaders/src/headers/reverse_headers.rs b/crates/net/downloaders/src/headers/reverse_headers.rs index 63a20ff27f5b..be359134e79d 100644 --- a/crates/net/downloaders/src/headers/reverse_headers.rs +++ b/crates/net/downloaders/src/headers/reverse_headers.rs @@ -1225,11 +1225,10 @@ mod tests { use super::*; use crate::headers::test_utils::child_header; use alloy_consensus::Header; - use alloy_eips::BlockNumHash; + use alloy_eips::{eip1898::BlockWithParent, BlockNumHash}; use assert_matches::assert_matches; use reth_consensus::test_utils::TestConsensus; use reth_network_p2p::test_utils::TestHeadersClient; - use reth_primitives_traits::BlockWithParent; /// Tests that `replace_number` works the same way as `Option::replace` #[test] diff --git a/crates/net/ecies/Cargo.toml b/crates/net/ecies/Cargo.toml index eb2a0b023b3f..ec34e3e7a323 100644 --- a/crates/net/ecies/Cargo.toml +++ b/crates/net/ecies/Cargo.toml @@ -28,7 +28,7 @@ tracing.workspace = true # HeaderBytes generic-array.workspace = true typenum = "1.15.0" -byteorder = "1.4.3" +byteorder.workspace = true # crypto rand.workspace = true diff --git a/crates/net/eth-wire-types/Cargo.toml b/crates/net/eth-wire-types/Cargo.toml index 8b89603167d4..1fe97f236dea 100644 --- a/crates/net/eth-wire-types/Cargo.toml +++ b/crates/net/eth-wire-types/Cargo.toml @@ -17,6 +17,7 @@ reth-chainspec.workspace = true reth-codecs-derive.workspace = true reth-primitives.workspace = true reth-primitives-traits.workspace = true +reth-ethereum-forks.workspace = true # ethereum alloy-chains = { workspace = true, features = ["rlp"] } @@ -46,24 +47,26 @@ rand.workspace = true [features] arbitrary = [ - "reth-primitives/arbitrary", - "alloy-chains/arbitrary", - "dep:arbitrary", - "dep:proptest", - 
"dep:proptest-arbitrary-interop", - "reth-chainspec/arbitrary", - "alloy-consensus/arbitrary", - "alloy-eips/arbitrary", - "alloy-primitives/arbitrary", - "reth-primitives-traits/arbitrary", + "reth-primitives/arbitrary", + "alloy-chains/arbitrary", + "dep:arbitrary", + "dep:proptest", + "dep:proptest-arbitrary-interop", + "reth-chainspec/arbitrary", + "alloy-consensus/arbitrary", + "alloy-eips/arbitrary", + "alloy-primitives/arbitrary", + "reth-primitives-traits/arbitrary", + "reth-ethereum-forks/arbitrary" ] serde = [ - "dep:serde", - "alloy-chains/serde", - "alloy-consensus/serde", - "alloy-eips/serde", - "alloy-primitives/serde", - "bytes/serde", - "rand/serde", - "reth-primitives-traits/serde", + "dep:serde", + "alloy-chains/serde", + "alloy-consensus/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "bytes/serde", + "rand/serde", + "reth-primitives-traits/serde", + "reth-ethereum-forks/serde" ] diff --git a/crates/net/eth-wire-types/src/blocks.rs b/crates/net/eth-wire-types/src/blocks.rs index 97bbe36b3d61..e6506e86ad7c 100644 --- a/crates/net/eth-wire-types/src/blocks.rs +++ b/crates/net/eth-wire-types/src/blocks.rs @@ -258,7 +258,8 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, - requests_hash: None + requests_hash: None, + target_blobs_per_block: None, }, ]), }.encode(&mut data); @@ -293,7 +294,8 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, - requests_hash: None + requests_hash: None, + target_blobs_per_block: None, }, ]), }; @@ -393,7 +395,8 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, - requests_hash: None + requests_hash: None, + target_blobs_per_block: None, }, ], withdrawals: None, @@ -468,7 +471,8 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, - requests_hash: None + requests_hash: None, + target_blobs_per_block: None, }, ], withdrawals: None, diff --git 
a/crates/net/eth-wire-types/src/broadcast.rs b/crates/net/eth-wire-types/src/broadcast.rs index 25ce7f3b3504..72a1116c3925 100644 --- a/crates/net/eth-wire-types/src/broadcast.rs +++ b/crates/net/eth-wire-types/src/broadcast.rs @@ -8,7 +8,7 @@ use alloy_rlp::{ use derive_more::{Constructor, Deref, DerefMut, From, IntoIterator}; use reth_codecs_derive::{add_arbitrary_tests, generate_tests}; use reth_primitives::TransactionSigned; -use reth_primitives_traits::SignedTransaction; +use reth_primitives_traits::{SignedTransaction, Transaction}; use std::{ collections::{HashMap, HashSet}, mem, @@ -94,7 +94,7 @@ pub struct Transactions( pub Vec, ); -impl Transactions { +impl Transactions { /// Returns `true` if the list of transactions contains any blob transactions. pub fn has_eip4844(&self) -> bool { self.0.iter().any(|tx| tx.is_eip4844()) @@ -309,7 +309,7 @@ impl From> for NewPooledTransactionHashes66 { } } -/// Same as [`NewPooledTransactionHashes66`] but extends that that beside the transaction hashes, +/// Same as [`NewPooledTransactionHashes66`] but extends that beside the transaction hashes, /// the node sends the transaction types and their sizes (as defined in EIP-2718) as well. 
#[derive(Clone, Debug, PartialEq, Eq, Default)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] diff --git a/crates/net/eth-wire-types/src/header.rs b/crates/net/eth-wire-types/src/header.rs index 9fa3b150d9e1..883db625c6e6 100644 --- a/crates/net/eth-wire-types/src/header.rs +++ b/crates/net/eth-wire-types/src/header.rs @@ -142,7 +142,8 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, - requests_hash: None + requests_hash: None, + target_blobs_per_block: None, }; assert_eq!(header.hash_slow(), expected_hash); } @@ -256,6 +257,7 @@ mod tests { excess_blob_gas: Some(0), parent_beacon_block_root: None, requests_hash: None, + target_blobs_per_block: None, }; let header = Header::decode(&mut data.as_slice()).unwrap(); @@ -296,6 +298,7 @@ mod tests { blob_gas_used: Some(0), excess_blob_gas: Some(0x1600000), requests_hash: None, + target_blobs_per_block: None, }; let header = Header::decode(&mut data.as_slice()).unwrap(); diff --git a/crates/net/eth-wire-types/src/primitives.rs b/crates/net/eth-wire-types/src/primitives.rs index ff7ab1c801bd..78083e9e0928 100644 --- a/crates/net/eth-wire-types/src/primitives.rs +++ b/crates/net/eth-wire-types/src/primitives.rs @@ -1,7 +1,7 @@ //! Abstraction over primitive types in network messages. use alloy_rlp::{Decodable, Encodable}; -use reth_primitives_traits::{Block, BlockHeader}; +use reth_primitives_traits::{Block, BlockHeader, SignedTransaction}; use std::fmt::Debug; /// Abstraction over primitive types which might appear in network messages. See @@ -21,6 +21,7 @@ pub trait NetworkPrimitives: + PartialEq + Eq + 'static; + /// The block body type. type BlockBody: Encodable + Decodable @@ -32,6 +33,7 @@ pub trait NetworkPrimitives: + PartialEq + Eq + 'static; + /// Full block type. type Block: Block
+ Encodable @@ -58,17 +60,9 @@ pub trait NetworkPrimitives: + PartialEq + Eq + 'static; + /// The transaction type which peers return in `PooledTransactions` messages. - type PooledTransaction: Encodable - + Decodable - + Send - + Sync - + Unpin - + Clone - + Debug - + PartialEq - + Eq - + 'static; + type PooledTransaction: SignedTransaction + TryFrom + 'static; /// The transaction type which peers return in `GetReceipts` messages. type Receipt: Encodable diff --git a/crates/net/eth-wire-types/src/status.rs b/crates/net/eth-wire-types/src/status.rs index fa73d0907feb..e19912481e4e 100644 --- a/crates/net/eth-wire-types/src/status.rs +++ b/crates/net/eth-wire-types/src/status.rs @@ -4,7 +4,7 @@ use alloy_primitives::{hex, B256, U256}; use alloy_rlp::{RlpDecodable, RlpEncodable}; use reth_chainspec::{EthChainSpec, Hardforks, MAINNET}; use reth_codecs_derive::add_arbitrary_tests; -use reth_primitives::{EthereumHardfork, ForkId, Head}; +use reth_ethereum_forks::{EthereumHardfork, ForkId, Head}; use std::fmt::{Debug, Display}; /// The status message is used in the eth protocol handshake to ensure that peers are on the same diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml index ffbd3017fa62..3dd632de5c05 100644 --- a/crates/net/eth-wire/Cargo.toml +++ b/crates/net/eth-wire/Cargo.toml @@ -89,6 +89,7 @@ serde = [ "reth-codecs/serde", "alloy-chains/serde", "reth-primitives-traits/serde", + "reth-ethereum-forks/serde" ] [[test]] diff --git a/crates/net/eth-wire/src/capability.rs b/crates/net/eth-wire/src/capability.rs index 625971e0e7bd..1e1bb1b20126 100644 --- a/crates/net/eth-wire/src/capability.rs +++ b/crates/net/eth-wire/src/capability.rs @@ -27,6 +27,22 @@ pub struct RawCapabilityMessage { pub payload: Bytes, } +impl RawCapabilityMessage { + /// Creates a new capability message with the given id and payload. 
+ pub const fn new(id: usize, payload: Bytes) -> Self { + Self { id, payload } + } + + /// Creates a raw message for the eth sub-protocol. + /// + /// Caller must ensure that the rlp encoded `payload` matches the given `id`. + /// + /// See also [`EthMessage`] + pub const fn eth(id: EthMessageID, payload: Bytes) -> Self { + Self::new(id as usize, payload) + } +} + /// Various protocol related event types bubbled up from a session that need to be handled by the /// network. #[derive(Debug)] @@ -38,7 +54,7 @@ pub enum CapabilityMessage { serde(bound = "EthMessage: Serialize + serde::de::DeserializeOwned") )] Eth(EthMessage), - /// Any other capability message. + /// Any other or manually crafted eth message. Other(RawCapabilityMessage), } @@ -319,7 +335,7 @@ pub fn shared_capability_offsets( // highest wins, others are ignored if shared_capabilities .get(&peer_capability.name) - .map_or(true, |v| peer_capability.version > v.version) + .is_none_or(|v| peer_capability.version > v.version) { shared_capabilities.insert( peer_capability.name.clone(), diff --git a/crates/net/eth-wire/tests/pooled_transactions.rs b/crates/net/eth-wire/tests/pooled_transactions.rs index 3b17d04cba51..93a17f3b05ba 100644 --- a/crates/net/eth-wire/tests/pooled_transactions.rs +++ b/crates/net/eth-wire/tests/pooled_transactions.rs @@ -3,7 +3,7 @@ use alloy_eips::eip2718::Decodable2718; use alloy_primitives::hex; use alloy_rlp::{Decodable, Encodable}; -use reth_eth_wire::{EthVersion, PooledTransactions, ProtocolMessage}; +use reth_eth_wire::{EthNetworkPrimitives, EthVersion, PooledTransactions, ProtocolMessage}; use reth_primitives::PooledTransactionsElement; use std::{fs, path::PathBuf}; use test_fuzz::test_fuzz; @@ -51,7 +51,7 @@ fn decode_request_pair_pooled_blob_transactions() { .join("testdata/request_pair_pooled_blob_transactions"); let data = fs::read_to_string(network_data_path).expect("Unable to read file"); let hex_data = hex::decode(data.trim()).unwrap(); - let _txs: ProtocolMessage = 
+ let _txs: ProtocolMessage = ProtocolMessage::decode_message(EthVersion::Eth68, &mut &hex_data[..]).unwrap(); } diff --git a/crates/net/network-api/Cargo.toml b/crates/net/network-api/Cargo.toml index 6d410e9db23b..efb0257fc8e0 100644 --- a/crates/net/network-api/Cargo.toml +++ b/crates/net/network-api/Cargo.toml @@ -45,5 +45,6 @@ serde = [ "reth-eth-wire-types/serde", "reth-network-types/serde", "alloy-primitives/serde", - "enr/serde" + "enr/serde", + "reth-ethereum-forks/serde" ] diff --git a/crates/net/network-types/Cargo.toml b/crates/net/network-types/Cargo.toml index c9b8fdd5bf20..932527b91c65 100644 --- a/crates/net/network-types/Cargo.toml +++ b/crates/net/network-types/Cargo.toml @@ -26,5 +26,9 @@ serde_json = { workspace = true } tracing.workspace = true [features] -serde = ["dep:serde", "dep:humantime-serde"] +serde = [ + "dep:serde", + "dep:humantime-serde", + "reth-ethereum-forks/serde" +] test-utils = [] diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index ab9e89c2ca84..a4eff9d3a90c 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -123,6 +123,7 @@ serde = [ "smallvec/serde", "url/serde", "reth-primitives-traits/serde", + "reth-ethereum-forks/serde" ] test-utils = [ "dep:reth-provider", diff --git a/crates/net/network/src/builder.rs b/crates/net/network/src/builder.rs index da003a2e2907..13c932d46442 100644 --- a/crates/net/network/src/builder.rs +++ b/crates/net/network/src/builder.rs @@ -24,35 +24,50 @@ pub struct NetworkBuilder // === impl NetworkBuilder === -impl NetworkBuilder { +impl NetworkBuilder { /// Consumes the type and returns all fields. - pub fn split(self) -> (NetworkManager, Tx, Eth) { + pub fn split(self) -> (NetworkManager, Tx, Eth) { let Self { network, transactions, request_handler } = self; (network, transactions, request_handler) } /// Returns the network manager. 
- pub const fn network(&self) -> &NetworkManager { + pub const fn network(&self) -> &NetworkManager { &self.network } /// Returns the mutable network manager. - pub fn network_mut(&mut self) -> &mut NetworkManager { + pub fn network_mut(&mut self) -> &mut NetworkManager { &mut self.network } /// Returns the handle to the network. - pub fn handle(&self) -> NetworkHandle { + pub fn handle(&self) -> NetworkHandle { self.network.handle().clone() } /// Consumes the type and returns all fields and also return a [`NetworkHandle`]. - pub fn split_with_handle(self) -> (NetworkHandle, NetworkManager, Tx, Eth) { + pub fn split_with_handle(self) -> (NetworkHandle, NetworkManager, Tx, Eth) { let Self { network, transactions, request_handler } = self; let handle = network.handle().clone(); (handle, network, transactions, request_handler) } + /// Creates a new [`EthRequestHandler`] and wires it to the network. + pub fn request_handler( + self, + client: Client, + ) -> NetworkBuilder, N> { + let Self { mut network, transactions, .. } = self; + let (tx, rx) = mpsc::channel(ETH_REQUEST_CHANNEL_CAPACITY); + network.set_eth_request_handler(tx); + let peers = network.handle().peers_handle().clone(); + let request_handler = EthRequestHandler::new(client, peers, rx); + NetworkBuilder { network, request_handler, transactions } + } +} + +impl NetworkBuilder { /// Creates a new [`TransactionsManager`] and wires it to the network. pub fn transactions( self, @@ -66,17 +81,4 @@ impl NetworkBuilder { let transactions = TransactionsManager::new(handle, pool, rx, transactions_manager_config); NetworkBuilder { network, request_handler, transactions } } - - /// Creates a new [`EthRequestHandler`] and wires it to the network. - pub fn request_handler( - self, - client: Client, - ) -> NetworkBuilder> { - let Self { mut network, transactions, .. 
} = self; - let (tx, rx) = mpsc::channel(ETH_REQUEST_CHANNEL_CAPACITY); - network.set_eth_request_handler(tx); - let peers = network.handle().peers_handle().clone(); - let request_handler = EthRequestHandler::new(client, peers, rx); - NetworkBuilder { network, request_handler, transactions } - } } diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index a7d8a98fae6d..a9ce67821b98 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -16,7 +16,7 @@ use reth_eth_wire::{ use reth_ethereum_forks::{ForkFilter, Head}; use reth_network_peers::{mainnet_nodes, pk2id, sepolia_nodes, PeerId, TrustedPeer}; use reth_network_types::{PeersConfig, SessionsConfig}; -use reth_storage_api::{noop::NoopBlockReader, BlockNumReader, BlockReader, HeaderProvider}; +use reth_storage_api::{noop::NoopProvider, BlockNumReader, BlockReader, HeaderProvider}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use secp256k1::SECP256K1; use std::{collections::HashSet, net::SocketAddr, sync::Arc}; @@ -94,7 +94,7 @@ impl NetworkConfig<(), N> { } /// Convenience method for creating the corresponding builder type with a random secret key. - pub fn builder_with_rng_secret_key() -> NetworkConfigBuilder { + pub fn builder_with_rng_secret_key() -> NetworkConfigBuilder { NetworkConfigBuilder::with_rng_secret_key() } } @@ -147,8 +147,11 @@ where impl NetworkConfig where - C: BlockReader - + HeaderProvider + C: BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + > + HeaderProvider + Clone + Unpin + 'static, @@ -495,11 +498,11 @@ impl NetworkConfigBuilder { pub fn build_with_noop_provider( self, chain_spec: Arc, - ) -> NetworkConfig, N> + ) -> NetworkConfig, N> where ChainSpec: EthChainSpec + Hardforks + 'static, { - self.build(NoopBlockReader::new(chain_spec)) + self.build(NoopProvider::eth(chain_spec)) } /// Sets the NAT resolver for external IP. 
diff --git a/crates/net/network/src/eth_requests.rs b/crates/net/network/src/eth_requests.rs index bb45507bdbdb..ee8640daaa9a 100644 --- a/crates/net/network/src/eth_requests.rs +++ b/crates/net/network/src/eth_requests.rs @@ -4,7 +4,7 @@ use crate::{ budget::DEFAULT_BUDGET_TRY_DRAIN_DOWNLOADERS, metered_poll_nested_stream_with_budget, metrics::EthRequestHandlerMetrics, }; -use alloy_consensus::Header; +use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; use alloy_rlp::Encodable; use futures::StreamExt; @@ -83,7 +83,7 @@ where C: BlockReader + HeaderProvider + ReceiptProvider, { /// Returns the list of requested headers - fn get_headers_response(&self, request: GetBlockHeaders) -> Vec
{ + fn get_headers_response(&self, request: GetBlockHeaders) -> Vec { let GetBlockHeaders { start_block, limit, skip, direction } = request; let mut headers = Vec::new(); @@ -105,7 +105,7 @@ where if let Some(header) = self.client.header_by_hash_or_number(block).unwrap_or_default() { match direction { HeadersDirection::Rising => { - if let Some(next) = (header.number + 1).checked_add(skip) { + if let Some(next) = (header.number() + 1).checked_add(skip) { block = next.into() } else { break @@ -116,14 +116,14 @@ where // prevent under flows for block.number == 0 and `block.number - skip < // 0` if let Some(next) = - header.number.checked_sub(1).and_then(|num| num.checked_sub(skip)) + header.number().checked_sub(1).and_then(|num| num.checked_sub(skip)) { block = next.into() } else { break } } else { - block = header.parent_hash.into() + block = header.parent_hash().into() } } } @@ -146,7 +146,7 @@ where &self, _peer_id: PeerId, request: GetBlockHeaders, - response: oneshot::Sender>>, + response: oneshot::Sender>>, ) { self.metrics.eth_headers_requests_received_total.increment(1); let headers = self.get_headers_response(request); @@ -225,7 +225,7 @@ where impl Future for EthRequestHandler where C: BlockReader - + HeaderProvider + + HeaderProvider
+ Unpin, { type Output = (); diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index 53e7cff9fdd8..3719e003b98d 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -403,7 +403,7 @@ impl NetworkManager { &mut self, peer_id: PeerId, _capabilities: Arc, - _message: CapabilityMessage, + _message: CapabilityMessage, ) { trace!(target: "net", ?peer_id, "received unexpected message"); self.swarm @@ -649,6 +649,9 @@ impl NetworkManager { let _ = tx.send(None); } } + NetworkHandleMessage::EthMessage { peer_id, message } => { + self.swarm.sessions_mut().send_message(&peer_id, message) + } } } diff --git a/crates/net/network/src/message.rs b/crates/net/network/src/message.rs index 199498b0b4c1..e88ccb54c369 100644 --- a/crates/net/network/src/message.rs +++ b/crates/net/network/src/message.rs @@ -55,7 +55,7 @@ pub enum PeerMessage { PooledTransactions(NewPooledTransactionHashes), /// All `eth` request variants. EthRequest(PeerRequest), - /// Other than eth namespace message + /// Any other or manually crafted eth message. Other(RawCapabilityMessage), } diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index dd908e5bb268..1ecf17369272 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -1,6 +1,6 @@ use crate::{ - config::NetworkMode, protocol::RlpxSubProtocol, swarm::NetworkConnectionState, - transactions::TransactionsHandle, FetchClient, + config::NetworkMode, message::PeerMessage, protocol::RlpxSubProtocol, + swarm::NetworkConnectionState, transactions::TransactionsHandle, FetchClient, }; use alloy_primitives::B256; use enr::Enr; @@ -139,6 +139,11 @@ impl NetworkHandle { }) } + /// Send eth message to the peer. + pub fn send_eth_message(&self, peer_id: PeerId, message: PeerMessage) { + self.send_message(NetworkHandleMessage::EthMessage { peer_id, message }) + } + /// Send message to get the [`TransactionsHandle`]. 
/// /// Returns `None` if no transaction task is installed. @@ -492,6 +497,13 @@ pub(crate) enum NetworkHandleMessage, }, + /// Sends an `eth` protocol message to the peer. + EthMessage { + /// The peer to send the message to. + peer_id: PeerId, + /// The message to send to the peer's sessions. + message: PeerMessage, + }, /// Applies a reputation change to the given peer. ReputationChange(PeerId, ReputationChangeKind), /// Returns the client that can be used to interact with the network. diff --git a/crates/net/network/src/peers.rs b/crates/net/network/src/peers.rs index d4b762e3e12c..f8d18e159946 100644 --- a/crates/net/network/src/peers.rs +++ b/crates/net/network/src/peers.rs @@ -375,7 +375,7 @@ impl PeersManager { if peer.is_trusted() || peer.is_static() { // For misbehaving trusted or static peers, we provide a bit more leeway when // penalizing them. - ban_duration = self.backoff_durations.medium; + ban_duration = self.backoff_durations.low / 2; } } diff --git a/crates/net/network/src/session/active.rs b/crates/net/network/src/session/active.rs index 76701f7e2abf..af9bb2f08568 100644 --- a/crates/net/network/src/session/active.rs +++ b/crates/net/network/src/session/active.rs @@ -839,7 +839,7 @@ mod tests { f: F, ) -> Pin + Send>> where - F: FnOnce(EthStream>>) -> O + Send + 'static, + F: FnOnce(EthStream>, N>) -> O + Send + 'static, O: Future + Send + Sync, { let status = self.status; diff --git a/crates/net/network/src/session/handle.rs b/crates/net/network/src/session/handle.rs index d167dc0e6ec4..d24d7ec68417 100644 --- a/crates/net/network/src/session/handle.rs +++ b/crates/net/network/src/session/handle.rs @@ -264,7 +264,7 @@ pub enum ActiveSessionMessage { /// Announced capabilities of the remote peer. capabilities: Arc, /// Message received from the peer. - message: CapabilityMessage, + message: CapabilityMessage, }, /// Received a bad message from the peer. 
BadMessage { diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs index a020c540e385..b19281b079af 100644 --- a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -710,7 +710,7 @@ pub enum SessionEvent { /// Announced capabilities of the remote peer. capabilities: Arc, /// Message received from the peer. - message: CapabilityMessage, + message: CapabilityMessage, }, /// Received a bad message from the peer. BadMessage { diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index 473c76c260f0..5d7c0a9f6541 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -385,10 +385,7 @@ impl NetworkState { } /// Handle the outcome of processed response, for example directly queue another request. - fn on_block_response_outcome( - &mut self, - outcome: BlockResponseOutcome, - ) -> Option> { + fn on_block_response_outcome(&mut self, outcome: BlockResponseOutcome) { match outcome { BlockResponseOutcome::Request(peer, request) => { self.handle_block_request(peer, request); @@ -397,7 +394,6 @@ impl NetworkState { self.peers_manager.apply_reputation_change(&peer, reputation_change); } } - None } /// Invoked when received a response from a connected peer. @@ -405,21 +401,19 @@ impl NetworkState { /// Delegates the response result to the fetcher which may return an outcome specific /// instruction that needs to be handled in [`Self::on_block_response_outcome`]. This could be /// a follow-up request or an instruction to slash the peer's reputation. 
- fn on_eth_response( - &mut self, - peer: PeerId, - resp: PeerResponseResult, - ) -> Option> { - match resp { + fn on_eth_response(&mut self, peer: PeerId, resp: PeerResponseResult) { + let outcome = match resp { PeerResponseResult::BlockHeaders(res) => { - let outcome = self.state_fetcher.on_block_headers_response(peer, res)?; - self.on_block_response_outcome(outcome) + self.state_fetcher.on_block_headers_response(peer, res) } PeerResponseResult::BlockBodies(res) => { - let outcome = self.state_fetcher.on_block_bodies_response(peer, res)?; - self.on_block_response_outcome(outcome) + self.state_fetcher.on_block_bodies_response(peer, res) } _ => None, + }; + + if let Some(outcome) = outcome { + self.on_block_response_outcome(outcome); } } @@ -443,13 +437,14 @@ impl NetworkState { } } - // need to buffer results here to make borrow checker happy - let mut closed_sessions = Vec::new(); - let mut received_responses = Vec::new(); + loop { + // need to buffer results here to make borrow checker happy + let mut closed_sessions = Vec::new(); + let mut received_responses = Vec::new(); - // poll all connected peers for responses - for (id, peer) in &mut self.active_peers { - if let Some(mut response) = peer.pending_response.take() { + // poll all connected peers for responses + for (id, peer) in &mut self.active_peers { + let Some(mut response) = peer.pending_response.take() else { continue }; match response.poll(cx) { Poll::Ready(res) => { // check if the error is due to a closed channel to the session @@ -460,7 +455,8 @@ impl NetworkState { "Request canceled, response channel from session closed." 
); // if the channel is closed, this means the peer session is also - // closed, in which case we can invoke the [Self::on_closed_session] + // closed, in which case we can invoke the + // [Self::on_closed_session] // immediately, preventing followup requests and propagate the // connection dropped error closed_sessions.push(*id); @@ -474,15 +470,17 @@ } }; } - } - for peer in closed_sessions { - self.on_session_closed(peer) - } + for peer in closed_sessions { + self.on_session_closed(peer) + } + + if received_responses.is_empty() { + break; + } - for (peer_id, resp) in received_responses { - if let Some(action) = self.on_eth_response(peer_id, resp) { - self.queued_messages.push_back(action); + for (peer_id, resp) in received_responses { + self.on_eth_response(peer_id, resp); } } @@ -491,6 +489,8 @@ self.on_peer_action(action); } + // We need to poll again in case we have received any responses because they may have + // triggered follow-up requests. if self.queued_messages.is_empty() { return Poll::Pending } diff --git a/crates/net/network/src/swarm.rs b/crates/net/network/src/swarm.rs index 47447783f428..c4a2bd14d36e 100644 --- a/crates/net/network/src/swarm.rs +++ b/crates/net/network/src/swarm.rs @@ -350,7 +350,7 @@ pub(crate) enum SwarmEvent { /// Announced capabilities of the remote peer. capabilities: Arc, /// Message received from the peer. - message: CapabilityMessage, + message: CapabilityMessage, }, /// Received a bad message from the peer. 
BadMessage { diff --git a/crates/net/network/src/test_utils/testnet.rs b/crates/net/network/src/test_utils/testnet.rs index 61bfafb6c253..a27df7e7202a 100644 --- a/crates/net/network/src/test_utils/testnet.rs +++ b/crates/net/network/src/test_utils/testnet.rs @@ -18,6 +18,7 @@ use reth_network_api::{ NetworkEvent, NetworkEventListenerProvider, NetworkInfo, Peers, }; use reth_network_peers::PeerId; +use reth_primitives::TransactionSigned; use reth_provider::{test_utils::NoopProvider, ChainSpecProvider}; use reth_storage_api::{BlockReader, BlockReaderIdExt, HeaderProvider, StateProviderFactory}; use reth_tasks::TokioTaskExecutor; @@ -25,7 +26,7 @@ use reth_tokio_util::EventStream; use reth_transaction_pool::{ blobstore::InMemoryBlobStore, test_utils::{TestPool, TestPoolBuilder}, - EthTransactionPool, TransactionPool, TransactionValidationTaskExecutor, + EthTransactionPool, PoolTransaction, TransactionPool, TransactionValidationTaskExecutor, }; use secp256k1::SecretKey; use std::{ @@ -195,12 +196,17 @@ where impl Testnet where - C: BlockReader - + HeaderProvider + C: BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + > + HeaderProvider + Clone + Unpin + 'static, - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool> + + Unpin + + 'static, { /// Spawns the testnet to a separate task pub fn spawn(self) -> TestnetHandle { @@ -258,11 +264,16 @@ impl fmt::Debug for Testnet { impl Future for Testnet where - C: BlockReader - + HeaderProvider + C: BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + > + HeaderProvider + + Unpin + + 'static, + Pool: TransactionPool> + Unpin + 'static, - Pool: TransactionPool + Unpin + 'static, { type Output = (); @@ -456,11 +467,16 @@ where impl Future for Peer where - C: BlockReader - + HeaderProvider + C: BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, 
+ Header = reth_primitives::Header, + > + HeaderProvider + + Unpin + + 'static, + Pool: TransactionPool> + Unpin + 'static, - Pool: TransactionPool + Unpin + 'static, { type Output = (); diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index 180a619fff9e..025ae36ea142 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -50,6 +50,7 @@ use reth_network_api::PeerRequest; use reth_network_p2p::error::{RequestError, RequestResult}; use reth_network_peers::PeerId; use reth_primitives::PooledTransactionsElement; +use reth_primitives_traits::SignedTransaction; use schnellru::ByLength; #[cfg(debug_assertions)] use smallvec::{smallvec, SmallVec}; @@ -895,16 +896,14 @@ impl TransactionFetcher { approx_capacity_get_pooled_transactions_req_eth66() } } -} -impl TransactionFetcher { /// Processes a resolved [`GetPooledTransactions`] request. Queues the outcome as a /// [`FetchEvent`], which will then be streamed by /// [`TransactionsManager`](super::TransactionsManager). pub fn on_resolved_get_pooled_transactions_request_fut( &mut self, - response: GetPooledTxResponse, - ) -> FetchEvent { + response: GetPooledTxResponse, + ) -> FetchEvent { // update peer activity, requests for buffered hashes can only be made to idle // fallback peers let GetPooledTxResponse { peer_id, mut requested_hashes, result } = response; @@ -1026,8 +1025,8 @@ impl TransactionFetcher { } } -impl Stream for TransactionFetcher { - type Item = FetchEvent; +impl Stream for TransactionFetcher { + type Item = FetchEvent; /// Advances all inflight requests and returns the next event. fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { @@ -1176,18 +1175,18 @@ impl Future for GetPooledTxRequestFut { /// Wrapper of unverified [`PooledTransactions`]. 
#[derive(Debug, Constructor, Deref)] -pub struct UnverifiedPooledTransactions { - txns: PooledTransactions, +pub struct UnverifiedPooledTransactions { + txns: PooledTransactions, } /// [`PooledTransactions`] that have been successfully verified. #[derive(Debug, Constructor, Deref)] -pub struct VerifiedPooledTransactions { - txns: PooledTransactions, +pub struct VerifiedPooledTransactions { + txns: PooledTransactions, } -impl DedupPayload for VerifiedPooledTransactions { - type Value = PooledTransactionsElement; +impl DedupPayload for VerifiedPooledTransactions { + type Value = T; fn is_empty(&self) -> bool { self.txns.is_empty() @@ -1199,26 +1198,30 @@ impl DedupPayload for VerifiedPooledTransactions { fn dedup(self) -> PartiallyValidData { PartiallyValidData::from_raw_data( - self.txns.into_iter().map(|tx| (*tx.hash(), tx)).collect(), + self.txns.into_iter().map(|tx| (*tx.tx_hash(), tx)).collect(), None, ) } } trait VerifyPooledTransactionsResponse { + type Transaction: SignedTransaction; + fn verify( self, requested_hashes: &RequestTxHashes, peer_id: &PeerId, - ) -> (VerificationOutcome, VerifiedPooledTransactions); + ) -> (VerificationOutcome, VerifiedPooledTransactions); } -impl VerifyPooledTransactionsResponse for UnverifiedPooledTransactions { +impl VerifyPooledTransactionsResponse for UnverifiedPooledTransactions { + type Transaction = T; + fn verify( self, requested_hashes: &RequestTxHashes, _peer_id: &PeerId, - ) -> (VerificationOutcome, VerifiedPooledTransactions) { + ) -> (VerificationOutcome, VerifiedPooledTransactions) { let mut verification_outcome = VerificationOutcome::Ok; let Self { mut txns } = self; @@ -1229,11 +1232,11 @@ impl VerifyPooledTransactionsResponse for UnverifiedPooledTransactions { let mut tx_hashes_not_requested_count = 0; txns.0.retain(|tx| { - if !requested_hashes.contains(tx.hash()) { + if !requested_hashes.contains(tx.tx_hash()) { verification_outcome = VerificationOutcome::ReportPeer; #[cfg(debug_assertions)] - 
tx_hashes_not_requested.push(*tx.hash()); + tx_hashes_not_requested.push(*tx.tx_hash()); #[cfg(not(debug_assertions))] { tx_hashes_not_requested_count += 1; diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index d1ee1556f280..cd9ff4161aa1 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -49,7 +49,9 @@ use reth_network_p2p::{ }; use reth_network_peers::PeerId; use reth_network_types::ReputationChangeKind; -use reth_primitives::{PooledTransactionsElement, TransactionSigned}; +use reth_primitives::{ + transaction::SignedTransactionIntoRecoveredExt, RecoveredTx, TransactionSigned, +}; use reth_primitives_traits::{SignedTransaction, TxType}; use reth_tokio_util::EventStream; use reth_transaction_pool::{ @@ -122,7 +124,11 @@ impl TransactionsHandle { /// /// Note: this only propagates the transactions that are known to the pool. pub fn propagate_hashes_to(&self, hash: impl IntoIterator, peer: PeerId) { - self.send(TransactionsCommand::PropagateHashesTo(hash.into_iter().collect(), peer)) + let hashes = hash.into_iter().collect::>(); + if hashes.is_empty() { + return + } + self.send(TransactionsCommand::PropagateHashesTo(hashes, peer)) } /// Request the active peer IDs from the [`TransactionsManager`]. @@ -133,7 +139,12 @@ impl TransactionsHandle { } /// Manually propagate full transactions to a specific peer. + /// + /// Do nothing if transactions are empty. pub fn propagate_transactions_to(&self, transactions: Vec, peer: PeerId) { + if transactions.is_empty() { + return + } self.send(TransactionsCommand::PropagateTransactionsTo(transactions, peer)) } @@ -141,7 +152,12 @@ impl TransactionsHandle { /// /// It's up to the [`TransactionsManager`] whether the transactions are sent as hashes or in /// full. + /// + /// Do nothing if transactions are empty. 
pub fn propagate_transactions(&self, transactions: Vec) { + if transactions.is_empty() { + return + } self.send(TransactionsCommand::PropagateTransactions(transactions)) } @@ -150,6 +166,9 @@ impl TransactionsHandle { &self, peers: Vec, ) -> Result>, RecvError> { + if peers.is_empty() { + return Ok(Default::default()) + } let (tx, rx) = oneshot::channel(); self.send(TransactionsCommand::GetTransactionHashes { peers, tx }); rx.await @@ -256,7 +275,7 @@ pub struct TransactionsManager, /// Incoming events from the [`NetworkManager`](crate::NetworkManager). - transaction_events: UnboundedMeteredReceiver, + transaction_events: UnboundedMeteredReceiver>, /// How the `TransactionsManager` is configured. config: TransactionsManagerConfig, /// `TransactionsManager` metrics @@ -681,14 +700,15 @@ where impl TransactionsManager where - Pool: TransactionPool, + Pool: TransactionPool + 'static, N: NetworkPrimitives< BroadcastedTransaction: SignedTransaction, PooledTransaction: SignedTransaction, >, - <::Transaction as PoolTransaction>::Consensus: - Into, - <::Transaction as PoolTransaction>::Pooled: Into, + Pool::Transaction: PoolTransaction< + Consensus = N::BroadcastedTransaction, + Pooled: Into + From>, + >, { /// Invoked when transactions in the local mempool are considered __pending__. /// @@ -990,52 +1010,9 @@ where let _ = response.send(Ok(resp)); } } -} - -impl TransactionsManager -where - Pool: TransactionPool + 'static, - <::Transaction as PoolTransaction>::Consensus: Into, - <::Transaction as PoolTransaction>::Pooled: - Into, -{ - /// Handles dedicated transaction events related to the `eth` protocol. 
- fn on_network_tx_event(&mut self, event: NetworkTransactionEvent) { - match event { - NetworkTransactionEvent::IncomingTransactions { peer_id, msg } => { - // ensure we didn't receive any blob transactions as these are disallowed to be - // broadcasted in full - - let has_blob_txs = msg.has_eip4844(); - - let non_blob_txs = msg - .0 - .into_iter() - .map(PooledTransactionsElement::try_from_broadcast) - .filter_map(Result::ok) - .collect(); - - self.import_transactions(peer_id, non_blob_txs, TransactionSource::Broadcast); - - if has_blob_txs { - debug!(target: "net::tx", ?peer_id, "received bad full blob transaction broadcast"); - self.report_peer_bad_transactions(peer_id); - } - } - NetworkTransactionEvent::IncomingPooledTransactionHashes { peer_id, msg } => { - self.on_new_pooled_transaction_hashes(peer_id, msg) - } - NetworkTransactionEvent::GetPooledTransactions { peer_id, request, response } => { - self.on_get_pooled_transactions(peer_id, request, response) - } - NetworkTransactionEvent::GetTransactionsHandle(response) => { - let _ = response.send(Some(self.handle())); - } - } - } /// Handles a command received from a detached [`TransactionsHandle`] - fn on_command(&mut self, cmd: TransactionsCommand) { + fn on_command(&mut self, cmd: TransactionsCommand) { match cmd { TransactionsCommand::PropagateHash(hash) => { self.on_new_pending_transactions(vec![hash]) @@ -1123,7 +1100,7 @@ where } /// Handles a received event related to common network events. - fn on_network_event(&mut self, event_result: NetworkEvent) { + fn on_network_event(&mut self, event_result: NetworkEvent>) { match event_result { NetworkEvent::Peer(PeerEvent::SessionClosed { peer_id, .. }) => { // remove the peer @@ -1150,11 +1127,46 @@ where } } + /// Handles dedicated transaction events related to the `eth` protocol. 
+ fn on_network_tx_event(&mut self, event: NetworkTransactionEvent) { + match event { + NetworkTransactionEvent::IncomingTransactions { peer_id, msg } => { + // ensure we didn't receive any blob transactions as these are disallowed to be + // broadcasted in full + + let has_blob_txs = msg.has_eip4844(); + + let non_blob_txs = msg + .0 + .into_iter() + .map(N::PooledTransaction::try_from) + .filter_map(Result::ok) + .collect(); + + self.import_transactions(peer_id, non_blob_txs, TransactionSource::Broadcast); + + if has_blob_txs { + debug!(target: "net::tx", ?peer_id, "received bad full blob transaction broadcast"); + self.report_peer_bad_transactions(peer_id); + } + } + NetworkTransactionEvent::IncomingPooledTransactionHashes { peer_id, msg } => { + self.on_new_pooled_transaction_hashes(peer_id, msg) + } + NetworkTransactionEvent::GetPooledTransactions { peer_id, request, response } => { + self.on_get_pooled_transactions(peer_id, request, response) + } + NetworkTransactionEvent::GetTransactionsHandle(response) => { + let _ = response.send(Some(self.handle())); + } + } + } + /// Starts the import process for the given transactions. fn import_transactions( &mut self, peer_id: PeerId, - transactions: PooledTransactions, + transactions: PooledTransactions, source: TransactionSource, ) { // If the node is pipeline syncing, ignore transactions @@ -1170,7 +1182,7 @@ where // mark the transactions as received self.transaction_fetcher - .remove_hashes_from_transaction_fetcher(transactions.iter().map(|tx| *tx.hash())); + .remove_hashes_from_transaction_fetcher(transactions.iter().map(|tx| *tx.tx_hash())); // track that the peer knows these transaction, but only if this is a new broadcast. // If we received the transactions as the response to our `GetPooledTransactions`` @@ -1178,7 +1190,7 @@ where // recorded the hashes as seen by this peer in `Self::on_new_pooled_transaction_hashes`. 
let mut num_already_seen_by_peer = 0; for tx in &transactions { - if source.is_broadcast() && !peer.seen_transactions.insert(*tx.hash()) { + if source.is_broadcast() && !peer.seen_transactions.insert(*tx.tx_hash()) { num_already_seen_by_peer += 1; } } @@ -1207,7 +1219,7 @@ where Err(badtx) => { trace!(target: "net::tx", peer_id=format!("{peer_id:#}"), - hash=%badtx.hash(), + hash=%badtx.tx_hash(), client_version=%peer.client_version, "failed ecrecovery for transaction" ); @@ -1216,22 +1228,23 @@ where } }; - match self.transactions_by_peers.entry(*tx.hash()) { + match self.transactions_by_peers.entry(*tx.tx_hash()) { Entry::Occupied(mut entry) => { // transaction was already inserted entry.get_mut().insert(peer_id); } Entry::Vacant(entry) => { - if self.bad_imports.contains(tx.hash()) { + if self.bad_imports.contains(tx.tx_hash()) { trace!(target: "net::tx", peer_id=format!("{peer_id:#}"), - hash=%tx.hash(), + hash=%tx.tx_hash(), client_version=%peer.client_version, "received a known bad transaction from peer" ); has_bad_transactions = true; } else { // this is a new transaction that should be imported into the pool + let pool_transaction = Pool::Transaction::from_pooled(tx.into()); new_txs.push(pool_transaction); @@ -1293,7 +1306,7 @@ where } /// Processes a [`FetchEvent`]. 
- fn on_fetch_event(&mut self, fetch_event: FetchEvent) { + fn on_fetch_event(&mut self, fetch_event: FetchEvent) { match fetch_event { FetchEvent::TransactionsFetched { peer_id, transactions } => { self.import_transactions(peer_id, transactions, TransactionSource::Response); @@ -1316,12 +1329,17 @@ where // // spawned in `NodeConfig::start_network`(reth_node_core::NodeConfig) and // `NetworkConfig::start_network`(reth_network::NetworkConfig) -impl Future for TransactionsManager +impl Future for TransactionsManager where Pool: TransactionPool + Unpin + 'static, - <::Transaction as PoolTransaction>::Consensus: Into, - <::Transaction as PoolTransaction>::Pooled: - Into, + N: NetworkPrimitives< + BroadcastedTransaction: SignedTransaction, + PooledTransaction: SignedTransaction, + >, + Pool::Transaction: PoolTransaction< + Consensus = N::BroadcastedTransaction, + Pooled: Into + From>, + >, { type Output = (); @@ -1509,11 +1527,11 @@ impl PropagateTransaction { /// Create a new instance from a pooled transaction fn new

(tx: Arc>) -> Self where - P: PoolTransaction>, + P: PoolTransaction, { let size = tx.encoded_length(); - let transaction = tx.transaction.clone().into_consensus().into(); - let transaction = Arc::new(transaction); + let transaction = tx.transaction.clone_into_consensus(); + let transaction = Arc::new(transaction.into_signed()); Self { size, transaction } } diff --git a/crates/net/network/src/transactions/validation.rs b/crates/net/network/src/transactions/validation.rs index 1575d9f3374a..1018cde6b55b 100644 --- a/crates/net/network/src/transactions/validation.rs +++ b/crates/net/network/src/transactions/validation.rs @@ -3,7 +3,7 @@ //! announcements. Validation and filtering of announcements is network dependent. use crate::metrics::{AnnouncedTxTypesMetrics, TxTypesCounter}; -use alloy_primitives::{Signature, TxHash}; +use alloy_primitives::{PrimitiveSignature as Signature, TxHash}; use derive_more::{Deref, DerefMut}; use reth_eth_wire::{ DedupPayload, Eth68TxMetadata, HandleMempoolData, PartiallyValidData, ValidAnnouncementData, diff --git a/crates/net/p2p/Cargo.toml b/crates/net/p2p/Cargo.toml index 9348bf2d0413..2c61da751844 100644 --- a/crates/net/p2p/Cargo.toml +++ b/crates/net/p2p/Cargo.toml @@ -57,5 +57,7 @@ std = [ "alloy-eips/std", "alloy-primitives/std", "reth-primitives-traits/std", - "alloy-consensus/std", + "alloy-consensus/std", + "derive_more/std", + "reth-network-peers/std" ] diff --git a/crates/net/p2p/src/headers/downloader.rs b/crates/net/p2p/src/headers/downloader.rs index eca03bdb4e79..1bc76924a6c8 100644 --- a/crates/net/p2p/src/headers/downloader.rs +++ b/crates/net/p2p/src/headers/downloader.rs @@ -1,12 +1,11 @@ use super::error::HeadersDownloaderResult; use crate::error::{DownloadError, DownloadResult}; use alloy_consensus::BlockHeader; -use alloy_eips::BlockHashOrNumber; +use alloy_eips::{eip1898::BlockWithParent, BlockHashOrNumber}; use alloy_primitives::B256; use futures::Stream; use reth_consensus::HeaderValidator; use 
reth_primitives::SealedHeader; -use reth_primitives_traits::BlockWithParent; use std::fmt::Debug; /// A downloader capable of fetching and yielding block headers. diff --git a/crates/net/peers/Cargo.toml b/crates/net/peers/Cargo.toml index 5ac24edea759..8ca5faec93d5 100644 --- a/crates/net/peers/Cargo.toml +++ b/crates/net/peers/Cargo.toml @@ -35,5 +35,13 @@ serde_json.workspace = true tokio = { workspace = true, features = ["net", "macros", "rt"] } [features] +default = ["std"] +std = [ + "alloy-primitives/std", + "alloy-rlp/std", + "secp256k1?/std", + "serde_with/std", + "thiserror/std" +] secp256k1 = ["dep:secp256k1", "enr/secp256k1"] net = ["dep:tokio", "tokio?/net"] diff --git a/crates/net/peers/src/bootnodes/mod.rs b/crates/net/peers/src/bootnodes/mod.rs index 31c91e5d1cea..b149c108a969 100644 --- a/crates/net/peers/src/bootnodes/mod.rs +++ b/crates/net/peers/src/bootnodes/mod.rs @@ -1,6 +1,7 @@ //! Bootnodes for the network use crate::NodeRecord; +use alloc::vec::Vec; mod ethereum; pub use ethereum::*; diff --git a/crates/net/peers/src/lib.rs b/crates/net/peers/src/lib.rs index 1d60994d8e1b..3e2777c2df89 100644 --- a/crates/net/peers/src/lib.rs +++ b/crates/net/peers/src/lib.rs @@ -52,9 +52,16 @@ )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + +use alloc::{ + format, + string::{String, ToString}, +}; use alloy_primitives::B512; -use std::str::FromStr; +use core::str::FromStr; // Re-export PeerId for ease of use. 
pub use enr::Enr; @@ -137,8 +144,8 @@ impl AnyNode { let node_record = NodeRecord { address: enr .ip4() - .map(std::net::IpAddr::from) - .or_else(|| enr.ip6().map(std::net::IpAddr::from))?, + .map(core::net::IpAddr::from) + .or_else(|| enr.ip6().map(core::net::IpAddr::from))?, tcp_port: enr.tcp4().or_else(|| enr.tcp6())?, udp_port: enr.udp4().or_else(|| enr.udp6())?, id: pk2id(&enr.public_key()), @@ -186,8 +193,8 @@ impl FromStr for AnyNode { } } -impl std::fmt::Display for AnyNode { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl core::fmt::Display for AnyNode { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self { Self::NodeRecord(record) => write!(f, "{record}"), #[cfg(feature = "secp256k1")] diff --git a/crates/net/peers/src/node_record.rs b/crates/net/peers/src/node_record.rs index ed48e242c1da..15ef5ad8522a 100644 --- a/crates/net/peers/src/node_record.rs +++ b/crates/net/peers/src/node_record.rs @@ -1,15 +1,18 @@ //! Commonly used `NodeRecord` type for peers. -use std::{ +use crate::PeerId; +use alloc::{ + format, + string::{String, ToString}, +}; +use alloy_rlp::{RlpDecodable, RlpEncodable}; +use core::{ fmt, fmt::Write, net::{IpAddr, Ipv4Addr, SocketAddr}, num::ParseIntError, str::FromStr, }; - -use crate::PeerId; -use alloy_rlp::{RlpDecodable, RlpEncodable}; use serde_with::{DeserializeFromStr, SerializeDisplay}; #[cfg(feature = "secp256k1")] diff --git a/crates/net/peers/src/trusted_peer.rs b/crates/net/peers/src/trusted_peer.rs index aa7e0a015336..b87c4d6da2f0 100644 --- a/crates/net/peers/src/trusted_peer.rs +++ b/crates/net/peers/src/trusted_peer.rs @@ -1,14 +1,14 @@ //! `NodeRecord` type that uses a domain instead of an IP. 
use crate::{NodeRecord, PeerId}; -use serde_with::{DeserializeFromStr, SerializeDisplay}; -use std::{ +use alloc::string::{String, ToString}; +use core::{ fmt::{self, Write}, - io::Error, net::IpAddr, num::ParseIntError, str::FromStr, }; +use serde_with::{DeserializeFromStr, SerializeDisplay}; use url::Host; /// Represents the node record of a trusted peer. The only difference between this and a @@ -45,11 +45,13 @@ impl TrustedPeer { Self { host, tcp_port: port, udp_port: port, id } } + #[cfg(any(test, feature = "std"))] const fn to_node_record(&self, ip: IpAddr) -> NodeRecord { NodeRecord { address: ip, id: self.id, tcp_port: self.tcp_port, udp_port: self.udp_port } } /// Tries to resolve directly to a [`NodeRecord`] if the host is an IP address. + #[cfg(any(test, feature = "std"))] fn try_node_record(&self) -> Result { match &self.host { Host::Ipv4(ip) => Ok(self.to_node_record((*ip).into())), @@ -61,23 +63,24 @@ impl TrustedPeer { /// Resolves the host in a [`TrustedPeer`] to an IP address, returning a [`NodeRecord`]. /// /// This use [`ToSocketAddr`](std::net::ToSocketAddrs) to resolve the host to an IP address. - pub fn resolve_blocking(&self) -> Result { + #[cfg(any(test, feature = "std"))] + pub fn resolve_blocking(&self) -> Result { let domain = match self.try_node_record() { Ok(record) => return Ok(record), Err(domain) => domain, }; // Resolve the domain to an IP address let mut ips = std::net::ToSocketAddrs::to_socket_addrs(&(domain, 0))?; - let ip = ips - .next() - .ok_or_else(|| Error::new(std::io::ErrorKind::AddrNotAvailable, "No IP found"))?; + let ip = ips.next().ok_or_else(|| { + std::io::Error::new(std::io::ErrorKind::AddrNotAvailable, "No IP found") + })?; Ok(self.to_node_record(ip.ip())) } /// Resolves the host in a [`TrustedPeer`] to an IP address, returning a [`NodeRecord`]. 
#[cfg(any(test, feature = "net"))] - pub async fn resolve(&self) -> Result { + pub async fn resolve(&self) -> Result { let domain = match self.try_node_record() { Ok(record) => return Ok(record), Err(domain) => domain, @@ -85,9 +88,9 @@ impl TrustedPeer { // Resolve the domain to an IP address let mut ips = tokio::net::lookup_host(format!("{domain}:0")).await?; - let ip = ips - .next() - .ok_or_else(|| Error::new(std::io::ErrorKind::AddrNotAvailable, "No IP found"))?; + let ip = ips.next().ok_or_else(|| { + std::io::Error::new(std::io::ErrorKind::AddrNotAvailable, "No IP found") + })?; Ok(self.to_node_record(ip.ip())) } diff --git a/crates/node/api/Cargo.toml b/crates/node/api/Cargo.toml index ab4595d33624..7d209a90fcad 100644 --- a/crates/node/api/Cargo.toml +++ b/crates/node/api/Cargo.toml @@ -26,6 +26,5 @@ reth-node-types.workspace = true reth-node-core.workspace = true alloy-rpc-types-engine.workspace = true -alloy-consensus.workspace = true eyre.workspace = true diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index 5d25d8d592c7..fc6366a2eb5d 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -1,18 +1,17 @@ //! Traits for configuring a node. 
use crate::ConfigureEvm; -use alloy_consensus::Header; use alloy_rpc_types_engine::JwtSecret; use reth_beacon_consensus::BeaconConsensusEngineHandle; -use reth_consensus::Consensus; +use reth_consensus::FullConsensus; use reth_evm::execute::BlockExecutorProvider; use reth_network_api::FullNetwork; use reth_node_core::node_config::NodeConfig; -use reth_node_types::{NodeTypes, NodeTypesWithDB, NodeTypesWithEngine}; +use reth_node_types::{HeaderTy, NodeTypes, NodeTypesWithDB, NodeTypesWithEngine, TxTy}; use reth_payload_builder_primitives::PayloadBuilder; use reth_provider::FullProvider; use reth_tasks::TaskExecutor; -use reth_transaction_pool::TransactionPool; +use reth_transaction_pool::{PoolTransaction, TransactionPool}; use std::{future::Future, marker::PhantomData}; /// A helper trait that is downstream of the [`NodeTypesWithEngine`] trait and adds stateful @@ -47,16 +46,16 @@ where /// Encapsulates all types and components of the node. pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { /// The transaction pool of the node. - type Pool: TransactionPool + Unpin; + type Pool: TransactionPool>> + Unpin; /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. - type Evm: ConfigureEvm

; + type Evm: ConfigureEvm
, Transaction = TxTy>; /// The type that knows how to execute blocks. - type Executor: BlockExecutorProvider; + type Executor: BlockExecutorProvider::Primitives>; /// The consensus type of the node. - type Consensus: Consensus + Clone + Unpin + 'static; + type Consensus: FullConsensus<::Primitives> + Clone + Unpin + 'static; /// Network API. type Network: FullNetwork; diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index 781112d93c8a..26d157e1e0cb 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -45,7 +45,7 @@ reth-payload-validator.workspace = true reth-primitives.workspace = true reth-provider.workspace = true reth-prune.workspace = true -reth-rpc = { workspace = true, features = ["js-tracer"] } +reth-rpc.workspace = true reth-rpc-api.workspace = true reth-rpc-builder.workspace = true reth-rpc-engine-api.workspace = true @@ -96,6 +96,7 @@ tempfile.workspace = true [features] default = [] +js-tracer = ["reth-rpc/js-tracer"] test-utils = [ "reth-db/test-utils", "reth-blockchain-tree/test-utils", diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 06d5294d800a..3cab01aa71be 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -37,7 +37,7 @@ use reth_provider::{ BlockReader, ChainSpecProvider, FullProvider, }; use reth_tasks::TaskExecutor; -use reth_transaction_pool::{PoolConfig, TransactionPool}; +use reth_transaction_pool::{PoolConfig, PoolTransaction, TransactionPool}; use revm_primitives::EnvKzgSettings; use secp256k1::SecretKey; use std::sync::Arc; @@ -650,9 +650,15 @@ impl BuilderContext { /// connected to that network. 
pub fn start_network(&self, builder: NetworkBuilder<(), ()>, pool: Pool) -> NetworkHandle where - Pool: TransactionPool + Unpin + 'static, - Node::Provider: - BlockReader, + Pool: TransactionPool< + Transaction: PoolTransaction, + > + Unpin + + 'static, + Node::Provider: BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + >, { self.start_network_with(builder, pool, Default::default()) } @@ -670,9 +676,15 @@ impl BuilderContext { tx_config: TransactionsManagerConfig, ) -> NetworkHandle where - Pool: TransactionPool + Unpin + 'static, - Node::Provider: - BlockReader, + Pool: TransactionPool< + Transaction: PoolTransaction, + > + Unpin + + 'static, + Node::Provider: BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + >, { let (handle, network, txpool, eth) = builder .transactions(pool, tx_config) diff --git a/crates/node/builder/src/components/builder.rs b/crates/node/builder/src/components/builder.rs index 95c0c764b5c3..7e2d0eb43cc0 100644 --- a/crates/node/builder/src/components/builder.rs +++ b/crates/node/builder/src/components/builder.rs @@ -7,12 +7,11 @@ use crate::{ }, BuilderContext, ConfigureEvm, FullNodeTypes, }; -use alloy_consensus::Header; -use reth_consensus::Consensus; +use reth_consensus::FullConsensus; use reth_evm::execute::BlockExecutorProvider; -use reth_node_api::NodeTypesWithEngine; +use reth_node_api::{HeaderTy, NodeTypes, NodeTypesWithEngine, TxTy}; use reth_payload_builder::PayloadBuilderHandle; -use reth_transaction_pool::TransactionPool; +use reth_transaction_pool::{PoolTransaction, TransactionPool}; use std::{future::Future, marker::PhantomData}; /// A generic, general purpose and customizable [`NodeComponentsBuilder`] implementation. 
@@ -375,10 +374,12 @@ where Node: FullNodeTypes, F: FnOnce(&BuilderContext) -> Fut + Send, Fut: Future>> + Send, - Pool: TransactionPool + Unpin + 'static, - EVM: ConfigureEvm
, - Executor: BlockExecutorProvider, - Cons: Consensus + Clone + Unpin + 'static, + Pool: TransactionPool>> + + Unpin + + 'static, + EVM: ConfigureEvm
, Transaction = TxTy>, + Executor: BlockExecutorProvider::Primitives>, + Cons: FullConsensus<::Primitives> + Clone + Unpin + 'static, { type Components = Components; diff --git a/crates/node/builder/src/components/consensus.rs b/crates/node/builder/src/components/consensus.rs index 6c90bda54752..074080d337b1 100644 --- a/crates/node/builder/src/components/consensus.rs +++ b/crates/node/builder/src/components/consensus.rs @@ -1,11 +1,16 @@ //! Consensus component for the node builder. +use reth_node_api::NodeTypes; + use crate::{BuilderContext, FullNodeTypes}; use std::future::Future; /// A type that knows how to build the consensus implementation. pub trait ConsensusBuilder: Send { /// The consensus implementation to build. - type Consensus: reth_consensus::Consensus + Clone + Unpin + 'static; + type Consensus: reth_consensus::FullConsensus<::Primitives> + + Clone + + Unpin + + 'static; /// Creates the consensus implementation. fn build_consensus( @@ -17,7 +22,10 @@ pub trait ConsensusBuilder: Send { impl ConsensusBuilder for F where Node: FullNodeTypes, - Consensus: reth_consensus::Consensus + Clone + Unpin + 'static, + Consensus: reth_consensus::FullConsensus<::Primitives> + + Clone + + Unpin + + 'static, F: FnOnce(&BuilderContext) -> Fut + Send, Fut: Future> + Send, { diff --git a/crates/node/builder/src/components/execute.rs b/crates/node/builder/src/components/execute.rs index 4e8f63f412bc..e3226fa8e371 100644 --- a/crates/node/builder/src/components/execute.rs +++ b/crates/node/builder/src/components/execute.rs @@ -1,8 +1,7 @@ //! EVM component for the node builder. use crate::{BuilderContext, FullNodeTypes}; -use alloy_consensus::Header; use reth_evm::execute::BlockExecutorProvider; -use reth_node_api::ConfigureEvm; +use reth_node_api::{ConfigureEvm, HeaderTy, TxTy}; use std::future::Future; /// A type that knows how to build the executor types. @@ -10,10 +9,12 @@ pub trait ExecutorBuilder: Send { /// The EVM config to use. 
/// /// This provides the node with the necessary configuration to configure an EVM. - type EVM: ConfigureEvm
; + type EVM: ConfigureEvm
, Transaction = TxTy>; /// The type that knows how to execute blocks. - type Executor: BlockExecutorProvider; + type Executor: BlockExecutorProvider< + Primitives = ::Primitives, + >; /// Creates the EVM config. fn build_evm( @@ -25,8 +26,9 @@ pub trait ExecutorBuilder: Send { impl ExecutorBuilder for F where Node: FullNodeTypes, - EVM: ConfigureEvm
, - Executor: BlockExecutorProvider, + EVM: ConfigureEvm
, Transaction = TxTy>, + Executor: + BlockExecutorProvider::Primitives>, F: FnOnce(&BuilderContext) -> Fut + Send, Fut: Future> + Send, { diff --git a/crates/node/builder/src/components/mod.rs b/crates/node/builder/src/components/mod.rs index 1fe35e554d51..b643e2aa2a65 100644 --- a/crates/node/builder/src/components/mod.rs +++ b/crates/node/builder/src/components/mod.rs @@ -22,14 +22,13 @@ pub use payload::*; pub use pool::*; use crate::{ConfigureEvm, FullNodeTypes}; -use alloy_consensus::Header; -use reth_consensus::Consensus; +use reth_consensus::FullConsensus; use reth_evm::execute::BlockExecutorProvider; use reth_network::NetworkHandle; use reth_network_api::FullNetwork; -use reth_node_api::NodeTypesWithEngine; +use reth_node_api::{HeaderTy, NodeTypes, NodeTypesWithEngine, TxTy}; use reth_payload_builder::PayloadBuilderHandle; -use reth_transaction_pool::TransactionPool; +use reth_transaction_pool::{PoolTransaction, TransactionPool}; /// An abstraction over the components of a node, consisting of: /// - evm and executor @@ -38,16 +37,16 @@ use reth_transaction_pool::TransactionPool; /// - payload builder. pub trait NodeComponents: Clone + Unpin + Send + Sync + 'static { /// The transaction pool of the node. - type Pool: TransactionPool + Unpin; + type Pool: TransactionPool>> + Unpin; /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. - type Evm: ConfigureEvm
; + type Evm: ConfigureEvm
, Transaction = TxTy>; /// The type that knows how to execute blocks. - type Executor: BlockExecutorProvider; + type Executor: BlockExecutorProvider::Primitives>; /// The consensus type of the node. - type Consensus: Consensus + Clone + Unpin + 'static; + type Consensus: FullConsensus<::Primitives> + Clone + Unpin + 'static; /// Network API. type Network: FullNetwork; @@ -97,10 +96,12 @@ impl NodeComponents for Components where Node: FullNodeTypes, - Pool: TransactionPool + Unpin + 'static, - EVM: ConfigureEvm
, - Executor: BlockExecutorProvider, - Cons: Consensus + Clone + Unpin + 'static, + Pool: TransactionPool>> + + Unpin + + 'static, + EVM: ConfigureEvm
, Transaction = TxTy>, + Executor: BlockExecutorProvider::Primitives>, + Cons: FullConsensus<::Primitives> + Clone + Unpin + 'static, { type Pool = Pool; type Evm = EVM; @@ -138,9 +139,9 @@ impl Clone for Components, + EVM: ConfigureEvm
, Transaction = TxTy>, Executor: BlockExecutorProvider, - Cons: Consensus + Clone, + Cons: Clone, { fn clone(&self) -> Self { Self { diff --git a/crates/node/builder/src/components/pool.rs b/crates/node/builder/src/components/pool.rs index 436a80c52e00..5b08e0a77395 100644 --- a/crates/node/builder/src/components/pool.rs +++ b/crates/node/builder/src/components/pool.rs @@ -1,7 +1,8 @@ //! Pool component for the node builder. use alloy_primitives::Address; -use reth_transaction_pool::{PoolConfig, SubPoolLimit, TransactionPool}; +use reth_node_api::TxTy; +use reth_transaction_pool::{PoolConfig, PoolTransaction, SubPoolLimit, TransactionPool}; use std::{collections::HashSet, future::Future}; use crate::{BuilderContext, FullNodeTypes}; @@ -9,7 +10,9 @@ use crate::{BuilderContext, FullNodeTypes}; /// A type that knows how to build the transaction pool. pub trait PoolBuilder: Send { /// The transaction pool to build. - type Pool: TransactionPool + Unpin + 'static; + type Pool: TransactionPool>> + + Unpin + + 'static; /// Creates the transaction pool. 
fn build_pool( @@ -21,7 +24,9 @@ pub trait PoolBuilder: Send { impl PoolBuilder for F where Node: FullNodeTypes, - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool>> + + Unpin + + 'static, F: FnOnce(&BuilderContext) -> Fut + Send, Fut: Future> + Send, { diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 830909c8cc4c..f4557bd2272e 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -13,7 +13,6 @@ use rayon::ThreadPoolBuilder; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::{Chain, EthChainSpec, EthereumHardforks}; use reth_config::{config::EtlConfig, PruneConfig}; -use reth_consensus::Consensus; use reth_db_api::{database::Database, database_metrics::DatabaseMetrics}; use reth_db_common::init::{init_genesis, InitDatabaseError}; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; @@ -23,7 +22,9 @@ use reth_evm::noop::NoopBlockExecutorProvider; use reth_fs_util as fs; use reth_invalid_block_hooks::InvalidBlockWitnessHook; use reth_network_p2p::headers::client::HeadersClient; -use reth_node_api::{FullNodePrimitives, FullNodeTypes, NodeTypes, NodeTypesWithDB}; +use reth_node_api::{ + FullNodePrimitives, FullNodeTypes, NodePrimitives, NodeTypes, NodeTypesWithDB, +}; use reth_node_core::{ args::InvalidBlockHookType, dirs::{ChainPath, DataDirPath}, @@ -40,7 +41,7 @@ use reth_node_metrics::{ server::{MetricServer, MetricServerConfig}, version::VersionInfo, }; -use reth_primitives::Head; +use reth_primitives::{Head, TransactionSigned}; use reth_provider::{ providers::{ProviderNodeTypes, StaticFileProvider}, BlockHashReader, BlockNumReader, ChainSpecProvider, ProviderError, ProviderFactory, @@ -386,6 +387,7 @@ where Block = reth_primitives::Block, BlockBody = reth_primitives::BlockBody, Receipt = reth_primitives::Receipt, + BlockHeader = reth_primitives::Header, >, { let factory = 
ProviderFactory::new( @@ -457,6 +459,7 @@ where Block = reth_primitives::Block, BlockBody = reth_primitives::BlockBody, Receipt = reth_primitives::Receipt, + BlockHeader = reth_primitives::Header, >, { let factory = self.create_provider_factory().await?; @@ -681,7 +684,6 @@ where let components = components_builder.build_components(&builder_ctx).await?; let blockchain_db = self.blockchain_db().clone(); - let consensus = Arc::new(components.consensus().clone()); let node_adapter = NodeAdapter { components, @@ -699,7 +701,6 @@ where }, node_adapter, head, - consensus, }; let ctx = LaunchContextWith { @@ -855,11 +856,6 @@ where Ok(None) } - /// Returns the configured `Consensus`. - pub fn consensus(&self) -> Arc { - self.right().consensus.clone() - } - /// Returns the metrics sender. pub fn sync_metrics_tx(&self) -> UnboundedSender { self.right().db_provider_container.metrics_sender.clone() @@ -876,11 +872,16 @@ impl Attached::ChainSpec>, WithComponents>, > where - T: FullNodeTypes, + T: FullNodeTypes< + Provider: StateProviderFactory + ChainSpecProvider, + Types: ProviderNodeTypes>, + >, CB: NodeComponentsBuilder, { /// Returns the [`InvalidBlockHook`] to use for the node. 
- pub fn invalid_block_hook(&self) -> eyre::Result> { + pub fn invalid_block_hook( + &self, + ) -> eyre::Result::Primitives>>> { let Some(ref hook) = self.node_config().debug.invalid_block_hook else { return Ok(Box::new(NoopInvalidBlockHook::default())) }; @@ -904,7 +905,7 @@ where InvalidBlockHookType::PreState | InvalidBlockHookType::Opcode => { eyre::bail!("invalid block hook {hook:?} is not implemented yet") } - } as Box) + } as Box>) }) .collect::>()?; @@ -1029,7 +1030,6 @@ where db_provider_container: WithMeteredProvider, node_adapter: NodeAdapter, head: Head, - consensus: Arc, } #[cfg(test)] diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index 430ca31a5b16..264de07048a0 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -19,8 +19,8 @@ use reth_exex::ExExManagerHandle; use reth_network::{NetworkSyncUpdater, SyncState}; use reth_network_api::BlockDownloaderProvider; use reth_node_api::{ - BuiltPayload, FullNodeTypes, NodeTypesWithEngine, PayloadAttributesBuilder, PayloadBuilder, - PayloadTypes, + BlockTy, BuiltPayload, EngineValidator, FullNodeTypes, NodeTypesWithEngine, + PayloadAttributesBuilder, PayloadBuilder, PayloadTypes, }; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, @@ -75,7 +75,14 @@ where T: FullNodeTypes>, CB: NodeComponentsBuilder, AO: RethRpcAddOns> - + EngineValidatorAddOn>, + + EngineValidatorAddOn< + NodeAdapter, + Validator: EngineValidator< + ::Engine, + Block = BlockTy, + >, + >, + LocalPayloadAttributesBuilder: PayloadAttributesBuilder< <::Engine as PayloadTypes>::PayloadAttributes, >, @@ -168,13 +175,15 @@ where )); info!(target: "reth::cli", "StaticFileProducer initialized"); + let consensus = Arc::new(ctx.components().consensus().clone()); + // Configure the pipeline let pipeline_exex_handle = exex_manager_handle.clone().unwrap_or_else(ExExManagerHandle::empty); let pipeline = build_networked_pipeline( 
&ctx.toml_config().stages, network_client.clone(), - ctx.consensus(), + consensus.clone(), ctx.provider_factory().clone(), ctx.task_executor(), ctx.sync_metrics_tx(), @@ -196,18 +205,33 @@ where pruner_builder.finished_exex_height(exex_manager_handle.finished_height()); } let pruner = pruner_builder.build_with_provider_factory(ctx.provider_factory().clone()); - let pruner_events = pruner.events(); info!(target: "reth::cli", prune_config=?ctx.prune_config().unwrap_or_default(), "Pruner initialized"); + let event_sender = EventSender::default(); + let beacon_engine_handle = + BeaconConsensusEngineHandle::new(consensus_engine_tx.clone(), event_sender.clone()); + + // extract the jwt secret from the args if possible + let jwt_secret = ctx.auth_jwt_secret()?; + + let add_ons_ctx = AddOnsContext { + node: ctx.node_adapter().clone(), + config: ctx.node_config(), + beacon_engine_handle: beacon_engine_handle.clone(), + jwt_secret, + }; + let engine_payload_validator = add_ons.engine_validator(&add_ons_ctx).await?; + let mut engine_service = if ctx.is_dev() { let eth_service = LocalEngineService::new( - ctx.consensus(), + consensus.clone(), ctx.components().block_executor().clone(), ctx.provider_factory().clone(), ctx.blockchain_db().clone(), pruner, ctx.components().payload_builder().clone(), + engine_payload_validator, engine_tree_config, ctx.invalid_block_hook()?, ctx.sync_metrics_tx(), @@ -220,7 +244,7 @@ where Either::Left(eth_service) } else { let eth_service = EngineService::new( - ctx.consensus(), + consensus.clone(), ctx.components().block_executor().clone(), ctx.chain_spec(), network_client.clone(), @@ -231,6 +255,7 @@ where ctx.blockchain_db().clone(), pruner, ctx.components().payload_builder().clone(), + engine_payload_validator, engine_tree_config, ctx.invalid_block_hook()?, ctx.sync_metrics_tx(), @@ -239,11 +264,6 @@ where Either::Right(eth_service) }; - let event_sender = EventSender::default(); - - let beacon_engine_handle = - 
BeaconConsensusEngineHandle::new(consensus_engine_tx, event_sender.clone()); - info!(target: "reth::cli", "Consensus engine initialized"); let events = stream_select!( @@ -269,16 +289,6 @@ where ), ); - // extract the jwt secret from the args if possible - let jwt_secret = ctx.auth_jwt_secret()?; - - let add_ons_ctx = AddOnsContext { - node: ctx.node_adapter().clone(), - config: ctx.node_config(), - beacon_engine_handle, - jwt_secret, - }; - let RpcHandle { rpc_server_handles, rpc_registry } = add_ons.launch_add_ons(add_ons_ctx).await?; diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index 627145d2df7a..e23ce38da75b 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -70,7 +70,7 @@ pub trait LaunchNode { type Node; /// Create and return a new node asynchronously. - fn launch_node(self, target: Target) -> impl Future> + Send; + fn launch_node(self, target: Target) -> impl Future>; } impl LaunchNode for F @@ -80,7 +80,7 @@ where { type Node = Node; - fn launch_node(self, target: Target) -> impl Future> + Send { + fn launch_node(self, target: Target) -> impl Future> { self(target) } } @@ -236,7 +236,7 @@ where let pipeline = crate::setup::build_networked_pipeline( &ctx.toml_config().stages, network_client.clone(), - ctx.consensus(), + consensus.clone(), ctx.provider_factory().clone(), ctx.task_executor(), ctx.sync_metrics_tx(), diff --git a/crates/node/builder/src/lib.rs b/crates/node/builder/src/lib.rs index 899317f158c6..a4f87c479846 100644 --- a/crates/node/builder/src/lib.rs +++ b/crates/node/builder/src/lib.rs @@ -1,4 +1,7 @@ //! Standalone crate for Reth configuration and builder types. +//! +//! # features +//! 
- `js-tracer`: Enable the `JavaScript` tracer for the `debug_trace` endpoints #![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index 2eae77f8d835..24b7db77d888 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -33,6 +33,7 @@ use reth_rpc_builder::{ use reth_rpc_engine_api::{capabilities::EngineCapabilities, EngineApi}; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, info}; +use reth_transaction_pool::TransactionPool; use crate::EthApiBuilderCtx; @@ -405,6 +406,7 @@ where N: FullNodeComponents< Types: ProviderNodeTypes, PayloadBuilder: PayloadBuilder::Engine>, + Pool: TransactionPool::Transaction>, >, EthApi: EthApiTypes + FullEthApiServer + AddDevSigners + Unpin + 'static, EV: EngineValidatorBuilder, @@ -527,6 +529,7 @@ where N: FullNodeComponents< Types: ProviderNodeTypes, PayloadBuilder: PayloadBuilder::Engine>, + Pool: TransactionPool::Transaction>, >, EthApi: EthApiTypes + FullEthApiServer + AddDevSigners + Unpin + 'static, EV: EngineValidatorBuilder, @@ -601,7 +604,7 @@ where } /// A type that knows how to build the engine validator. -pub trait EngineValidatorBuilder: Send + Clone { +pub trait EngineValidatorBuilder: Send + Sync + Clone { /// The consensus implementation to build. 
type Validator: EngineValidator<::Engine>; @@ -617,7 +620,7 @@ where Node: FullNodeComponents, Validator: EngineValidator<::Engine> + Clone + Unpin + 'static, - F: FnOnce(&AddOnsContext<'_, Node>) -> Fut + Send + Clone, + F: FnOnce(&AddOnsContext<'_, Node>) -> Fut + Send + Sync + Clone, Fut: Future> + Send, { type Validator = Validator; diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index 092c1fdf6518..0a0e4f10dbc9 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -12,9 +12,9 @@ use reth_downloaders::{ use reth_evm::execute::BlockExecutorProvider; use reth_exex::ExExManagerHandle; use reth_network_p2p::{ - bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader, EthBlockClient, + bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader, BlockClient, }; -use reth_node_api::{BodyTy, FullNodePrimitives}; +use reth_node_api::{BodyTy, HeaderTy, NodePrimitives}; use reth_provider::{providers::ProviderNodeTypes, ProviderFactory}; use reth_stages::{prelude::DefaultStages, stages::ExecutionStage, Pipeline, StageSet}; use reth_static_file::StaticFileProducer; @@ -35,17 +35,13 @@ pub fn build_networked_pipeline( max_block: Option, static_file_producer: StaticFileProducer>, executor: Executor, - exex_manager_handle: ExExManagerHandle, + exex_manager_handle: ExExManagerHandle, ) -> eyre::Result> where N: ProviderNodeTypes, - Client: EthBlockClient + 'static, - Executor: BlockExecutorProvider, - N::Primitives: FullNodePrimitives< - Block = reth_primitives::Block, - BlockBody = reth_primitives::BlockBody, - Receipt = reth_primitives::Receipt, - >, + Client: BlockClient
, Body = BodyTy> + 'static, + Executor: BlockExecutorProvider, + N::Primitives: NodePrimitives, { // building network downloaders using the fetch client let header_downloader = ReverseHeadersDownloaderBuilder::new(config.headers) @@ -80,24 +76,20 @@ pub fn build_pipeline( stage_config: &StageConfig, header_downloader: H, body_downloader: B, - consensus: Arc, + consensus: Arc>, max_block: Option, metrics_tx: reth_stages::MetricEventsSender, prune_config: Option, static_file_producer: StaticFileProducer>, executor: Executor, - exex_manager_handle: ExExManagerHandle, + exex_manager_handle: ExExManagerHandle, ) -> eyre::Result> where N: ProviderNodeTypes, - H: HeaderDownloader
+ 'static, + H: HeaderDownloader
> + 'static, B: BodyDownloader> + 'static, - Executor: BlockExecutorProvider, - N::Primitives: FullNodePrimitives< - Block = reth_primitives::Block, - BlockBody = reth_primitives::BlockBody, - Receipt = reth_primitives::Receipt, - >, + Executor: BlockExecutorProvider, + N::Primitives: NodePrimitives, { let mut builder = Pipeline::::builder(); diff --git a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index c667a56293c1..0ede9fe80c4d 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -34,6 +34,7 @@ reth-net-nat.workspace = true reth-network-peers.workspace = true reth-prune-types.workspace = true reth-stages-types.workspace = true +reth-ethereum-forks.workspace = true # ethereum alloy-primitives.workspace = true diff --git a/crates/node/core/src/args/rpc_state_cache.rs b/crates/node/core/src/args/rpc_state_cache.rs index 9169d40b317d..b140d47b5fed 100644 --- a/crates/node/core/src/args/rpc_state_cache.rs +++ b/crates/node/core/src/args/rpc_state_cache.rs @@ -1,6 +1,6 @@ use clap::Args; use reth_rpc_server_types::constants::cache::{ - DEFAULT_BLOCK_CACHE_MAX_LEN, DEFAULT_CONCURRENT_DB_REQUESTS, DEFAULT_ENV_CACHE_MAX_LEN, + DEFAULT_BLOCK_CACHE_MAX_LEN, DEFAULT_CONCURRENT_DB_REQUESTS, DEFAULT_HEADER_CACHE_MAX_LEN, DEFAULT_RECEIPT_CACHE_MAX_LEN, }; @@ -22,12 +22,12 @@ pub struct RpcStateCacheArgs { )] pub max_receipts: u32, - /// Max number of bytes for cached env data. + /// Max number of headers in cache. #[arg( long = "rpc-cache.max-envs", - default_value_t = DEFAULT_ENV_CACHE_MAX_LEN, + default_value_t = DEFAULT_HEADER_CACHE_MAX_LEN, )] - pub max_envs: u32, + pub max_headers: u32, /// Max number of concurrent database requests. 
#[arg( @@ -42,7 +42,7 @@ impl Default for RpcStateCacheArgs { Self { max_blocks: DEFAULT_BLOCK_CACHE_MAX_LEN, max_receipts: DEFAULT_RECEIPT_CACHE_MAX_LEN, - max_envs: DEFAULT_ENV_CACHE_MAX_LEN, + max_headers: DEFAULT_HEADER_CACHE_MAX_LEN, max_concurrent_db_requests: DEFAULT_CONCURRENT_DB_REQUESTS, } } diff --git a/crates/node/core/src/lib.rs b/crates/node/core/src/lib.rs index a69a255a3c67..aa4f72bd6a4d 100644 --- a/crates/node/core/src/lib.rs +++ b/crates/node/core/src/lib.rs @@ -15,9 +15,11 @@ pub mod exit; pub mod node_config; pub mod utils; pub mod version; -/// Re-exported from `reth_primitives`. + +/// Re-exported primitive types pub mod primitives { - pub use reth_primitives::*; + pub use reth_ethereum_forks::*; + pub use reth_primitives_traits::*; } /// Re-export of `reth_rpc_*` crates. diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index 24d5588b6884..861e47fc3cf1 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -9,22 +9,26 @@ use crate::{ utils::get_single_header, }; use alloy_consensus::BlockHeader; +use alloy_eips::BlockHashOrNumber; +use alloy_primitives::{BlockNumber, B256}; use eyre::eyre; use reth_chainspec::{ChainSpec, EthChainSpec, MAINNET}; use reth_config::config::PruneConfig; +use reth_ethereum_forks::Head; use reth_network_p2p::headers::client::HeadersClient; -use serde::{de::DeserializeOwned, Serialize}; -use std::{fs, path::Path}; - -use alloy_eips::BlockHashOrNumber; -use alloy_primitives::{BlockNumber, B256}; -use reth_primitives::{Head, SealedHeader}; +use reth_primitives_traits::SealedHeader; use reth_stages_types::StageId; use reth_storage_api::{ BlockHashReader, DatabaseProviderFactory, HeaderProvider, StageCheckpointReader, }; use reth_storage_errors::provider::ProviderResult; -use std::{net::SocketAddr, path::PathBuf, sync::Arc}; +use serde::{de::DeserializeOwned, Serialize}; +use std::{ + fs, + net::SocketAddr, + path::{Path, PathBuf}, + sync::Arc, 
+}; use tracing::*; /// This includes all necessary configuration to launch the node. @@ -315,9 +319,9 @@ impl NodeConfig { Ok(Head { number: head, hash, - difficulty: header.difficulty, + difficulty: header.difficulty(), total_difficulty, - timestamp: header.timestamp, + timestamp: header.timestamp(), }) } @@ -340,7 +344,7 @@ impl NodeConfig { // try to look up the header in the database if let Some(header) = header { info!(target: "reth::cli", ?tip, "Successfully looked up tip block in the database"); - return Ok(header.number) + return Ok(header.number()) } Ok(self.fetch_tip_from_network(client, tip.into()).await.number()) diff --git a/crates/node/core/src/utils.rs b/crates/node/core/src/utils.rs index e52af4b46fe1..65f90f27eb72 100644 --- a/crates/node/core/src/utils.rs +++ b/crates/node/core/src/utils.rs @@ -9,7 +9,8 @@ use reth_consensus::Consensus; use reth_network_p2p::{ bodies::client::BodiesClient, headers::client::HeadersClient, priority::Priority, }; -use reth_primitives::{SealedBlock, SealedHeader}; +use reth_primitives::SealedBlock; +use reth_primitives_traits::SealedHeader; use std::{ env::VarError, path::{Path, PathBuf}, diff --git a/crates/node/events/src/cl.rs b/crates/node/events/src/cl.rs index bf0d4a59b213..dac13fe07631 100644 --- a/crates/node/events/src/cl.rs +++ b/crates/node/events/src/cl.rs @@ -1,5 +1,6 @@ //! Events related to Consensus Layer health. +use alloy_consensus::Header; use futures::Stream; use reth_storage_api::CanonChainTracker; use std::{ @@ -20,9 +21,9 @@ const NO_TRANSITION_CONFIG_EXCHANGED_PERIOD: Duration = Duration::from_secs(120) const NO_FORKCHOICE_UPDATE_RECEIVED_PERIOD: Duration = Duration::from_secs(120); /// A Stream of [`ConsensusLayerHealthEvent`]. 
-pub struct ConsensusLayerHealthEvents { +pub struct ConsensusLayerHealthEvents { interval: Interval, - canon_chain: Box, + canon_chain: Box>, } impl fmt::Debug for ConsensusLayerHealthEvents { @@ -31,9 +32,9 @@ impl fmt::Debug for ConsensusLayerHealthEvents { } } -impl ConsensusLayerHealthEvents { +impl ConsensusLayerHealthEvents { /// Creates a new [`ConsensusLayerHealthEvents`] with the given canonical chain tracker. - pub fn new(canon_chain: Box) -> Self { + pub fn new(canon_chain: Box>) -> Self { // Skip the first tick to prevent the false `ConsensusLayerHealthEvent::NeverSeen` event. let interval = tokio::time::interval_at(Instant::now() + CHECK_INTERVAL, CHECK_INTERVAL); Self { interval, canon_chain } diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index edd85501ec0a..86f1ea507ac5 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -298,14 +298,14 @@ impl NodeState { fn handle_pruner_event(&self, event: PrunerEvent) { match event { PrunerEvent::Started { tip_block_number } => { - info!(tip_block_number, "Pruner started"); + debug!(tip_block_number, "Pruner started"); } PrunerEvent::Finished { tip_block_number, elapsed, stats } => { let stats = format!( "[{}]", stats.iter().map(|item| item.to_string()).collect::>().join(", ") ); - info!(tip_block_number, ?elapsed, %stats, "Pruner finished"); + debug!(tip_block_number, ?elapsed, pruned_segments = %stats, "Pruner finished"); } } } @@ -313,10 +313,10 @@ impl NodeState { fn handle_static_file_producer_event(&self, event: StaticFileProducerEvent) { match event { StaticFileProducerEvent::Started { targets } => { - info!(?targets, "Static File Producer started"); + debug!(?targets, "Static File Producer started"); } StaticFileProducerEvent::Finished { targets, elapsed } => { - info!(?targets, ?elapsed, "Static File Producer finished"); + debug!(?targets, ?elapsed, "Static File Producer finished"); } } } diff --git a/crates/node/types/src/lib.rs 
b/crates/node/types/src/lib.rs index c0d266e57755..6e1eb81a0c8b 100644 --- a/crates/node/types/src/lib.rs +++ b/crates/node/types/src/lib.rs @@ -31,7 +31,7 @@ pub trait NodeTypes: Send + Sync + Unpin + 'static { /// The node's primitive types, defining basic operations and structures. type Primitives: NodePrimitives; /// The type used for configuration of the EVM. - type ChainSpec: EthChainSpec; + type ChainSpec: EthChainSpec
::BlockHeader>; /// The type used to perform state commitment operations. type StateCommitment: StateCommitment; /// The type responsible for writing chain primitives to storage. @@ -151,7 +151,7 @@ impl AnyNodeTypes { impl NodeTypes for AnyNodeTypes where P: NodePrimitives + Send + Sync + Unpin + 'static, - C: EthChainSpec + 'static, + C: EthChainSpec
+ 'static, SC: StateCommitment, S: Default + Send + Sync + Unpin + Debug + 'static, { @@ -212,7 +212,7 @@ impl NodeTypes for AnyNodeTypesWithEngine where P: NodePrimitives + Send + Sync + Unpin + 'static, E: EngineTypes + Send + Sync + Unpin, - C: EthChainSpec + 'static, + C: EthChainSpec
+ 'static, SC: StateCommitment, S: Default + Send + Sync + Unpin + Debug + 'static, { @@ -226,7 +226,7 @@ impl NodeTypesWithEngine for AnyNodeTypesWithEngine + 'static, SC: StateCommitment, S: Default + Send + Sync + Unpin + Debug + 'static, { diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml index 45f4492e82b6..60fde90f1914 100644 --- a/crates/optimism/bin/Cargo.toml +++ b/crates/optimism/bin/Cargo.toml @@ -9,7 +9,7 @@ repository.workspace = true exclude.workspace = true [dependencies] -reth-node-builder.workspace = true +reth-node-builder = { workspace = true, features = ["js-tracer"] } reth-cli-util.workspace = true reth-optimism-cli.workspace = true reth-provider.workspace = true @@ -44,7 +44,8 @@ optimism = [ "reth-optimism-evm/optimism", "reth-optimism-payload-builder/optimism", "reth-optimism-rpc/optimism", - "reth-provider/optimism" + "reth-provider/optimism", + "reth-optimism-primitives/op", ] dev = [ diff --git a/crates/optimism/bin/src/main.rs b/crates/optimism/bin/src/main.rs index 6494298ba393..82fb3c241954 100644 --- a/crates/optimism/bin/src/main.rs +++ b/crates/optimism/bin/src/main.rs @@ -3,9 +3,9 @@ #![cfg(feature = "optimism")] use clap::Parser; -use reth_node_builder::{engine_tree_config::TreeConfig, EngineNodeLauncher}; +use reth_node_builder::{engine_tree_config::TreeConfig, EngineNodeLauncher, Node}; use reth_optimism_cli::{chainspec::OpChainSpecParser, Cli}; -use reth_optimism_node::{args::RollupArgs, node::OpAddOns, OpNode}; +use reth_optimism_node::{args::RollupArgs, OpNode}; use reth_provider::providers::BlockchainProvider2; use tracing as _; @@ -27,7 +27,6 @@ fn main() { tracing::warn!(target: "reth::cli", "Experimental engine is default now, and the --engine.experimental flag is deprecated. 
To enable the legacy functionality, use --engine.legacy."); } let use_legacy_engine = rollup_args.legacy; - let sequencer_http_arg = rollup_args.sequencer_http.clone(); match use_legacy_engine { false => { let engine_tree_config = TreeConfig::default() @@ -35,8 +34,8 @@ fn main() { .with_memory_block_buffer_target(rollup_args.memory_block_buffer_target); let handle = builder .with_types_and_provider::>() - .with_components(OpNode::components(rollup_args)) - .with_add_ons(OpAddOns::new(sequencer_http_arg)) + .with_components(OpNode::components(rollup_args.clone())) + .with_add_ons(OpNode::new(rollup_args).add_ons()) .launch_with_fn(|builder| { let launcher = EngineNodeLauncher::new( builder.task_executor().clone(), diff --git a/crates/optimism/chainspec/Cargo.toml b/crates/optimism/chainspec/Cargo.toml index 4e573ce29946..5ccf26607094 100644 --- a/crates/optimism/chainspec/Cargo.toml +++ b/crates/optimism/chainspec/Cargo.toml @@ -46,15 +46,17 @@ op-alloy-rpc-types.workspace = true [features] default = ["std"] std = [ - "alloy-chains/std", - "alloy-genesis/std", - "alloy-primitives/std", - "alloy-eips/std", - "op-alloy-rpc-types/std", - "reth-chainspec/std", - "reth-ethereum-forks/std", - "reth-primitives-traits/std", - "reth-optimism-forks/std", - "alloy-consensus/std", - "once_cell/std", + "alloy-chains/std", + "alloy-genesis/std", + "alloy-primitives/std", + "alloy-eips/std", + "op-alloy-rpc-types/std", + "reth-chainspec/std", + "reth-ethereum-forks/std", + "reth-primitives-traits/std", + "reth-optimism-forks/std", + "alloy-consensus/std", + "once_cell/std", + "derive_more/std", + "reth-network-peers/std" ] diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index d552d08f18ca..a3dab80705e5 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -186,14 +186,20 @@ pub struct OpChainSpec { impl OpChainSpec { /// Read from parent to determine the base fee for the next block + /// + /// See 
also [Base fee computation](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/holocene/exec-engine.md#base-fee-computation) pub fn next_block_base_fee( &self, parent: &Header, timestamp: u64, ) -> Result { - let is_holocene_activated = self - .inner - .is_fork_active_at_timestamp(reth_optimism_forks::OpHardfork::Holocene, timestamp); + // > if Holocene is active in parent_header.timestamp, then the parameters from + // > parent_header.extraData are used. + let is_holocene_activated = self.inner.is_fork_active_at_timestamp( + reth_optimism_forks::OpHardfork::Holocene, + parent.timestamp, + ); + // If we are in the Holocene, we need to use the base fee params // from the parent block's extra data. // Else, use the base fee params (default values) from chainspec @@ -253,6 +259,8 @@ pub fn decode_holocene_1559_params(extra_data: Bytes) -> Result<(u32, u32), Deco } impl EthChainSpec for OpChainSpec { + type Header = Header; + fn chain(&self) -> alloy_chains::Chain { self.inner.chain() } @@ -281,7 +289,7 @@ impl EthChainSpec for OpChainSpec { Box::new(ChainSpec::display_hardforks(self)) } - fn genesis_header(&self) -> &Header { + fn genesis_header(&self) -> &Self::Header { self.inner.genesis_header() } diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index b61a4628f4d2..48ea2d07decc 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -95,6 +95,7 @@ optimism = [ "reth-execution-types/optimism", "reth-db/optimism", "reth-db-api/optimism", + "reth-optimism-primitives/op", "reth-downloaders/optimism" ] asm-keccak = [ diff --git a/crates/optimism/consensus/Cargo.toml b/crates/optimism/consensus/Cargo.toml index 0dffceaddca9..30f16e4eb228 100644 --- a/crates/optimism/consensus/Cargo.toml +++ b/crates/optimism/consensus/Cargo.toml @@ -22,6 +22,7 @@ reth-trie-common.workspace = true # op-reth reth-optimism-forks.workspace = true reth-optimism-chainspec.workspace = true 
+reth-optimism-primitives.workspace = true # ethereum alloy-primitives.workspace = true diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index 69d943785821..b50efd5f6f26 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -12,7 +12,9 @@ use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; use alloy_primitives::{B64, U256}; use reth_chainspec::EthereumHardforks; -use reth_consensus::{Consensus, ConsensusError, HeaderValidator, PostExecutionInput}; +use reth_consensus::{ + Consensus, ConsensusError, FullConsensus, HeaderValidator, PostExecutionInput, +}; use reth_consensus_common::validation::{ validate_against_parent_4844, validate_against_parent_eip1559_base_fee, validate_against_parent_hash_number, validate_against_parent_timestamp, @@ -21,6 +23,7 @@ use reth_consensus_common::validation::{ }; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_forks::OpHardforks; +use reth_optimism_primitives::OpPrimitives; use reth_primitives::{BlockBody, BlockWithSenders, GotExpected, SealedBlock, SealedHeader}; use std::{sync::Arc, time::SystemTime}; @@ -46,6 +49,16 @@ impl OpBeaconConsensus { } } +impl FullConsensus for OpBeaconConsensus { + fn validate_block_post_execution( + &self, + block: &BlockWithSenders, + input: PostExecutionInput<'_>, + ) -> Result<(), ConsensusError> { + validate_block_post_execution(block, &self.chain_spec, input.receipts) + } +} + impl Consensus for OpBeaconConsensus { fn validate_body_against_header( &self, @@ -80,14 +93,6 @@ impl Consensus for OpBeaconConsensus { Ok(()) } - - fn validate_block_post_execution( - &self, - block: &BlockWithSenders, - input: PostExecutionInput<'_>, - ) -> Result<(), ConsensusError> { - validate_block_post_execution(block, &self.chain_spec, input.receipts) - } } impl HeaderValidator for OpBeaconConsensus { @@ -101,17 +106,21 @@ impl HeaderValidator for OpBeaconConsensus { header: &SealedHeader, parent: &SealedHeader, ) -> 
Result<(), ConsensusError> { - validate_against_parent_hash_number(header, parent)?; + validate_against_parent_hash_number(header.header(), parent)?; if self.chain_spec.is_bedrock_active_at_block(header.number) { - validate_against_parent_timestamp(header, parent)?; + validate_against_parent_timestamp(header.header(), parent.header())?; } - validate_against_parent_eip1559_base_fee(header, parent, &self.chain_spec)?; + validate_against_parent_eip1559_base_fee( + header.header(), + parent.header(), + &self.chain_spec, + )?; // ensure that the blob gas fields for this block if self.chain_spec.is_cancun_active_at_timestamp(header.timestamp) { - validate_against_parent_4844(header, parent)?; + validate_against_parent_4844(header.header(), parent.header())?; } Ok(()) diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index 807f224ca4b8..309ddc1cb4e1 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -21,6 +21,7 @@ reth-execution-errors.workspace = true reth-execution-types.workspace = true reth-prune-types.workspace = true reth-consensus.workspace = true +reth-consensus-common.workspace = true # ethereum alloy-eips.workspace = true @@ -32,6 +33,7 @@ alloy-consensus.workspace = true reth-optimism-consensus.workspace = true reth-optimism-chainspec.workspace = true reth-optimism-forks.workspace = true +reth-optimism-primitives.workspace = true # revm revm.workspace = true @@ -61,12 +63,17 @@ std = [ "alloy-genesis/std", "alloy-primitives/std", "revm-primitives/std", - "revm/std" + "revm/std", + "reth-optimism-primitives/std", + "reth-ethereum-forks/std", + "derive_more/std", + "reth-optimism-forks/std" ] optimism = [ "reth-primitives/optimism", "reth-execution-types/optimism", "reth-optimism-consensus/optimism", "revm/optimism", - "revm-primitives/optimism" + "revm-primitives/optimism", + "reth-optimism-primitives/op", ] diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 
1c93d2b71d03..205c85160dcd 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -10,8 +10,9 @@ use reth_chainspec::EthereumHardforks; use reth_consensus::ConsensusError; use reth_evm::{ execute::{ - BasicBlockExecutorProvider, BlockExecutionError, BlockExecutionStrategy, - BlockExecutionStrategyFactory, BlockValidationError, ExecuteOutput, ProviderError, + balance_increment_state, BasicBlockExecutorProvider, BlockExecutionError, + BlockExecutionStrategy, BlockExecutionStrategyFactory, BlockValidationError, ExecuteOutput, + ProviderError, }, state_change::post_block_balance_increments, system_calls::{OnStateHook, SystemCaller}, @@ -20,7 +21,8 @@ use reth_evm::{ use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::validate_block_post_execution; use reth_optimism_forks::OpHardfork; -use reth_primitives::{BlockWithSenders, Receipt, TxType}; +use reth_optimism_primitives::OpPrimitives; +use reth_primitives::{BlockWithSenders, Receipt, TransactionSigned, TxType}; use reth_revm::{Database, State}; use revm_primitives::{db::DatabaseCommit, EnvWithHandlerCfg, ResultAndState, U256}; use tracing::trace; @@ -50,9 +52,14 @@ impl OpExecutionStrategyFactory { impl BlockExecutionStrategyFactory for OpExecutionStrategyFactory where - EvmConfig: - Clone + Unpin + Sync + Send + 'static + ConfigureEvm
, + EvmConfig: Clone + + Unpin + + Sync + + Send + + 'static + + ConfigureEvm
, { + type Primitives = OpPrimitives; type Strategy + Display>> = OpExecutionStrategy; @@ -109,11 +116,13 @@ where } } -impl BlockExecutionStrategy for OpExecutionStrategy +impl BlockExecutionStrategy for OpExecutionStrategy where DB: Database + Display>, - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm
, { + type DB = DB; + type Primitives = OpPrimitives; type Error = BlockExecutionError; fn init(&mut self, tx_env_overrides: Box) { @@ -154,7 +163,7 @@ where &mut self, block: &BlockWithSenders, total_difficulty: U256, - ) -> Result { + ) -> Result, Self::Error> { let env = self.evm_env_for_block(&block.header, total_difficulty); let mut evm = self.evm_config.evm_with_env(&mut self.state, env); @@ -217,7 +226,7 @@ where ?transaction, "Executed transaction" ); - self.system_caller.on_state(&result_and_state); + self.system_caller.on_state(&result_and_state.state); let ResultAndState { result, state } = result_and_state; evm.db_mut().commit(state); @@ -253,11 +262,14 @@ where _receipts: &[Receipt], ) -> Result { let balance_increments = - post_block_balance_increments(&self.chain_spec.clone(), block, total_difficulty); + post_block_balance_increments(&self.chain_spec.clone(), &block.block, total_difficulty); // increment balances self.state - .increment_balances(balance_increments) + .increment_balances(balance_increments.clone()) .map_err(|_| BlockValidationError::IncrementBalanceFailed)?; + // call state hook with changes due to balance increments. 
+ let balance_state = balance_increment_state(&balance_increments, &mut self.state)?; + self.system_caller.on_state(&balance_state); Ok(Requests::default()) } diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index 176864de6dc5..7424379f5ae7 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -57,6 +57,7 @@ impl OpEvmConfig { impl ConfigureEvmEnv for OpEvmConfig { type Header = Header; + type Transaction = TransactionSigned; type Error = DecodeError; fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { diff --git a/crates/optimism/hardforks/Cargo.toml b/crates/optimism/hardforks/Cargo.toml index c30566a54eb4..1ea23069a685 100644 --- a/crates/optimism/hardforks/Cargo.toml +++ b/crates/optimism/hardforks/Cargo.toml @@ -30,10 +30,13 @@ default = ["std"] std = [ "alloy-primitives/std", "once_cell/std", - "serde?/std" + "serde?/std", + "alloy-chains/std", + "reth-ethereum-forks/std" ] serde = [ "dep:serde", "alloy-chains/serde", - "alloy-primitives/serde" + "alloy-primitives/serde", + "reth-ethereum-forks/serde" ] diff --git a/crates/optimism/hardforks/src/dev.rs b/crates/optimism/hardforks/src/dev.rs index 5fe77a314029..6dcd28c46c9e 100644 --- a/crates/optimism/hardforks/src/dev.rs +++ b/crates/optimism/hardforks/src/dev.rs @@ -1,3 +1,4 @@ +use alloc::vec; use alloy_primitives::U256; use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, ForkCondition}; diff --git a/crates/optimism/hardforks/src/lib.rs b/crates/optimism/hardforks/src/lib.rs index 3915bcf6cbda..bf6ca98ce4e9 100644 --- a/crates/optimism/hardforks/src/lib.rs +++ b/crates/optimism/hardforks/src/lib.rs @@ -6,6 +6,7 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] extern crate alloc; diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 
5f100f0a28d1..b0b7065f3363 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -41,7 +41,7 @@ reth-optimism-rpc.workspace = true reth-optimism-chainspec.workspace = true reth-optimism-consensus.workspace = true reth-optimism-forks.workspace = true -reth-optimism-primitives.workspace = true +reth-optimism-primitives = { workspace = true, features = ["serde"] } # revm with required optimism features revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } @@ -96,7 +96,8 @@ optimism = [ "reth-optimism-consensus/optimism", "reth-db/optimism", "reth-optimism-node/optimism", - "reth-node-core/optimism" + "reth-node-core/optimism", + "reth-optimism-primitives/op", ] asm-keccak = [ "reth-primitives/asm-keccak", @@ -105,6 +106,9 @@ asm-keccak = [ "reth-optimism-node/asm-keccak", "reth-node-core/asm-keccak" ] +js-tracer = [ + "reth-node-builder/js-tracer" +] test-utils = [ "reth-tasks", "reth-e2e-test-utils", diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index 57b76b904bd3..063ac3617af5 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -229,6 +229,8 @@ mod test { suggested_fee_recipient: Address::ZERO, withdrawals: Some(vec![]), parent_beacon_block_root: Some(B256::ZERO), + target_blobs_per_block: None, + max_blobs_per_block: None, }, } } diff --git a/crates/optimism/node/src/lib.rs b/crates/optimism/node/src/lib.rs index 7af0f3b8a722..81db8b2b7fcf 100644 --- a/crates/optimism/node/src/lib.rs +++ b/crates/optimism/node/src/lib.rs @@ -1,4 +1,7 @@ //! Standalone crate for Optimism-specific Reth configuration and builder types. +//! +//! # features +//! 
- `js-tracer`: Enable the `JavaScript` tracer for the `debug_trace` endpoints #![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index d6cd47cf2af1..b2203331ddfa 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -13,7 +13,7 @@ use reth_db::transaction::{DbTx, DbTxMut}; use reth_evm::{execute::BasicBlockExecutorProvider, ConfigureEvm}; use reth_network::{NetworkConfig, NetworkHandle, NetworkManager, PeersInfo}; use reth_node_api::{ - AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, PayloadBuilder, + AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, PayloadBuilder, TxTy, }; use reth_node_builder::{ components::{ @@ -31,10 +31,10 @@ use reth_optimism_payload_builder::builder::OpPayloadTransactions; use reth_optimism_primitives::OpPrimitives; use reth_optimism_rpc::{ witness::{DebugExecutionWitnessApiServer, OpDebugWitnessApi}, - OpEthApi, + OpEthApi, SequencerClient, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; -use reth_primitives::BlockBody; +use reth_primitives::{BlockBody, TransactionSigned}; use reth_provider::{ providers::ChainStorage, BlockBodyReader, BlockBodyWriter, CanonStateSubscriptions, ChainSpecProvider, DBProvider, EthStorage, ProviderResult, ReadBodyInput, @@ -42,7 +42,7 @@ use reth_provider::{ use reth_rpc_server_types::RethRpcModule; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::{ - blobstore::DiskFileBlobStore, CoinbaseTipOrdering, TransactionPool, + blobstore::DiskFileBlobStore, CoinbaseTipOrdering, PoolTransaction, TransactionPool, TransactionValidationTaskExecutor, }; use reth_trie_db::MerklePatriciaTrie; @@ -178,12 +178,11 @@ where OpAddOns>::Components>>; fn components_builder(&self) -> Self::ComponentsBuilder { - let Self { args } = self; - Self::components(args.clone()) + 
Self::components(self.args.clone()) } fn add_ons(&self) -> Self::AddOns { - OpAddOns::new(self.args.sequencer_http.clone()) + Self::AddOns::builder().with_sequencer(self.args.sequencer_http.clone()).build() } } @@ -204,14 +203,14 @@ pub struct OpAddOns(pub RpcAddOns, OpEngin impl>> Default for OpAddOns { fn default() -> Self { - Self::new(None) + Self::builder().build() } } impl>> OpAddOns { - /// Create a new instance with the given `sequencer_http` URL. - pub fn new(sequencer_http: Option) -> Self { - Self(RpcAddOns::new(move |ctx| OpEthApi::new(ctx, sequencer_http), Default::default())) + /// Build a [`OpAddOns`] using [`OpAddOnsBuilder`]. + pub fn builder() -> OpAddOnsBuilder { + OpAddOnsBuilder::default() } } @@ -270,6 +269,38 @@ where } } +/// A regular optimism evm and executor builder. +#[derive(Debug, Default, Clone)] +#[non_exhaustive] +pub struct OpAddOnsBuilder { + /// Sequencer client, configured to forward submitted transactions to sequencer of given OP + /// network. + sequencer_client: Option, +} + +impl OpAddOnsBuilder { + /// With a [`SequencerClient`]. + pub fn with_sequencer(mut self, sequencer_client: Option) -> Self { + self.sequencer_client = sequencer_client.map(SequencerClient::new); + self + } +} + +impl OpAddOnsBuilder { + /// Builds an instance of [`OpAddOns`]. + pub fn build(self) -> OpAddOns + where + N: FullNodeComponents>, + { + let Self { sequencer_client, .. } = self; + + OpAddOns(RpcAddOns::new( + move |ctx| OpEthApi::::builder().with_sequencer(sequencer_client).build(ctx), + Default::default(), + )) + } +} + /// A regular optimism evm and executor builder. 
#[derive(Debug, Default, Clone, Copy)] #[non_exhaustive] @@ -277,7 +308,7 @@ pub struct OpExecutorBuilder; impl ExecutorBuilder for OpExecutorBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes>, { type EVM = OpEvmConfig; type Executor = BasicBlockExecutorProvider; @@ -434,8 +465,10 @@ where Primitives = OpPrimitives, >, >, - Pool: TransactionPool + Unpin + 'static, - Evm: ConfigureEvm
, + Pool: TransactionPool>> + + Unpin + + 'static, + Evm: ConfigureEvm
, { let payload_builder = reth_optimism_payload_builder::OpPayloadBuilder::new(evm_config) .with_transactions(self.best_transactions) @@ -474,7 +507,9 @@ where Primitives = OpPrimitives, >, >, - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool>> + + Unpin + + 'static, Txs: OpPayloadTransactions, { async fn spawn_payload_service( @@ -546,7 +581,9 @@ impl OpNetworkBuilder { impl NetworkBuilder for OpNetworkBuilder where Node: FullNodeTypes>, - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool>> + + Unpin + + 'static, { async fn build_network( self, @@ -569,9 +606,9 @@ pub struct OpConsensusBuilder; impl ConsensusBuilder for OpConsensusBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes>, { - type Consensus = Arc; + type Consensus = Arc; async fn build_consensus(self, ctx: &BuilderContext) -> eyre::Result { Ok(Arc::new(OpBeaconConsensus::new(ctx.chain_spec()))) diff --git a/crates/optimism/node/src/txpool.rs b/crates/optimism/node/src/txpool.rs index 6db5d69568b6..a3e474a60760 100644 --- a/crates/optimism/node/src/txpool.rs +++ b/crates/optimism/node/src/txpool.rs @@ -70,7 +70,7 @@ impl OpTransactionValidator { impl OpTransactionValidator where Client: StateProviderFactory + BlockReaderIdExt, - Tx: EthPoolTransaction, + Tx: EthPoolTransaction, { /// Create a new [`OpTransactionValidator`]. 
pub fn new(inner: EthTransactionValidator) -> Self { @@ -142,7 +142,7 @@ where let l1_block_info = self.block_info.l1_block_info.read().clone(); let mut encoded = Vec::with_capacity(valid_tx.transaction().encoded_length()); - let tx: TransactionSigned = valid_tx.transaction().clone().into_consensus().into(); + let tx = valid_tx.transaction().clone_into_consensus(); tx.encode_2718(&mut encoded); let cost_addition = match l1_block_info.l1_tx_data_fee( @@ -196,7 +196,7 @@ where impl TransactionValidator for OpTransactionValidator where Client: StateProviderFactory + BlockReaderIdExt, - Tx: EthPoolTransaction, + Tx: EthPoolTransaction, { type Transaction = Tx; @@ -237,7 +237,7 @@ mod tests { use alloy_primitives::{PrimitiveSignature as Signature, TxKind, U256}; use op_alloy_consensus::TxDeposit; use reth_chainspec::MAINNET; - use reth_primitives::{Transaction, TransactionSigned, TransactionSignedEcRecovered}; + use reth_primitives::{RecoveredTx, Transaction, TransactionSigned}; use reth_provider::test_utils::MockEthProvider; use reth_transaction_pool::{ blobstore::InMemoryBlobStore, validate::EthTransactionValidatorBuilder, @@ -266,8 +266,7 @@ mod tests { }); let signature = Signature::test_signature(); let signed_tx = TransactionSigned::new_unhashed(deposit_tx, signature); - let signed_recovered = - TransactionSignedEcRecovered::from_signed_transaction(signed_tx, signer); + let signed_recovered = RecoveredTx::from_signed_transaction(signed_tx, signer); let len = signed_recovered.encode_2718_len(); let pooled_tx = EthPooledTransaction::new(signed_recovered, len); let outcome = validator.validate_one(origin, pooled_tx); diff --git a/crates/optimism/node/src/utils.rs b/crates/optimism/node/src/utils.rs index e70e35031982..147aaac59dcc 100644 --- a/crates/optimism/node/src/utils.rs +++ b/crates/optimism/node/src/utils.rs @@ -1,10 +1,8 @@ -use crate::{node::OpAddOns, OpBuiltPayload, OpNode as OtherOpNode, OpPayloadBuilderAttributes}; +use crate::{OpBuiltPayload, OpNode as 
OtherOpNode, OpPayloadBuilderAttributes}; use alloy_genesis::Genesis; use alloy_primitives::{Address, B256}; use alloy_rpc_types_engine::PayloadAttributes; -use reth_e2e_test_utils::{ - transaction::TransactionTestContext, wallet::Wallet, Adapter, NodeHelperType, -}; +use reth_e2e_test_utils::{transaction::TransactionTestContext, wallet::Wallet, NodeHelperType}; use reth_optimism_chainspec::OpChainSpecBuilder; use reth_payload_builder::EthPayloadBuilderAttributes; use reth_tasks::TaskManager; @@ -12,7 +10,7 @@ use std::sync::Arc; use tokio::sync::Mutex; /// Optimism Node Helper type -pub(crate) type OpNode = NodeHelperType>>; +pub(crate) type OpNode = NodeHelperType; /// Creates the initial setup with `num_nodes` of the node config, started and connected. pub async fn setup(num_nodes: usize) -> eyre::Result<(Vec, TaskManager, Wallet)> { @@ -57,6 +55,8 @@ pub fn optimism_payload_attributes(timestamp: u64) -> OpPayloadBuilderAttributes suggested_fee_recipient: Address::ZERO, withdrawals: Some(vec![]), parent_beacon_block_root: Some(B256::ZERO), + target_blobs_per_block: None, + max_blobs_per_block: None, }; OpPayloadBuilderAttributes { diff --git a/crates/optimism/node/tests/it/builder.rs b/crates/optimism/node/tests/it/builder.rs index 67cac17d3987..875b282e0ad0 100644 --- a/crates/optimism/node/tests/it/builder.rs +++ b/crates/optimism/node/tests/it/builder.rs @@ -2,20 +2,21 @@ use reth_db::test_utils::create_test_rw_db; use reth_node_api::FullNodeComponents; -use reth_node_builder::{NodeBuilder, NodeConfig}; +use reth_node_builder::{Node, NodeBuilder, NodeConfig}; use reth_optimism_chainspec::BASE_MAINNET; -use reth_optimism_node::{node::OpAddOns, OpNode}; +use reth_optimism_node::{args::RollupArgs, OpNode}; #[test] fn test_basic_setup() { // parse CLI -> config let config = NodeConfig::new(BASE_MAINNET.clone()); let db = create_test_rw_db(); + let args = RollupArgs::default(); let _builder = NodeBuilder::new(config) .with_database(db) .with_types::() - 
.with_components(OpNode::components(Default::default())) - .with_add_ons(OpAddOns::new(None)) + .with_components(OpNode::components(args.clone())) + .with_add_ons(OpNode::new(args).add_ons()) .on_component_initialized(move |ctx| { let _provider = ctx.provider(); Ok(()) diff --git a/crates/optimism/node/tests/it/priority.rs b/crates/optimism/node/tests/it/priority.rs index 35be3dfd3ee1..1b49ed684bfc 100644 --- a/crates/optimism/node/tests/it/priority.rs +++ b/crates/optimism/node/tests/it/priority.rs @@ -27,10 +27,10 @@ use reth_optimism_node::{ use reth_optimism_payload_builder::builder::OpPayloadTransactions; use reth_optimism_primitives::OpPrimitives; use reth_payload_util::{PayloadTransactions, PayloadTransactionsChain, PayloadTransactionsFixed}; -use reth_primitives::{SealedBlock, Transaction, TransactionSigned, TransactionSignedEcRecovered}; +use reth_primitives::{RecoveredTx, SealedBlock, Transaction, TransactionSigned}; use reth_provider::providers::BlockchainProvider2; use reth_tasks::TaskManager; -use reth_transaction_pool::pool::BestPayloadTransactions; +use reth_transaction_pool::{pool::BestPayloadTransactions, PoolTransaction}; use std::sync::Arc; use tokio::sync::Mutex; @@ -44,9 +44,11 @@ impl OpPayloadTransactions for CustomTxPriority { &self, pool: Pool, attr: reth_transaction_pool::BestTransactionsAttributes, - ) -> impl PayloadTransactions + ) -> impl PayloadTransactions where - Pool: reth_transaction_pool::TransactionPool, + Pool: reth_transaction_pool::TransactionPool< + Transaction: PoolTransaction, + >, { // Block composition: // 1. 
Best transactions from the pool (up to 250k gas) @@ -64,7 +66,7 @@ impl OpPayloadTransactions for CustomTxPriority { ..Default::default() }; let signature = sender.sign_transaction_sync(&mut end_of_block_tx).unwrap(); - let end_of_block_tx = TransactionSignedEcRecovered::from_signed_transaction( + let end_of_block_tx = RecoveredTx::from_signed_transaction( TransactionSigned::new_unhashed(Transaction::Eip1559(end_of_block_tx), signature), sender.address(), ); diff --git a/crates/optimism/payload/Cargo.toml b/crates/optimism/payload/Cargo.toml index 7f47da7e2360..1c4f855b6aa2 100644 --- a/crates/optimism/payload/Cargo.toml +++ b/crates/optimism/payload/Cargo.toml @@ -26,7 +26,6 @@ reth-payload-builder-primitives.workspace = true reth-payload-util.workspace = true reth-payload-primitives = { workspace = true, features = ["op"] } reth-basic-payload-builder.workspace = true -reth-trie.workspace = true reth-chain-state.workspace = true # op-reth diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index fbf99c78d9e7..f370ed496f04 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -19,14 +19,17 @@ use reth_payload_builder_primitives::PayloadBuilderError; use reth_payload_primitives::PayloadBuilderAttributes; use reth_payload_util::PayloadTransactions; use reth_primitives::{ - proofs, Block, BlockBody, BlockExt, Receipt, SealedHeader, TransactionSigned, TxType, + proofs, transaction::SignedTransactionIntoRecoveredExt, Block, BlockBody, BlockExt, Receipt, + SealedHeader, TransactionSigned, TxType, +}; +use reth_provider::{ + HashedPostStateProvider, ProviderError, StateProofProvider, StateProviderFactory, + StateRootProvider, }; -use reth_provider::{ProviderError, StateProofProvider, StateProviderFactory, StateRootProvider}; use reth_revm::database::StateProviderDatabase; use reth_transaction_pool::{ - noop::NoopTransactionPool, BestTransactionsAttributes, TransactionPool, + 
noop::NoopTransactionPool, BestTransactionsAttributes, PoolTransaction, TransactionPool, }; -use reth_trie::HashedPostState; use revm::{ db::{states::bundle_state::BundleRetention, State}, primitives::{ @@ -95,7 +98,7 @@ impl OpPayloadBuilder { } impl OpPayloadBuilder where - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm
, Txs: OpPayloadTransactions, { /// Constructs an Optimism payload from the transactions sent via the @@ -112,7 +115,7 @@ where ) -> Result, PayloadBuilderError> where Client: StateProviderFactory + ChainSpecProvider, - Pool: TransactionPool, + Pool: TransactionPool>, { let (initialized_cfg, initialized_block_env) = self .cfg_and_block_env(&args.config.attributes, &args.config.parent_header) @@ -152,7 +155,7 @@ where impl OpPayloadBuilder where - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm
, { /// Returns the configured [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for the targeted payload /// (that has the `parent` as its parent). @@ -213,8 +216,8 @@ where impl PayloadBuilder for OpPayloadBuilder where Client: StateProviderFactory + ChainSpecProvider, - Pool: TransactionPool, - EvmConfig: ConfigureEvm
, + Pool: TransactionPool>, + EvmConfig: ConfigureEvm
, Txs: OpPayloadTransactions, { type Attributes = OpPayloadBuilderAttributes; @@ -281,7 +284,7 @@ pub struct OpBuilder { impl OpBuilder where - Pool: TransactionPool, + Pool: TransactionPool>, Txs: OpPayloadTransactions, { /// Executes the payload and returns the outcome. @@ -291,7 +294,7 @@ where ctx: &OpPayloadBuilderCtx, ) -> Result, PayloadBuilderError> where - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm
, DB: Database, { let Self { pool, best } = self; @@ -336,9 +339,9 @@ where ctx: OpPayloadBuilderCtx, ) -> Result, PayloadBuilderError> where - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm
, DB: Database + AsRef

, - P: StateRootProvider, + P: StateRootProvider + HashedPostStateProvider, { let ExecutedPayload { info, withdrawals_root } = match self.execute(&mut state, &ctx)? { BuildOutcomeKind::Better { payload } | BuildOutcomeKind::Freeze(payload) => payload, @@ -366,17 +369,16 @@ where execution_outcome.block_logs_bloom(block_number).expect("Number is in range"); // // calculate the state root - let hashed_state = HashedPostState::from_bundle_state(&execution_outcome.state().state); + let state_provider = state.database.as_ref(); + let hashed_state = state_provider.hashed_post_state(execution_outcome.state()); let (state_root, trie_output) = { - state.database.as_ref().state_root_with_updates(hashed_state.clone()).inspect_err( - |err| { - warn!(target: "payload_builder", - parent_header=%ctx.parent().hash(), - %err, - "failed to calculate state root for payload" - ); - }, - )? + state_provider.state_root_with_updates(hashed_state.clone()).inspect_err(|err| { + warn!(target: "payload_builder", + parent_header=%ctx.parent().hash(), + %err, + "failed to calculate state root for payload" + ); + })? }; // create the block header @@ -410,6 +412,7 @@ where blob_gas_used, excess_blob_gas, requests_hash: None, + target_blobs_per_block: None, }; // seal the block @@ -462,7 +465,7 @@ where ctx: &OpPayloadBuilderCtx, ) -> Result where - EvmConfig: ConfigureEvm

, + EvmConfig: ConfigureEvm
, DB: Database + AsRef

, P: StateProofProvider, { @@ -478,19 +481,23 @@ where pub trait OpPayloadTransactions: Clone + Send + Sync + Unpin + 'static { /// Returns an iterator that yields the transaction in the order they should get included in the /// new payload. - fn best_transactions( + fn best_transactions< + Pool: TransactionPool>, + >( &self, pool: Pool, attr: BestTransactionsAttributes, - ) -> impl PayloadTransactions; + ) -> impl PayloadTransactions; } impl OpPayloadTransactions for () { - fn best_transactions( + fn best_transactions< + Pool: TransactionPool>, + >( &self, pool: Pool, attr: BestTransactionsAttributes, - ) -> impl PayloadTransactions { + ) -> impl PayloadTransactions { BestPayloadTransactions::new(pool.best_transactions_with_attributes(attr)) } } @@ -693,7 +700,7 @@ impl OpPayloadBuilderCtx { impl OpPayloadBuilderCtx where - EvmConfig: ConfigureEvm

, + EvmConfig: ConfigureEvm
, { /// apply eip-4788 pre block contract call pub fn apply_pre_beacon_root_contract_call( @@ -748,7 +755,7 @@ where )) } - // Convert the transaction to a [TransactionSignedEcRecovered]. This is + // Convert the transaction to a [RecoveredTx]. This is // purely for the purposes of utilizing the `evm_config.tx_env`` function. // Deposit transactions do not have signatures, so if the tx is a deposit, this // will just pull in its `from` address. @@ -829,11 +836,10 @@ where &self, info: &mut ExecutionInfo, db: &mut State, - mut best_txs: impl PayloadTransactions, + mut best_txs: impl PayloadTransactions, ) -> Result, PayloadBuilderError> where DB: Database, - Pool: TransactionPool, { let block_gas_limit = self.block_gas_limit(); let base_fee = self.base_fee(); diff --git a/crates/optimism/payload/src/config.rs b/crates/optimism/payload/src/config.rs new file mode 100644 index 000000000000..5055c05c42ea --- /dev/null +++ b/crates/optimism/payload/src/config.rs @@ -0,0 +1,87 @@ +//! Additional configuration for the OP builder + +use std::sync::{atomic::AtomicU64, Arc}; + +/// Contains the Data Availability configuration for the OP builder. +#[derive(Debug, Clone, Default)] +pub struct OpDAConfig { + inner: Arc, +} + +impl OpDAConfig { + /// Creates a new Data Availability configuration with the given maximum sizes. + pub fn new(max_da_tx_size: u64, max_da_block_size: u64) -> Self { + let this = Self::default(); + this.set_max_da_size(max_da_tx_size, max_da_block_size); + this + } + + /// Returns the max allowed data availability size per transactions, if any. + pub fn max_da_tx_size(&self) -> Option { + let val = self.inner.max_da_tx_size.load(std::sync::atomic::Ordering::Relaxed); + if val == 0 { + None + } else { + Some(val) + } + } + + /// Returns the max allowed data availability size per block, if any. 
+ pub fn max_da_block_size(&self) -> Option { + let val = self.inner.max_da_block_size.load(std::sync::atomic::Ordering::Relaxed); + if val == 0 { + None + } else { + Some(val) + } + } + + /// Sets the maximum data availability size currently allowed for inclusion. 0 means no maximum. + pub fn set_max_da_size(&self, max_da_tx_size: u64, max_da_block_size: u64) { + self.set_max_tx_size(max_da_tx_size); + self.set_max_block_size(max_da_block_size); + } + + /// Sets the maximum data availability size per transaction currently allowed for inclusion. 0 + /// means no maximum. + pub fn set_max_tx_size(&self, max_da_tx_size: u64) { + self.inner.max_da_tx_size.store(max_da_tx_size, std::sync::atomic::Ordering::Relaxed); + } + + /// Sets the maximum data availability size per block currently allowed for inclusion. 0 means + /// no maximum. + pub fn set_max_block_size(&self, max_da_block_size: u64) { + self.inner.max_da_block_size.store(max_da_block_size, std::sync::atomic::Ordering::Relaxed); + } +} + +#[derive(Debug, Default)] +struct OpDAConfigInner { + /// Don't include any transactions with data availability size larger than this in any built + /// block + /// + /// 0 means no limit. + max_da_tx_size: AtomicU64, + /// Maximum total data availability size for a block + /// + /// 0 means no limit. 
+ max_da_block_size: AtomicU64, +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_da() { + let da = OpDAConfig::default(); + assert_eq!(da.max_da_tx_size(), None); + assert_eq!(da.max_da_block_size(), None); + da.set_max_da_size(100, 200); + assert_eq!(da.max_da_tx_size(), Some(100)); + assert_eq!(da.max_da_block_size(), Some(200)); + da.set_max_da_size(0, 0); + assert_eq!(da.max_da_tx_size(), None); + assert_eq!(da.max_da_block_size(), None); + } +} diff --git a/crates/optimism/payload/src/error.rs b/crates/optimism/payload/src/error.rs index 8a254e9835c2..6b2a85e7a97b 100644 --- a/crates/optimism/payload/src/error.rs +++ b/crates/optimism/payload/src/error.rs @@ -4,8 +4,8 @@ #[derive(Debug, thiserror::Error)] pub enum OpPayloadBuilderError { /// Thrown when a transaction fails to convert to a - /// [`reth_primitives::TransactionSignedEcRecovered`]. - #[error("failed to convert deposit transaction to TransactionSignedEcRecovered")] + /// [`reth_primitives::RecoveredTx`]. + #[error("failed to convert deposit transaction to RecoveredTx")] TransactionEcRecoverFailed, /// Thrown when the L1 block info could not be parsed from the calldata of the /// first transaction supplied in the payload attributes. 
diff --git a/crates/optimism/payload/src/lib.rs b/crates/optimism/payload/src/lib.rs index 8447026d783a..53fad1118fd0 100644 --- a/crates/optimism/payload/src/lib.rs +++ b/crates/optimism/payload/src/lib.rs @@ -16,3 +16,5 @@ pub use builder::OpPayloadBuilder; pub mod error; pub mod payload; pub use payload::{OpBuiltPayload, OpPayloadAttributes, OpPayloadBuilderAttributes}; + +pub mod config; diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index 1a951abadcae..e243745cea68 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -370,7 +370,9 @@ mod tests { prev_randao: b256!("9158595abbdab2c90635087619aa7042bbebe47642dfab3c9bfb934f6b082765"), suggested_fee_recipient: address!("4200000000000000000000000000000000000011"), withdrawals: Some([].into()), - parent_beacon_block_root: b256!("8fe0193b9bf83cb7e5a08538e494fecc23046aab9a497af3704f4afdae3250ff").into() + parent_beacon_block_root: b256!("8fe0193b9bf83cb7e5a08538e494fecc23046aab9a497af3704f4afdae3250ff").into(), + target_blobs_per_block: None, + max_blobs_per_block: None, }, transactions: Some([bytes!("7ef8f8a0dc19cfa777d90980e4875d0a548a881baaa3f83f14d1bc0d3038bc329350e54194deaddeaddeaddeaddeaddeaddeaddeaddead00019442000000000000000000000000000000000000158080830f424080b8a4440a5e20000f424000000000000000000000000300000000670d6d890000000000000125000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000014bf9181db6e381d4384bbf69c48b0ee0eed23c6ca26143c6d2544f9d39997a590000000000000000000000007f83d659683caf2767fd3c720981d51f5bc365bc")].into()), no_tx_pool: None, diff --git a/crates/optimism/primitives/Cargo.toml b/crates/optimism/primitives/Cargo.toml index abd27300fa59..38f76aa6256a 100644 --- a/crates/optimism/primitives/Cargo.toml +++ b/crates/optimism/primitives/Cargo.toml @@ -14,48 +14,66 @@ workspace = true [dependencies] # reth reth-primitives.workspace = 
true -reth-primitives-traits.workspace = true -reth-codecs = { workspace = true, optional = true, features = ["optimism"] } +reth-primitives-traits = { workspace = true, features = ["op"] } +reth-codecs = { workspace = true, optional = true, features = ["op"] } # ethereum alloy-primitives.workspace = true alloy-consensus.workspace = true -alloy-eips.workspace = true alloy-rlp.workspace = true +alloy-eips.workspace = true +revm-primitives.workspace = true +secp256k1 = { workspace = true, optional = true } # op op-alloy-consensus.workspace = true # codec -bytes.workspace = true +bytes = { workspace = true, optional = true } serde = { workspace = true, optional = true } # misc -derive_more.workspace = true +derive_more = { workspace = true, features = ["deref", "from", "into", "constructor"] } +rand = { workspace = true, optional = true } # test arbitrary = { workspace = true, features = ["derive"], optional = true } +proptest = { workspace = true, optional = true } [dev-dependencies] -reth-codecs = { workspace = true, features = ["test-utils"] } +proptest-arbitrary-interop.workspace = true +reth-codecs = { workspace = true, features = ["test-utils", "op"] } rstest.workspace = true arbitrary.workspace = true +proptest.workspace = true [features] -default = ["std", "reth-codec"] +default = ["std"] std = [ - "reth-primitives-traits/std", - "reth-primitives/std", - "reth-codecs/std", - "alloy-consensus/std", - "alloy-eips/std", - "alloy-primitives/std", - "serde/std", + "reth-primitives-traits/std", + "reth-primitives/std", + "reth-codecs?/std", + "alloy-consensus/std", + "alloy-eips/std", + "alloy-primitives/std", + "serde?/std", + "bytes?/std", + "derive_more/std", + "revm-primitives/std", + "secp256k1?/std", + "alloy-rlp/std", ] reth-codec = [ "dep:reth-codecs", + "std", + "rand", + "dep:proptest", + "dep:arbitrary", "reth-primitives/reth-codec", "reth-primitives-traits/reth-codec", + "reth-codecs?/op", + "reth-primitives/reth-codec", + "dep:bytes", ] serde = [ 
     "dep:serde",
@@ -63,12 +81,16 @@ serde = [
     "alloy-primitives/serde",
     "alloy-consensus/serde",
     "alloy-eips/serde",
-    "bytes/serde",
+    "bytes?/serde",
     "reth-codecs?/serde",
     "op-alloy-consensus/serde",
+    "rand?/serde",
+    "revm-primitives/serde",
+    "secp256k1?/serde",
 ]
 arbitrary = [
     "dep:arbitrary",
+    "dep:secp256k1",
     "reth-primitives-traits/arbitrary",
     "reth-primitives/arbitrary",
     "reth-codecs?/arbitrary",
@@ -76,4 +98,9 @@ arbitrary = [
     "alloy-consensus/arbitrary",
     "alloy-eips/arbitrary",
     "alloy-primitives/arbitrary",
+    "revm-primitives/arbitrary",
+    "rand",
+]
+op = [
+    "revm-primitives/optimism",
 ]
diff --git a/crates/optimism/primitives/src/bedrock.rs b/crates/optimism/primitives/src/bedrock.rs
index 204b34d33782..3a345abe20ab 100644
--- a/crates/optimism/primitives/src/bedrock.rs
+++ b/crates/optimism/primitives/src/bedrock.rs
@@ -85,6 +85,7 @@ pub const BEDROCK_HEADER: Header = Header {
     excess_blob_gas: None,
     parent_beacon_block_root: None,
     requests_hash: None,
+    target_blobs_per_block: None,
 };
 
 /// Bedrock total difficulty on Optimism Mainnet.
diff --git a/crates/optimism/primitives/src/lib.rs b/crates/optimism/primitives/src/lib.rs
index 796f5cb06138..df5042110217 100644
--- a/crates/optimism/primitives/src/lib.rs
+++ b/crates/optimism/primitives/src/lib.rs
@@ -6,17 +6,20 @@
     issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
 )]
 #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
+// The `op` feature must be enabled to use this crate.
+#![cfg(feature = "op")]
 #![cfg_attr(not(test), warn(unused_crate_dependencies))]
 #![cfg_attr(not(feature = "std"), no_std)]
 
+extern crate alloc;
+
 pub mod bedrock;
 pub mod transaction;
 
-use reth_primitives::EthPrimitives;
-pub use transaction::{tx_type::OpTxType, OpTransaction};
+pub use transaction::{signed::OpTransactionSigned, tx_type::OpTxType, OpTransaction};
 
 /// Optimism primitive types.
-pub type OpPrimitives = EthPrimitives; +pub type OpPrimitives = reth_primitives::EthPrimitives; // TODO: once we are ready for separating primitive types, introduce a separate `NodePrimitives` // implementation used exclusively by legacy engine. diff --git a/crates/optimism/primitives/src/transaction/mod.rs b/crates/optimism/primitives/src/transaction/mod.rs index 070b3d984e0b..86ac822c744c 100644 --- a/crates/optimism/primitives/src/transaction/mod.rs +++ b/crates/optimism/primitives/src/transaction/mod.rs @@ -1,14 +1,21 @@ //! Wrapper of [`OpTypedTransaction`], that implements reth database encoding [`Compact`]. +pub mod signed; pub mod tx_type; use alloy_primitives::{bytes, Bytes, TxKind, Uint, B256}; -use alloy_consensus::{constants::EIP7702_TX_TYPE_ID, TxLegacy}; +#[cfg(any(test, feature = "reth-codec"))] +use alloy_consensus::constants::EIP7702_TX_TYPE_ID; +use alloy_consensus::{SignableTransaction, TxLegacy}; use alloy_eips::{eip2930::AccessList, eip7702::SignedAuthorization}; -use derive_more::{Deref, From}; -use op_alloy_consensus::{OpTypedTransaction, DEPOSIT_TX_TYPE_ID}; +use derive_more::{Constructor, Deref, From}; +use op_alloy_consensus::OpTypedTransaction; +#[cfg(any(test, feature = "reth-codec"))] +use op_alloy_consensus::DEPOSIT_TX_TYPE_ID; +#[cfg(any(test, feature = "reth-codec"))] use reth_codecs::Compact; +#[cfg(any(test, feature = "reth-codec"))] use reth_primitives::transaction::{ COMPACT_EXTENDED_IDENTIFIER_FLAG, COMPACT_IDENTIFIER_EIP1559, COMPACT_IDENTIFIER_EIP2930, COMPACT_IDENTIFIER_LEGACY, @@ -17,16 +24,31 @@ use reth_primitives_traits::InMemorySize; #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -#[derive(Debug, Clone, PartialEq, Eq, Deref, Hash, From)] +#[derive(Debug, Clone, PartialEq, Eq, Deref, Hash, From, Constructor)] /// Optimistic transaction. 
pub struct OpTransaction(OpTypedTransaction); +impl OpTransaction { + /// This encodes the transaction _without_ the signature, and is only suitable for creating a + /// hash intended for signing. + pub fn encode_for_signing(&self, out: &mut dyn bytes::BufMut) { + match self.deref() { + OpTypedTransaction::Legacy(tx) => tx.encode_for_signing(out), + OpTypedTransaction::Eip2930(tx) => tx.encode_for_signing(out), + OpTypedTransaction::Eip1559(tx) => tx.encode_for_signing(out), + OpTypedTransaction::Eip7702(tx) => tx.encode_for_signing(out), + OpTypedTransaction::Deposit(_) => {} + } + } +} + impl Default for OpTransaction { fn default() -> Self { Self(OpTypedTransaction::Legacy(TxLegacy::default())) } } +#[cfg(any(test, feature = "reth-codec"))] impl Compact for OpTransaction { fn to_compact(&self, out: &mut B) -> usize where @@ -123,6 +145,10 @@ impl alloy_consensus::Transaction for OpTransaction { self.0.kind() } + fn is_create(&self) -> bool { + self.0.is_create() + } + fn value(&self) -> Uint<256, 4> { self.0.value() } diff --git a/crates/optimism/primitives/src/transaction/signed.rs b/crates/optimism/primitives/src/transaction/signed.rs new file mode 100644 index 000000000000..2dc72026e7cb --- /dev/null +++ b/crates/optimism/primitives/src/transaction/signed.rs @@ -0,0 +1,479 @@ +//! A signed Optimism transaction. 
+ +use alloc::vec::Vec; +use core::{ + hash::{Hash, Hasher}, + mem, +}; +#[cfg(feature = "std")] +use std::sync::OnceLock; + +use alloy_consensus::{ + transaction::RlpEcdsaTx, SignableTransaction, Transaction, TxEip1559, TxEip2930, TxEip7702, +}; +use alloy_eips::{ + eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718}, + eip2930::AccessList, + eip7702::SignedAuthorization, +}; +use alloy_primitives::{ + keccak256, Address, Bytes, PrimitiveSignature as Signature, TxHash, TxKind, Uint, B256, U256, +}; +use alloy_rlp::Header; +use derive_more::{AsRef, Deref}; +#[cfg(not(feature = "std"))] +use once_cell::sync::OnceCell as OnceLock; +use op_alloy_consensus::{OpTypedTransaction, TxDeposit}; +#[cfg(any(test, feature = "reth-codec"))] +use proptest as _; +use reth_primitives::{ + transaction::{recover_signer, recover_signer_unchecked}, + TransactionSigned, +}; +use reth_primitives_traits::{FillTxEnv, InMemorySize, SignedTransaction}; +use revm_primitives::{AuthorizationList, OptimismFields, TxEnv}; + +use crate::{OpTransaction, OpTxType}; + +/// Signed transaction. +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(rlp))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[derive(Debug, Clone, Eq, AsRef, Deref)] +pub struct OpTransactionSigned { + /// Transaction hash + #[serde(skip)] + pub hash: OnceLock, + /// The transaction signature values + pub signature: Signature, + /// Raw transaction info + #[deref] + #[as_ref] + pub transaction: OpTransaction, +} + +impl OpTransactionSigned { + /// Calculates hash of given transaction and signature and returns new instance. 
+    pub fn new(transaction: OpTypedTransaction, signature: Signature) -> Self {
+        let signed_tx = Self::new_unhashed(transaction, signature);
+        if !matches!(signed_tx.tx_type(), OpTxType::Deposit) {
+            signed_tx.hash.get_or_init(|| signed_tx.recalculate_hash());
+        }
+
+        signed_tx
+    }
+
+    /// Creates a new signed transaction from the given transaction and signature without the hash.
+    ///
+    /// Note: this only calculates the hash on the first [`Self::tx_hash`] call.
+    pub fn new_unhashed(transaction: OpTypedTransaction, signature: Signature) -> Self {
+        Self { hash: Default::default(), signature, transaction: OpTransaction::new(transaction) }
+    }
+}
+
+impl SignedTransaction for OpTransactionSigned {
+    type Type = OpTxType;
+
+    fn tx_hash(&self) -> &TxHash {
+        self.hash.get_or_init(|| self.recalculate_hash())
+    }
+
+    fn signature(&self) -> &Signature {
+        &self.signature
+    }
+
+    fn recover_signer(&self) -> Option<Address>
{ + // Optimism's Deposit transaction does not have a signature. Directly return the + // `from` address. + if let OpTypedTransaction::Deposit(TxDeposit { from, .. }) = *self.transaction { + return Some(from) + } + + let Self { transaction, signature, .. } = self; + let signature_hash = signature_hash(transaction); + recover_signer(signature, signature_hash) + } + + fn recover_signer_unchecked(&self) -> Option
{ + // Optimism's Deposit transaction does not have a signature. Directly return the + // `from` address. + if let OpTypedTransaction::Deposit(TxDeposit { from, .. }) = *self.transaction { + return Some(from) + } + + let Self { transaction, signature, .. } = self; + let signature_hash = signature_hash(transaction); + recover_signer_unchecked(signature, signature_hash) + } + + fn recalculate_hash(&self) -> B256 { + keccak256(self.encoded_2718()) + } + + fn recover_signer_unchecked_with_buf(&self, buf: &mut Vec) -> Option
{ + // Optimism's Deposit transaction does not have a signature. Directly return the + // `from` address. + if let OpTypedTransaction::Deposit(TxDeposit { from, .. }) = *self.transaction { + return Some(from) + } + self.encode_for_signing(buf); + let signature_hash = keccak256(buf); + recover_signer_unchecked(&self.signature, signature_hash) + } +} + +impl FillTxEnv for OpTransactionSigned { + fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address) { + let envelope = self.encoded_2718(); + + tx_env.caller = sender; + match self.transaction.deref() { + OpTypedTransaction::Legacy(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.gas_price); + tx_env.gas_priority_fee = None; + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = tx.chain_id; + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clear(); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + tx_env.authorization_list = None; + } + OpTypedTransaction::Eip2930(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.gas_price); + tx_env.gas_priority_fee = None; + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clone_from(&tx.access_list.0); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + tx_env.authorization_list = None; + } + OpTypedTransaction::Eip1559(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.max_fee_per_gas); + tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clone_from(&tx.access_list.0); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + tx_env.authorization_list 
= None; + } + OpTypedTransaction::Eip7702(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.max_fee_per_gas); + tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); + tx_env.transact_to = tx.to.into(); + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clone_from(&tx.access_list.0); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + tx_env.authorization_list = + Some(AuthorizationList::Signed(tx.authorization_list.clone())); + } + OpTypedTransaction::Deposit(tx) => { + tx_env.access_list.clear(); + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::ZERO; + tx_env.gas_priority_fee = None; + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = None; + tx_env.nonce = None; + tx_env.authorization_list = None; + + tx_env.optimism = OptimismFields { + source_hash: Some(tx.source_hash), + mint: tx.mint, + is_system_transaction: Some(tx.is_system_transaction), + enveloped_tx: Some(envelope.into()), + }; + return + } + } + + tx_env.optimism = OptimismFields { + source_hash: None, + mint: None, + is_system_transaction: Some(false), + enveloped_tx: Some(envelope.into()), + } + } +} + +impl InMemorySize for OpTransactionSigned { + #[inline] + fn size(&self) -> usize { + mem::size_of::() + self.transaction.size() + mem::size_of::() + } +} + +impl alloy_rlp::Encodable for OpTransactionSigned { + /// See [`alloy_rlp::Encodable`] impl for [`TransactionSigned`]. 
+ fn encode(&self, out: &mut dyn alloy_rlp::bytes::BufMut) { + self.network_encode(out); + } + + fn length(&self) -> usize { + let mut payload_length = self.encode_2718_len(); + if !self.is_legacy() { + payload_length += Header { list: false, payload_length }.length(); + } + + payload_length + } +} + +impl alloy_rlp::Decodable for OpTransactionSigned { + /// See [`alloy_rlp::Decodable`] impl for [`TransactionSigned`]. + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + Self::network_decode(buf).map_err(Into::into) + } +} + +impl Encodable2718 for OpTransactionSigned { + fn type_flag(&self) -> Option { + match self.tx_type() { + op_alloy_consensus::OpTxType::Legacy => None, + tx_type => Some(tx_type as u8), + } + } + + fn encode_2718_len(&self) -> usize { + match self.transaction.deref() { + OpTypedTransaction::Legacy(legacy_tx) => { + legacy_tx.eip2718_encoded_length(&self.signature) + } + OpTypedTransaction::Eip2930(access_list_tx) => { + access_list_tx.eip2718_encoded_length(&self.signature) + } + OpTypedTransaction::Eip1559(dynamic_fee_tx) => { + dynamic_fee_tx.eip2718_encoded_length(&self.signature) + } + OpTypedTransaction::Eip7702(set_code_tx) => { + set_code_tx.eip2718_encoded_length(&self.signature) + } + OpTypedTransaction::Deposit(deposit_tx) => deposit_tx.eip2718_encoded_length(), + } + } + + fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { + let Self { transaction, signature, .. 
} = self; + + match transaction.deref() { + OpTypedTransaction::Legacy(legacy_tx) => { + // do nothing w/ with_header + legacy_tx.eip2718_encode(signature, out) + } + OpTypedTransaction::Eip2930(access_list_tx) => { + access_list_tx.eip2718_encode(signature, out) + } + OpTypedTransaction::Eip1559(dynamic_fee_tx) => { + dynamic_fee_tx.eip2718_encode(signature, out) + } + OpTypedTransaction::Eip7702(set_code_tx) => set_code_tx.eip2718_encode(signature, out), + OpTypedTransaction::Deposit(deposit_tx) => deposit_tx.encode_2718(out), + } + } +} + +impl Decodable2718 for OpTransactionSigned { + fn typed_decode(ty: u8, buf: &mut &[u8]) -> Eip2718Result { + match ty.try_into().map_err(|_| Eip2718Error::UnexpectedType(ty))? { + op_alloy_consensus::OpTxType::Legacy => Err(Eip2718Error::UnexpectedType(0)), + op_alloy_consensus::OpTxType::Eip2930 => { + let (tx, signature, hash) = TxEip2930::rlp_decode_signed(buf)?.into_parts(); + let signed_tx = Self::new_unhashed(OpTypedTransaction::Eip2930(tx), signature); + signed_tx.hash.get_or_init(|| hash); + Ok(signed_tx) + } + op_alloy_consensus::OpTxType::Eip1559 => { + let (tx, signature, hash) = TxEip1559::rlp_decode_signed(buf)?.into_parts(); + let signed_tx = Self::new_unhashed(OpTypedTransaction::Eip1559(tx), signature); + signed_tx.hash.get_or_init(|| hash); + Ok(signed_tx) + } + op_alloy_consensus::OpTxType::Eip7702 => { + let (tx, signature, hash) = TxEip7702::rlp_decode_signed(buf)?.into_parts(); + let signed_tx = Self::new_unhashed(OpTypedTransaction::Eip7702(tx), signature); + signed_tx.hash.get_or_init(|| hash); + Ok(signed_tx) + } + op_alloy_consensus::OpTxType::Deposit => Ok(Self::new_unhashed( + OpTypedTransaction::Deposit(TxDeposit::rlp_decode(buf)?), + TxDeposit::signature(), + )), + } + } + + fn fallback_decode(buf: &mut &[u8]) -> Eip2718Result { + let (transaction, hash, signature) = + TransactionSigned::decode_rlp_legacy_transaction_tuple(buf)?; + let signed_tx = 
Self::new_unhashed(OpTypedTransaction::Legacy(transaction), signature); + signed_tx.hash.get_or_init(|| hash); + + Ok(signed_tx) + } +} + +impl Transaction for OpTransactionSigned { + fn chain_id(&self) -> Option { + self.deref().chain_id() + } + + fn nonce(&self) -> u64 { + self.deref().nonce() + } + + fn gas_limit(&self) -> u64 { + self.deref().gas_limit() + } + + fn gas_price(&self) -> Option { + self.deref().gas_price() + } + + fn max_fee_per_gas(&self) -> u128 { + self.deref().max_fee_per_gas() + } + + fn max_priority_fee_per_gas(&self) -> Option { + self.deref().max_priority_fee_per_gas() + } + + fn max_fee_per_blob_gas(&self) -> Option { + self.deref().max_fee_per_blob_gas() + } + + fn priority_fee_or_price(&self) -> u128 { + self.deref().priority_fee_or_price() + } + + fn kind(&self) -> TxKind { + self.deref().kind() + } + + fn is_create(&self) -> bool { + self.deref().is_create() + } + + fn value(&self) -> Uint<256, 4> { + self.deref().value() + } + + fn input(&self) -> &Bytes { + self.deref().input() + } + + fn ty(&self) -> u8 { + self.deref().ty() + } + + fn access_list(&self) -> Option<&AccessList> { + self.deref().access_list() + } + + fn blob_versioned_hashes(&self) -> Option<&[B256]> { + self.deref().blob_versioned_hashes() + } + + fn authorization_list(&self) -> Option<&[SignedAuthorization]> { + self.deref().authorization_list() + } + + fn is_dynamic_fee(&self) -> bool { + self.deref().is_dynamic_fee() + } + + fn effective_gas_price(&self, base_fee: Option) -> u128 { + self.deref().effective_gas_price(base_fee) + } + + fn effective_tip_per_gas(&self, base_fee: u64) -> Option { + self.deref().effective_tip_per_gas(base_fee) + } +} + +impl Default for OpTransactionSigned { + fn default() -> Self { + Self { + hash: Default::default(), + signature: Signature::test_signature(), + transaction: OpTransaction::new(OpTypedTransaction::Legacy(Default::default())), + } + } +} + +impl PartialEq for OpTransactionSigned { + fn eq(&self, other: &Self) -> bool { + 
self.signature == other.signature && + self.transaction == other.transaction && + self.tx_hash() == other.tx_hash() + } +} + +impl Hash for OpTransactionSigned { + fn hash(&self, state: &mut H) { + self.signature.hash(state); + self.transaction.hash(state); + } +} + +#[cfg(any(test, feature = "arbitrary"))] +impl<'a> arbitrary::Arbitrary<'a> for OpTransactionSigned { + fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { + #[allow(unused_mut)] + let mut transaction = OpTypedTransaction::arbitrary(u)?; + + let secp = secp256k1::Secp256k1::new(); + let key_pair = secp256k1::Keypair::new(&secp, &mut rand::thread_rng()); + let signature = reth_primitives::transaction::util::secp256k1::sign_message( + B256::from_slice(&key_pair.secret_bytes()[..]), + signature_hash(&transaction), + ) + .unwrap(); + + // Both `Some(0)` and `None` values are encoded as empty string byte. This introduces + // ambiguity in roundtrip tests. Patch the mint value of deposit transaction here, so that + // it's `None` if zero. + if let OpTypedTransaction::Deposit(ref mut tx_deposit) = transaction { + if tx_deposit.mint == Some(0) { + tx_deposit.mint = None; + } + } + + let signature = if is_deposit(&transaction) { TxDeposit::signature() } else { signature }; + + Ok(Self::new(transaction, signature)) + } +} + +/// Calculates the signing hash for the transaction. +pub fn signature_hash(tx: &OpTypedTransaction) -> B256 { + match tx { + OpTypedTransaction::Legacy(tx) => tx.signature_hash(), + OpTypedTransaction::Eip2930(tx) => tx.signature_hash(), + OpTypedTransaction::Eip1559(tx) => tx.signature_hash(), + OpTypedTransaction::Eip7702(tx) => tx.signature_hash(), + OpTypedTransaction::Deposit(_) => B256::ZERO, + } +} + +/// Returns `true` if transaction is deposit transaction. 
+pub const fn is_deposit(tx: &OpTypedTransaction) -> bool { + matches!(tx, OpTypedTransaction::Deposit(_)) +} diff --git a/crates/optimism/primitives/src/transaction/tx_type.rs b/crates/optimism/primitives/src/transaction/tx_type.rs index 9976221b4240..8be5f3a3d5e5 100644 --- a/crates/optimism/primitives/src/transaction/tx_type.rs +++ b/crates/optimism/primitives/src/transaction/tx_type.rs @@ -1,286 +1,21 @@ -//! newtype pattern on `op_alloy_consensus::OpTxType`. -//! `OpTxType` implements `reth_primitives_traits::TxType`. -//! This type is required because a `Compact` impl is needed on the deposit tx type. +//! Optimism transaction type. -use core::fmt::Debug; - -use alloy_primitives::{U64, U8}; -use alloy_rlp::{Decodable, Encodable, Error}; -use bytes::BufMut; -use derive_more::{ - derive::{From, Into}, - Display, -}; -use op_alloy_consensus::OpTxType as AlloyOpTxType; -use reth_primitives_traits::{InMemorySize, TxType}; - -/// Wrapper type for [`op_alloy_consensus::OpTxType`] to implement -/// [`TxType`] trait. -#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Display, Ord, Hash, From, Into)] -#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[into(u8)] -pub struct OpTxType(AlloyOpTxType); - -impl TxType for OpTxType { - #[inline] - fn is_legacy(&self) -> bool { - matches!(self.0, AlloyOpTxType::Legacy) - } - - #[inline] - fn is_eip2930(&self) -> bool { - matches!(self.0, AlloyOpTxType::Eip2930) - } - - #[inline] - fn is_eip1559(&self) -> bool { - matches!(self.0, AlloyOpTxType::Eip1559) - } - - #[inline] - fn is_eip4844(&self) -> bool { - false - } - - #[inline] - fn is_eip7702(&self) -> bool { - matches!(self.0, AlloyOpTxType::Eip7702) - } -} - -impl InMemorySize for OpTxType { - /// Calculates a heuristic for the in-memory size of the [`OpTxType`]. 
- #[inline] - fn size(&self) -> usize { - core::mem::size_of::() - } -} - -impl From for U8 { - fn from(tx_type: OpTxType) -> Self { - Self::from(u8::from(tx_type)) - } -} - -impl TryFrom for OpTxType { - type Error = Error; - - fn try_from(value: u8) -> Result { - AlloyOpTxType::try_from(value) - .map(OpTxType) - .map_err(|_| Error::Custom("Invalid transaction type")) - } -} - -impl Default for OpTxType { - fn default() -> Self { - Self(AlloyOpTxType::Legacy) - } -} - -impl PartialEq for OpTxType { - fn eq(&self, other: &u8) -> bool { - let self_as_u8: u8 = (*self).into(); - &self_as_u8 == other - } -} - -impl TryFrom for OpTxType { - type Error = Error; - - fn try_from(value: u64) -> Result { - if value > u8::MAX as u64 { - return Err(Error::Custom("value out of range")); - } - Self::try_from(value as u8) - } -} - -impl TryFrom for OpTxType { - type Error = Error; - - fn try_from(value: U64) -> Result { - let u64_value: u64 = value.try_into().map_err(|_| Error::Custom("value out of range"))?; - Self::try_from(u64_value) - } -} - -impl Encodable for OpTxType { - fn length(&self) -> usize { - let value: u8 = (*self).into(); - value.length() - } - - fn encode(&self, out: &mut dyn BufMut) { - let value: u8 = (*self).into(); - value.encode(out); - } -} - -impl Decodable for OpTxType { - fn decode(buf: &mut &[u8]) -> Result { - // Decode the u8 value from RLP - let value = if buf.is_empty() { - return Err(alloy_rlp::Error::InputTooShort); - } else if buf[0] == 0x80 { - 0 // Special case: RLP encoding for integer 0 is `b"\x80"` - } else { - u8::decode(buf)? 
- }; - - Self::try_from(value).map_err(|_| alloy_rlp::Error::Custom("Invalid transaction type")) - } -} - -#[cfg(any(test, feature = "reth-codec"))] -impl reth_codecs::Compact for OpTxType { - fn to_compact(&self, buf: &mut B) -> usize - where - B: bytes::BufMut + AsMut<[u8]>, - { - use reth_codecs::txtype::*; - match self.0 { - AlloyOpTxType::Legacy => COMPACT_IDENTIFIER_LEGACY, - AlloyOpTxType::Eip2930 => COMPACT_IDENTIFIER_EIP2930, - AlloyOpTxType::Eip1559 => COMPACT_IDENTIFIER_EIP1559, - AlloyOpTxType::Eip7702 => { - buf.put_u8(alloy_consensus::constants::EIP7702_TX_TYPE_ID); - COMPACT_EXTENDED_IDENTIFIER_FLAG - } - AlloyOpTxType::Deposit => { - buf.put_u8(op_alloy_consensus::DEPOSIT_TX_TYPE_ID); - COMPACT_EXTENDED_IDENTIFIER_FLAG - } - } - } - - fn from_compact(mut buf: &[u8], identifier: usize) -> (Self, &[u8]) { - use bytes::Buf; - ( - match identifier { - reth_codecs::txtype::COMPACT_IDENTIFIER_LEGACY => Self(AlloyOpTxType::Legacy), - reth_codecs::txtype::COMPACT_IDENTIFIER_EIP2930 => Self(AlloyOpTxType::Eip2930), - reth_codecs::txtype::COMPACT_IDENTIFIER_EIP1559 => Self(AlloyOpTxType::Eip1559), - reth_codecs::txtype::COMPACT_EXTENDED_IDENTIFIER_FLAG => { - let extended_identifier = buf.get_u8(); - match extended_identifier { - alloy_consensus::constants::EIP7702_TX_TYPE_ID => { - Self(AlloyOpTxType::Eip7702) - } - op_alloy_consensus::DEPOSIT_TX_TYPE_ID => Self(AlloyOpTxType::Deposit), - _ => panic!("Unsupported OpTxType identifier: {extended_identifier}"), - } - } - _ => panic!("Unknown identifier for OpTxType: {identifier}"), - }, - buf, - ) - } -} +pub use op_alloy_consensus::OpTxType; #[cfg(test)] mod tests { use super::*; use alloy_consensus::constants::EIP7702_TX_TYPE_ID; - use bytes::BytesMut; use op_alloy_consensus::DEPOSIT_TX_TYPE_ID; use reth_codecs::{txtype::*, Compact}; use rstest::rstest; - #[test] - fn test_from_alloy_op_tx_type() { - let alloy_tx = AlloyOpTxType::Legacy; - let op_tx: OpTxType = OpTxType::from(alloy_tx); - assert_eq!(op_tx, 
OpTxType(AlloyOpTxType::Legacy)); - } - - #[test] - fn test_from_op_tx_type_to_u8() { - let op_tx = OpTxType(AlloyOpTxType::Legacy); - let tx_type_u8: u8 = op_tx.into(); - assert_eq!(tx_type_u8, AlloyOpTxType::Legacy as u8); - } - - #[test] - fn test_from_op_tx_type_to_u8_u8() { - let op_tx = OpTxType(AlloyOpTxType::Legacy); - let tx_type_u8: U8 = op_tx.into(); - assert_eq!(tx_type_u8, U8::from(AlloyOpTxType::Legacy as u8)); - } - - #[test] - fn test_try_from_u8() { - let op_tx = OpTxType::try_from(AlloyOpTxType::Legacy as u8).unwrap(); - assert_eq!(op_tx, OpTxType(AlloyOpTxType::Legacy)); - } - - #[test] - fn test_try_from_invalid_u8() { - let invalid_value: u8 = 255; - let result = OpTxType::try_from(invalid_value); - assert_eq!(result, Err(Error::Custom("Invalid transaction type"))); - } - - #[test] - fn test_try_from_u64() { - let op_tx = OpTxType::try_from(AlloyOpTxType::Legacy as u64).unwrap(); - assert_eq!(op_tx, OpTxType(AlloyOpTxType::Legacy)); - } - - #[test] - fn test_try_from_u64_out_of_range() { - let result = OpTxType::try_from(u64::MAX); - assert_eq!(result, Err(Error::Custom("value out of range"))); - } - - #[test] - fn test_try_from_u64_within_range() { - let valid_value: U64 = U64::from(AlloyOpTxType::Legacy as u64); - let op_tx = OpTxType::try_from(valid_value).unwrap(); - assert_eq!(op_tx, OpTxType(AlloyOpTxType::Legacy)); - } - - #[test] - fn test_default() { - let default_tx = OpTxType::default(); - assert_eq!(default_tx, OpTxType(AlloyOpTxType::Legacy)); - } - - #[test] - fn test_partial_eq_u8() { - let op_tx = OpTxType(AlloyOpTxType::Legacy); - assert_eq!(op_tx, AlloyOpTxType::Legacy as u8); - } - - #[test] - fn test_encodable() { - let op_tx = OpTxType(AlloyOpTxType::Legacy); - let mut buf = BytesMut::new(); - op_tx.encode(&mut buf); - assert_eq!(buf, BytesMut::from(&[0x80][..])); - } - - #[test] - fn test_decodable_success() { - // Using the RLP-encoded form of 0, which is `b"\x80"` - let mut buf: &[u8] = &[0x80]; - let decoded_tx = 
OpTxType::decode(&mut buf).unwrap(); - assert_eq!(decoded_tx, OpTxType(AlloyOpTxType::Legacy)); - } - - #[test] - fn test_decodable_invalid() { - let mut buf: &[u8] = &[255]; - let result = OpTxType::decode(&mut buf); - assert!(result.is_err()); - } - #[rstest] - #[case(OpTxType(AlloyOpTxType::Legacy), COMPACT_IDENTIFIER_LEGACY, vec![])] - #[case(OpTxType(AlloyOpTxType::Eip2930), COMPACT_IDENTIFIER_EIP2930, vec![])] - #[case(OpTxType(AlloyOpTxType::Eip1559), COMPACT_IDENTIFIER_EIP1559, vec![])] - #[case(OpTxType(AlloyOpTxType::Eip7702), COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])] - #[case(OpTxType(AlloyOpTxType::Deposit), COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![DEPOSIT_TX_TYPE_ID])] + #[case(OpTxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![])] + #[case(OpTxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![])] + #[case(OpTxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![])] + #[case(OpTxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])] + #[case(OpTxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![DEPOSIT_TX_TYPE_ID])] fn test_txtype_to_compact( #[case] tx_type: OpTxType, #[case] expected_identifier: usize, @@ -297,11 +32,11 @@ mod tests { } #[rstest] - #[case(OpTxType(AlloyOpTxType::Legacy), COMPACT_IDENTIFIER_LEGACY, vec![])] - #[case(OpTxType(AlloyOpTxType::Eip2930), COMPACT_IDENTIFIER_EIP2930, vec![])] - #[case(OpTxType(AlloyOpTxType::Eip1559), COMPACT_IDENTIFIER_EIP1559, vec![])] - #[case(OpTxType(AlloyOpTxType::Eip7702), COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])] - #[case(OpTxType(AlloyOpTxType::Deposit), COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![DEPOSIT_TX_TYPE_ID])] + #[case(OpTxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![])] + #[case(OpTxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![])] + #[case(OpTxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![])] + #[case(OpTxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])] + #[case(OpTxType::Deposit, 
COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![DEPOSIT_TX_TYPE_ID])] fn test_txtype_from_compact( #[case] expected_type: OpTxType, #[case] identifier: usize, diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index 50194f39aa3c..9894dd8a3dba 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -61,7 +61,6 @@ serde_json.workspace = true # misc thiserror.workspace = true tracing.workspace = true -derive_more = { workspace = true, features = ["constructor", "deref"] } [dev-dependencies] reth-optimism-chainspec.workspace = true @@ -73,5 +72,6 @@ optimism = [ "reth-provider/optimism", "revm/optimism", "reth-optimism-consensus/optimism", - "reth-optimism-payload-builder/optimism" + "reth-optimism-payload-builder/optimism", + "reth-optimism-primitives/op", ] diff --git a/crates/optimism/rpc/src/eth/block.rs b/crates/optimism/rpc/src/eth/block.rs index 64a55496993d..92b4353ec9e4 100644 --- a/crates/optimism/rpc/src/eth/block.rs +++ b/crates/optimism/rpc/src/eth/block.rs @@ -57,7 +57,7 @@ where }; Ok(OpReceiptBuilder::new( - &self.inner.provider().chain_spec(), + &self.inner.eth_api.provider().chain_spec(), tx, meta, receipt, diff --git a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs index 9495a359e329..c5e96bb87d17 100644 --- a/crates/optimism/rpc/src/eth/call.rs +++ b/crates/optimism/rpc/src/eth/call.rs @@ -33,12 +33,12 @@ where { #[inline] fn call_gas_limit(&self) -> u64 { - self.inner.gas_cap() + self.inner.eth_api.gas_cap() } #[inline] fn max_simulate_blocks(&self) -> u64 { - self.inner.max_simulate_blocks() + self.inner.eth_api.max_simulate_blocks() } fn create_txn_env( diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index 6b909f012c55..27672804839b 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -14,7 +14,6 @@ use std::{fmt, sync::Arc}; use alloy_consensus::Header; use alloy_primitives::U256; -use 
derive_more::Deref; use op_alloy_network::Optimism; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; @@ -59,14 +58,10 @@ pub type EthApiNodeBackend = EthApiInner< /// /// This type implements the [`FullEthApi`](reth_rpc_eth_api::helpers::FullEthApi) by implemented /// all the `Eth` helper traits and prerequisite traits. -#[derive(Deref, Clone)] +#[derive(Clone)] pub struct OpEthApi { /// Gateway to node's core components. - #[deref] - inner: Arc>, - /// Sequencer client, configured to forward submitted transactions to sequencer of given OP - /// network. - sequencer_client: Option, + inner: Arc>, } impl OpEthApi @@ -79,28 +74,9 @@ where + 'static, >, { - /// Creates a new instance for given context. - pub fn new(ctx: &EthApiBuilderCtx, sequencer_http: Option) -> Self { - let blocking_task_pool = - BlockingTaskPool::build().expect("failed to build blocking task pool"); - - let inner = EthApiInner::new( - ctx.provider.clone(), - ctx.pool.clone(), - ctx.network.clone(), - ctx.cache.clone(), - ctx.new_gas_price_oracle(), - ctx.config.rpc_gas_cap, - ctx.config.rpc_max_simulate_blocks, - ctx.config.eth_proof_window, - blocking_task_pool, - ctx.new_fee_history_cache(), - ctx.evm_config.clone(), - ctx.executor.clone(), - ctx.config.proof_permits, - ); - - Self { inner: Arc::new(inner), sequencer_client: sequencer_http.map(SequencerClient::new) } + /// Build a [`OpEthApi`] using [`OpEthApiBuilder`]. 
+ pub const fn builder() -> OpEthApiBuilder { + OpEthApiBuilder::new() } } @@ -130,17 +106,17 @@ where #[inline] fn pool(&self) -> &Self::Pool { - self.inner.pool() + self.inner.eth_api.pool() } #[inline] fn evm_config(&self) -> &Self::Evm { - self.inner.evm_config() + self.inner.eth_api.evm_config() } #[inline] fn network(&self) -> &Self::Network { - self.inner.network() + self.inner.eth_api.network() } #[inline] @@ -150,7 +126,7 @@ where #[inline] fn provider(&self) -> &Self::Provider { - self.inner.provider() + self.inner.eth_api.provider() } } @@ -160,7 +136,7 @@ where { #[inline] fn cache(&self) -> &EthStateCache { - self.inner.cache() + self.inner.eth_api.cache() } } @@ -175,12 +151,12 @@ where { #[inline] fn starting_block(&self) -> U256 { - self.inner.starting_block() + self.inner.eth_api.starting_block() } #[inline] fn signers(&self) -> &parking_lot::RwLock>> { - self.inner.signers() + self.inner.eth_api.signers() } } @@ -191,17 +167,17 @@ where { #[inline] fn io_task_spawner(&self) -> impl TaskSpawner { - self.inner.task_spawner() + self.inner.eth_api.task_spawner() } #[inline] fn tracing_task_pool(&self) -> &BlockingTaskPool { - self.inner.blocking_task_pool() + self.inner.eth_api.blocking_task_pool() } #[inline] fn tracing_task_guard(&self) -> &BlockingTaskGuard { - self.inner.blocking_task_guard() + self.inner.eth_api.blocking_task_guard() } } @@ -217,12 +193,12 @@ where { #[inline] fn gas_oracle(&self) -> &GasPriceOracle { - self.inner.gas_oracle() + self.inner.eth_api.gas_oracle() } #[inline] fn fee_history_cache(&self) -> &FeeHistoryCache { - self.inner.fee_history_cache() + self.inner.eth_api.fee_history_cache() } } @@ -241,7 +217,7 @@ where { #[inline] fn max_proof_window(&self) -> u64 { - self.inner.eth_proof_window() + self.inner.eth_api.eth_proof_window() } } @@ -264,7 +240,7 @@ where N: RpcNodeCore, { fn with_dev_accounts(&self) { - *self.inner.signers().write() = DevSigner::random_signers(20) + *self.inner.eth_api.signers().write() = 
DevSigner::random_signers(20) } } @@ -273,3 +249,71 @@ impl fmt::Debug for OpEthApi { f.debug_struct("OpEthApi").finish_non_exhaustive() } } + +/// Container type `OpEthApi` +#[allow(missing_debug_implementations)] +struct OpEthApiInner { + /// Gateway to node's core components. + eth_api: EthApiNodeBackend, + /// Sequencer client, configured to forward submitted transactions to sequencer of given OP + /// network. + sequencer_client: Option, +} + +/// A type that knows how to build a [`OpEthApi`]. +#[derive(Debug, Default)] +pub struct OpEthApiBuilder { + /// Sequencer client, configured to forward submitted transactions to sequencer of given OP + /// network. + sequencer_client: Option, +} + +impl OpEthApiBuilder { + /// Creates a [`OpEthApiBuilder`] instance from [`EthApiBuilderCtx`]. + pub const fn new() -> Self { + Self { sequencer_client: None } + } + + /// With a [`SequencerClient`]. + pub fn with_sequencer(mut self, sequencer_client: Option) -> Self { + self.sequencer_client = sequencer_client; + self + } +} + +impl OpEthApiBuilder { + /// Builds an instance of [`OpEthApi`] + pub fn build(self, ctx: &EthApiBuilderCtx) -> OpEthApi + where + N: RpcNodeCore< + Provider: BlockReaderIdExt + + ChainSpecProvider + + CanonStateSubscriptions + + Clone + + 'static, + >, + { + let blocking_task_pool = + BlockingTaskPool::build().expect("failed to build blocking task pool"); + + let eth_api = EthApiInner::new( + ctx.provider.clone(), + ctx.pool.clone(), + ctx.network.clone(), + ctx.cache.clone(), + ctx.new_gas_price_oracle(), + ctx.config.rpc_gas_cap, + ctx.config.rpc_max_simulate_blocks, + ctx.config.eth_proof_window, + blocking_task_pool, + ctx.new_fee_history_cache(), + ctx.evm_config.clone(), + ctx.executor.clone(), + ctx.config.proof_permits, + ); + + OpEthApi { + inner: Arc::new(OpEthApiInner { eth_api, sequencer_client: self.sequencer_client }), + } + } +} diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs 
index 98ea65778d8d..eebb61c8cb02 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -7,9 +7,9 @@ use alloy_primitives::{BlockNumber, B256}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; use reth_optimism_consensus::calculate_receipt_root_no_memo_optimism; -use reth_primitives::{Receipt, SealedBlockWithSenders}; +use reth_primitives::{Receipt, SealedBlockWithSenders, TransactionSigned}; use reth_provider::{ - BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ExecutionOutcome, + BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ExecutionOutcome, ProviderTx, ReceiptProvider, StateProviderFactory, }; use reth_rpc_eth_api::{ @@ -17,7 +17,7 @@ use reth_rpc_eth_api::{ FromEthApiError, RpcNodeCore, }; use reth_rpc_eth_types::{EthApiError, PendingBlock}; -use reth_transaction_pool::TransactionPool; +use reth_transaction_pool::{PoolTransaction, TransactionPool}; use revm::primitives::BlockEnv; impl LoadPendingBlock for OpEthApi @@ -25,18 +25,20 @@ where Self: SpawnBlocking, N: RpcNodeCore< Provider: BlockReaderIdExt< + Transaction = reth_primitives::TransactionSigned, Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, > + EvmEnvProvider + ChainSpecProvider + StateProviderFactory, - Pool: TransactionPool, - Evm: ConfigureEvm
, + Pool: TransactionPool>>, + Evm: ConfigureEvm
, >, { #[inline] fn pending_block(&self) -> &tokio::sync::Mutex> { - self.inner.pending_block() + self.inner.eth_api.pending_block() } /// Returns the locally built pending block diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index e803ea210197..2a4df1ada49d 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -31,6 +31,8 @@ where receipt: Receipt, ) -> Result, Self::Error> { let (block, receipts) = self + .inner + .eth_api .cache() .get_block_and_receipts(meta.block_hash) .await @@ -43,7 +45,7 @@ where reth_optimism_evm::extract_l1_info(&block.body).map_err(OpEthApiError::from)?; Ok(OpReceiptBuilder::new( - &self.inner.provider().chain_spec(), + &self.inner.eth_api.provider().chain_spec(), &tx, meta, &receipt, diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 3202dc46ad1b..3ba5edead55a 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -6,7 +6,7 @@ use alloy_rpc_types_eth::TransactionInfo; use op_alloy_consensus::OpTxEnvelope; use op_alloy_rpc_types::Transaction; use reth_node_api::FullNodeComponents; -use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered}; +use reth_primitives::{RecoveredTx, TransactionSigned}; use reth_provider::{BlockReaderIdExt, ReceiptProvider, TransactionsProvider}; use reth_rpc_eth_api::{ helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, @@ -23,7 +23,7 @@ where N: RpcNodeCore, { fn signers(&self) -> &parking_lot::RwLock>> { - self.inner.signers() + self.inner.eth_api.signers() } /// Decodes and recovers the transaction and submits it to the pool. @@ -68,7 +68,7 @@ where { /// Returns the [`SequencerClient`] if one is set. 
pub fn raw_tx_forwarder(&self) -> Option { - self.sequencer_client.clone() + self.inner.sequencer_client.clone() } } @@ -81,7 +81,7 @@ where fn fill( &self, - tx: TransactionSignedEcRecovered, + tx: RecoveredTx, tx_info: TransactionInfo, ) -> Result { let from = tx.signer(); @@ -106,6 +106,7 @@ where } reth_primitives::Transaction::Deposit(tx) => { self.inner + .eth_api .provider() .receipt_by_hash(hash) .map_err(Self::Error::from_eth_err)? diff --git a/crates/optimism/rpc/src/witness.rs b/crates/optimism/rpc/src/witness.rs index ed9d77e73e84..8cc4bd98ef25 100644 --- a/crates/optimism/rpc/src/witness.rs +++ b/crates/optimism/rpc/src/witness.rs @@ -9,7 +9,7 @@ use reth_chainspec::ChainSpecProvider; use reth_evm::ConfigureEvm; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_payload_builder::OpPayloadBuilder; -use reth_primitives::SealedHeader; +use reth_primitives::{SealedHeader, TransactionSigned}; use reth_provider::{BlockReaderIdExt, ProviderError, ProviderResult, StateProviderFactory}; pub use reth_rpc_api::DebugExecutionWitnessApiServer; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; @@ -31,7 +31,7 @@ impl OpDebugWitnessApi { impl OpDebugWitnessApi where - Provider: BlockReaderIdExt, + Provider: BlockReaderIdExt
, { /// Fetches the parent header by hash. fn parent_header(&self, parent_block_hash: B256) -> ProviderResult { @@ -45,11 +45,11 @@ where impl DebugExecutionWitnessApiServer for OpDebugWitnessApi where - Provider: BlockReaderIdExt + Provider: BlockReaderIdExt
+ StateProviderFactory + ChainSpecProvider + 'static, - EvmConfig: ConfigureEvm
+ 'static, + EvmConfig: ConfigureEvm
+ 'static, { fn execute_payload( &self, diff --git a/crates/optimism/storage/Cargo.toml b/crates/optimism/storage/Cargo.toml index 2b18897d94a0..b72e9c287df3 100644 --- a/crates/optimism/storage/Cargo.toml +++ b/crates/optimism/storage/Cargo.toml @@ -22,6 +22,6 @@ reth-stages-types.workspace = true [features] optimism = [ "reth-primitives/optimism", - "reth-codecs/optimism", + "reth-codecs/op", "reth-db-api/optimism" ] diff --git a/crates/optimism/storage/src/lib.rs b/crates/optimism/storage/src/lib.rs index 391f26093ba6..0db8f4e20a9d 100644 --- a/crates/optimism/storage/src/lib.rs +++ b/crates/optimism/storage/src/lib.rs @@ -13,7 +13,7 @@ mod tests { use reth_codecs::{test_utils::UnusedBits, validate_bitflag_backwards_compat}; use reth_db_api::models::{ - CompactClientVersion, CompactU256, CompactU64, StoredBlockBodyIndices, StoredBlockOmmers, + CompactClientVersion, CompactU256, CompactU64, StoredBlockBodyIndices, StoredBlockWithdrawals, }; use reth_primitives::{Account, Receipt}; @@ -43,7 +43,6 @@ mod tests { assert_eq!(StageCheckpoint::bitflag_encoded_bytes(), 1); assert_eq!(StageUnitCheckpoint::bitflag_encoded_bytes(), 1); assert_eq!(StoredBlockBodyIndices::bitflag_encoded_bytes(), 1); - assert_eq!(StoredBlockOmmers::bitflag_encoded_bytes(), 0); assert_eq!(StoredBlockWithdrawals::bitflag_encoded_bytes(), 0); assert_eq!(StorageHashingCheckpoint::bitflag_encoded_bytes(), 1); @@ -67,7 +66,6 @@ mod tests { validate_bitflag_backwards_compat!(StageCheckpoint, UnusedBits::NotZero); validate_bitflag_backwards_compat!(StageUnitCheckpoint, UnusedBits::Zero); validate_bitflag_backwards_compat!(StoredBlockBodyIndices, UnusedBits::Zero); - validate_bitflag_backwards_compat!(StoredBlockOmmers, UnusedBits::Zero); validate_bitflag_backwards_compat!(StoredBlockWithdrawals, UnusedBits::Zero); validate_bitflag_backwards_compat!(StorageHashingCheckpoint, UnusedBits::NotZero); } diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 
0ab411d3e600..8e9c06865d03 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -132,7 +132,11 @@ impl BasicPayloadJobGenerator PayloadJobGenerator for BasicPayloadJobGenerator where - Client: StateProviderFactory + BlockReaderIdExt + Clone + Unpin + 'static, + Client: StateProviderFactory + + BlockReaderIdExt
+ + Clone + + Unpin + + 'static, Pool: TransactionPool + Unpin + 'static, Tasks: TaskSpawner + Clone + Unpin + 'static, Builder: PayloadBuilder + Unpin + 'static, diff --git a/crates/payload/util/src/traits.rs b/crates/payload/util/src/traits.rs index 52dad5111698..e9bb7e03704c 100644 --- a/crates/payload/util/src/traits.rs +++ b/crates/payload/util/src/traits.rs @@ -1,5 +1,5 @@ use alloy_primitives::Address; -use reth_primitives::TransactionSignedEcRecovered; +use reth_primitives::RecoveredTx; /// Iterator that returns transactions for the block building process in the order they should be /// included in the block. @@ -7,12 +7,15 @@ use reth_primitives::TransactionSignedEcRecovered; /// Can include transactions from the pool and other sources (alternative pools, /// sequencer-originated transactions, etc.). pub trait PayloadTransactions { + /// The transaction type this iterator yields. + type Transaction; + /// Returns the next transaction to include in the block. fn next( &mut self, // In the future, `ctx` can include access to state for block building purposes. ctx: (), - ) -> Option; + ) -> Option>; /// Exclude descendants of the transaction with given sender and nonce from the iterator, /// because this transaction won't be included in the block. diff --git a/crates/payload/util/src/transaction.rs b/crates/payload/util/src/transaction.rs index a45e177d4d34..71387946aef1 100644 --- a/crates/payload/util/src/transaction.rs +++ b/crates/payload/util/src/transaction.rs @@ -1,7 +1,7 @@ use crate::PayloadTransactions; use alloy_consensus::Transaction; use alloy_primitives::Address; -use reth_primitives::TransactionSignedEcRecovered; +use reth_primitives::RecoveredTx; /// An implementation of [`crate::traits::PayloadTransactions`] that yields /// a pre-defined set of transactions. 
@@ -26,8 +26,10 @@ impl PayloadTransactionsFixed { } } -impl PayloadTransactions for PayloadTransactionsFixed { - fn next(&mut self, _ctx: ()) -> Option { +impl PayloadTransactions for PayloadTransactionsFixed> { + type Transaction = T; + + fn next(&mut self, _ctx: ()) -> Option> { (self.index < self.transactions.len()).then(|| { let tx = self.transactions[self.index].clone(); self.index += 1; @@ -87,20 +89,22 @@ impl PayloadTransactionsChain PayloadTransactions for PayloadTransactionsChain +impl PayloadTransactions for PayloadTransactionsChain where - B: PayloadTransactions, - A: PayloadTransactions, + A: PayloadTransactions, + B: PayloadTransactions, { - fn next(&mut self, ctx: ()) -> Option { + type Transaction = A::Transaction; + + fn next(&mut self, ctx: ()) -> Option> { while let Some(tx) = self.before.next(ctx) { if let Some(before_max_gas) = self.before_max_gas { - if self.before_gas + tx.transaction.gas_limit() <= before_max_gas { - self.before_gas += tx.transaction.gas_limit(); + if self.before_gas + tx.as_signed().gas_limit() <= before_max_gas { + self.before_gas += tx.as_signed().gas_limit(); return Some(tx); } - self.before.mark_invalid(tx.signer(), tx.transaction.nonce()); - self.after.mark_invalid(tx.signer(), tx.transaction.nonce()); + self.before.mark_invalid(tx.signer(), tx.as_signed().nonce()); + self.after.mark_invalid(tx.signer(), tx.as_signed().nonce()); } else { return Some(tx); } @@ -108,11 +112,11 @@ where while let Some(tx) = self.after.next(ctx) { if let Some(after_max_gas) = self.after_max_gas { - if self.after_gas + tx.transaction.gas_limit() <= after_max_gas { - self.after_gas += tx.transaction.gas_limit(); + if self.after_gas + tx.as_signed().gas_limit() <= after_max_gas { + self.after_gas += tx.as_signed().gas_limit(); return Some(tx); } - self.after.mark_invalid(tx.signer(), tx.transaction.nonce()); + self.after.mark_invalid(tx.signer(), tx.as_signed().nonce()); } else { return Some(tx); } diff --git 
a/crates/primitives-traits/Cargo.toml b/crates/primitives-traits/Cargo.toml index df4491b2d126..459fdbde1a70 100644 --- a/crates/primitives-traits/Cargo.toml +++ b/crates/primitives-traits/Cargo.toml @@ -23,11 +23,13 @@ alloy-primitives.workspace = true alloy-rlp.workspace = true revm-primitives.workspace = true +# op +op-alloy-consensus = { workspace = true, optional = true } + # misc byteorder = { workspace = true, optional = true } bytes.workspace = true derive_more.workspace = true -roaring = "0.10.2" serde_with = { workspace = true, optional = true } auto_impl.workspace = true @@ -61,7 +63,11 @@ std = [ "alloy-genesis/std", "alloy-primitives/std", "revm-primitives/std", - "serde?/std" + "serde?/std", + "serde_with?/std", + "alloy-rlp/std", + "bytes/std", + "derive_more/std" ] test-utils = [ "arbitrary", @@ -76,28 +82,33 @@ arbitrary = [ "dep:proptest-arbitrary-interop", "alloy-eips/arbitrary", "revm-primitives/arbitrary", - "reth-codecs?/arbitrary" + "reth-codecs?/arbitrary", + "op-alloy-consensus?/arbitrary" ] serde-bincode-compat = [ "serde", "serde_with", "alloy-consensus/serde-bincode-compat", - "alloy-eips/serde-bincode-compat" + "alloy-eips/serde-bincode-compat", + "op-alloy-consensus?/serde-bincode-compat" ] serde = [ "dep:serde", "alloy-consensus/serde", - "alloy-eips/serde", - "alloy-primitives/serde", - "bytes/serde", - "rand/serde", - "reth-codecs?/serde", - "revm-primitives/serde", - "roaring/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "bytes/serde", + "rand/serde", + "reth-codecs?/serde", + "revm-primitives/serde", "revm-primitives/serde", + "op-alloy-consensus?/serde" ] reth-codec = [ "dep:reth-codecs", "dep:modular-bitfield", "dep:byteorder", ] +op = [ + "dep:op-alloy-consensus", +] diff --git a/crates/primitives-traits/src/account.rs b/crates/primitives-traits/src/account.rs index c8504f3b63cb..398294b09d88 100644 --- a/crates/primitives-traits/src/account.rs +++ b/crates/primitives-traits/src/account.rs @@ -49,7 +49,7 @@ impl 
Account { pub fn is_empty(&self) -> bool { self.nonce == 0 && self.balance.is_zero() && - self.bytecode_hash.map_or(true, |hash| hash == KECCAK_EMPTY) + self.bytecode_hash.is_none_or(|hash| hash == KECCAK_EMPTY) } /// Returns an account bytecode's hash. diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index 76bf916add9b..b0fe1e3d0822 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -1,10 +1,13 @@ //! Block body abstraction. +use crate::{ + FullSignedTx, InMemorySize, MaybeArbitrary, MaybeSerde, MaybeSerdeBincodeCompat, + SignedTransaction, +}; use alloc::{fmt, vec::Vec}; - -use alloy_eips::eip4895::Withdrawals; - -use crate::{FullSignedTx, InMemorySize, MaybeArbitrary, MaybeSerde, SignedTransaction}; +use alloy_consensus::Transaction; +use alloy_eips::{eip2718::Encodable2718, eip4844::DATA_GAS_PER_BLOB, eip4895::Withdrawals}; +use alloy_primitives::Bytes; /// Helper trait that unifies all behaviour required by transaction to support full node operations. pub trait FullBlockBody: BlockBody {} @@ -26,6 +29,8 @@ pub trait BlockBody: + InMemorySize + MaybeSerde + MaybeArbitrary + + MaybeSerdeBincodeCompat + + 'static { /// Ordered list of signed transactions as committed in block. type Transaction: SignedTransaction; @@ -44,4 +49,34 @@ pub trait BlockBody: /// Returns block ommers if any. fn ommers(&self) -> Option<&[Self::OmmerHeader]>; + + /// Calculates the total blob gas used by _all_ EIP-4844 transactions in the block. + fn blob_gas_used(&self) -> u64 { + // TODO(mattss): simplify after + self.transactions() + .iter() + .filter_map(|tx| tx.blob_versioned_hashes()) + .map(|hashes| hashes.len() as u64 * DATA_GAS_PER_BLOB) + .sum() + } + + /// Returns an iterator over the encoded 2718 transactions. + /// + /// This is also known as `raw transactions`. + /// + /// See also [`Encodable2718`]. 
+ #[doc(alias = "raw_transactions_iter")] + fn encoded_2718_transactions_iter(&self) -> impl Iterator> + '_ { + self.transactions().iter().map(|tx| tx.encoded_2718()) + } + + /// Returns a vector of encoded 2718 transactions. + /// + /// This is also known as `raw transactions`. + /// + /// See also [`Encodable2718`]. + #[doc(alias = "raw_transactions")] + fn encoded_2718_transactions(&self) -> Vec { + self.encoded_2718_transactions_iter().map(Into::into).collect() + } } diff --git a/crates/primitives-traits/src/block/header.rs b/crates/primitives-traits/src/block/header.rs index 26806808532b..47d50a45bb54 100644 --- a/crates/primitives-traits/src/block/header.rs +++ b/crates/primitives-traits/src/block/header.rs @@ -4,7 +4,7 @@ use core::fmt; use alloy_primitives::Sealable; -use crate::{InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde}; +use crate::{InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde, MaybeSerdeBincodeCompat}; /// Helper trait that unifies all behaviour required by block header to support full node /// operations. @@ -29,24 +29,16 @@ pub trait BlockHeader: + InMemorySize + MaybeSerde + MaybeArbitrary + + MaybeSerdeBincodeCompat + + AsRef + + 'static { + /// Returns whether this header corresponds to an empty block. + fn is_empty(&self) -> bool; } -impl BlockHeader for T where - T: Send - + Sync - + Unpin - + Clone - + Default - + fmt::Debug - + PartialEq - + Eq - + alloy_rlp::Encodable - + alloy_rlp::Decodable - + alloy_consensus::BlockHeader - + Sealable - + InMemorySize - + MaybeSerde - + MaybeArbitrary -{ +impl BlockHeader for alloy_consensus::Header { + fn is_empty(&self) -> bool { + self.is_empty() + } } diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index 5b22ff590be5..fcbf02a76c6c 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -41,10 +41,10 @@ pub trait Block: + MaybeArbitrary { /// Header part of the block. 
- type Header: BlockHeader + 'static; + type Header: BlockHeader; /// The block's body contains the transactions in the block. - type Body: BlockBody + Send + Sync + Unpin + 'static; + type Body: BlockBody; /// Create new block instance. fn new(header: Self::Header, body: Self::Body) -> Self; diff --git a/crates/primitives-traits/src/header/mod.rs b/crates/primitives-traits/src/header/mod.rs index ea5f7eafb518..abcdf4ee0cca 100644 --- a/crates/primitives-traits/src/header/mod.rs +++ b/crates/primitives-traits/src/header/mod.rs @@ -1,5 +1,5 @@ mod sealed; -pub use sealed::{BlockWithParent, Header, SealedHeader}; +pub use sealed::{Header, SealedHeader}; mod error; pub use error::HeaderError; diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index 08add0ac3c15..1a5163e6ba3f 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -1,26 +1,13 @@ +use crate::InMemorySize; pub use alloy_consensus::Header; - -use core::mem; - use alloy_consensus::Sealed; use alloy_eips::BlockNumHash; -use alloy_primitives::{keccak256, BlockHash, Sealable, B256}; +use alloy_primitives::{keccak256, BlockHash, Sealable}; use alloy_rlp::{Decodable, Encodable}; use bytes::BufMut; +use core::mem; use derive_more::{AsRef, Deref}; -use crate::InMemorySize; - -/// A helper struct to store the block number/hash and its parent hash. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub struct BlockWithParent { - /// Parent hash. - pub parent: B256, - /// Block number/hash. - pub block: BlockNumHash, -} - /// A [`Header`] that is sealed at a precalculated hash, use [`SealedHeader::unseal()`] if you want /// to modify header. #[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref)] @@ -173,11 +160,12 @@ where /// Bincode-compatible [`SealedHeader`] serde implementation. 
#[cfg(feature = "serde-bincode-compat")] pub(super) mod serde_bincode_compat { - use alloy_consensus::serde_bincode_compat::Header; use alloy_primitives::BlockHash; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; + use crate::serde_bincode_compat::SerdeBincodeCompat; + /// Bincode-compatible [`super::SealedHeader`] serde implementation. /// /// Intended to use with the [`serde_with::serde_as`] macro in the following way: @@ -193,20 +181,21 @@ pub(super) mod serde_bincode_compat { /// header: SealedHeader, /// } /// ``` - #[derive(Debug, Serialize, Deserialize)] - pub struct SealedHeader<'a> { + #[derive(derive_more::Debug, Serialize, Deserialize)] + #[debug(bound(H::BincodeRepr<'a>: core::fmt::Debug))] + pub struct SealedHeader<'a, H: SerdeBincodeCompat = super::Header> { hash: BlockHash, - header: Header<'a>, + header: H::BincodeRepr<'a>, } - impl<'a> From<&'a super::SealedHeader> for SealedHeader<'a> { - fn from(value: &'a super::SealedHeader) -> Self { - Self { hash: value.hash, header: Header::from(&value.header) } + impl<'a, H: SerdeBincodeCompat> From<&'a super::SealedHeader> for SealedHeader<'a, H> { + fn from(value: &'a super::SealedHeader) -> Self { + Self { hash: value.hash, header: (&value.header).into() } } } - impl<'a> From> for super::SealedHeader { - fn from(value: SealedHeader<'a>) -> Self { + impl<'a, H: SerdeBincodeCompat> From> for super::SealedHeader { + fn from(value: SealedHeader<'a, H>) -> Self { Self { hash: value.hash, header: value.header.into() } } } @@ -229,6 +218,9 @@ pub(super) mod serde_bincode_compat { } } + impl SerdeBincodeCompat for super::SealedHeader { + type BincodeRepr<'a> = SealedHeader<'a, H>; + } #[cfg(test)] mod tests { use super::super::{serde_bincode_compat, SealedHeader}; diff --git a/crates/primitives-traits/src/integer_list.rs b/crates/primitives-traits/src/integer_list.rs deleted file mode 100644 index 6fc6d75899ce..000000000000 --- 
a/crates/primitives-traits/src/integer_list.rs +++ /dev/null @@ -1,196 +0,0 @@ -use alloc::vec::Vec; -use core::fmt; - -use bytes::BufMut; -use derive_more::Deref; -use roaring::RoaringTreemap; - -/// A data structure that uses Roaring Bitmaps to efficiently store a list of integers. -/// -/// This structure provides excellent compression while allowing direct access to individual -/// elements without the need for full decompression. -/// -/// Key features: -/// - Efficient compression: the underlying Roaring Bitmaps significantly reduce memory usage. -/// - Direct access: elements can be accessed or queried without needing to decode the entire list. -/// - [`RoaringTreemap`] backing: internally backed by [`RoaringTreemap`], which supports 64-bit -/// integers. -#[derive(Clone, PartialEq, Default, Deref)] -pub struct IntegerList(pub RoaringTreemap); - -impl fmt::Debug for IntegerList { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("IntegerList")?; - f.debug_list().entries(self.0.iter()).finish() - } -} - -impl IntegerList { - /// Creates a new empty [`IntegerList`]. - pub fn empty() -> Self { - Self(RoaringTreemap::new()) - } - - /// Creates an [`IntegerList`] from a list of integers. - /// - /// Returns an error if the list is not pre-sorted. - pub fn new(list: impl IntoIterator) -> Result { - RoaringTreemap::from_sorted_iter(list) - .map(Self) - .map_err(|_| IntegerListError::UnsortedInput) - } - - /// Creates an [`IntegerList`] from a pre-sorted list of integers. - /// - /// # Panics - /// - /// Panics if the list is not pre-sorted. - #[inline] - #[track_caller] - pub fn new_pre_sorted(list: impl IntoIterator) -> Self { - Self::new(list).expect("IntegerList must be pre-sorted and non-empty") - } - - /// Appends a list of integers to the current list. - pub fn append(&mut self, list: impl IntoIterator) -> Result { - self.0.append(list).map_err(|_| IntegerListError::UnsortedInput) - } - - /// Pushes a new integer to the list. 
- pub fn push(&mut self, value: u64) -> Result<(), IntegerListError> { - self.0.push(value).then_some(()).ok_or(IntegerListError::UnsortedInput) - } - - /// Clears the list. - pub fn clear(&mut self) { - self.0.clear(); - } - - /// Serializes a [`IntegerList`] into a sequence of bytes. - pub fn to_bytes(&self) -> Vec { - let mut vec = Vec::with_capacity(self.0.serialized_size()); - self.0.serialize_into(&mut vec).expect("not able to encode IntegerList"); - vec - } - - /// Serializes a [`IntegerList`] into a sequence of bytes. - pub fn to_mut_bytes(&self, buf: &mut B) { - self.0.serialize_into(buf.writer()).unwrap(); - } - - /// Deserializes a sequence of bytes into a proper [`IntegerList`]. - pub fn from_bytes(data: &[u8]) -> Result { - RoaringTreemap::deserialize_from(data) - .map(Self) - .map_err(|_| IntegerListError::FailedToDeserialize) - } -} - -#[cfg(feature = "serde")] -impl serde::Serialize for IntegerList { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - use serde::ser::SerializeSeq; - - let mut seq = serializer.serialize_seq(Some(self.len() as usize))?; - for e in &self.0 { - seq.serialize_element(&e)?; - } - seq.end() - } -} - -#[cfg(feature = "serde")] -struct IntegerListVisitor; - -#[cfg(feature = "serde")] -impl<'de> serde::de::Visitor<'de> for IntegerListVisitor { - type Value = IntegerList; - - fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("a usize array") - } - - fn visit_seq(self, mut seq: E) -> Result - where - E: serde::de::SeqAccess<'de>, - { - let mut list = IntegerList::empty(); - while let Some(item) = seq.next_element()? 
{ - list.push(item).map_err(serde::de::Error::custom)?; - } - Ok(list) - } -} - -#[cfg(feature = "serde")] -impl<'de> serde::Deserialize<'de> for IntegerList { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - deserializer.deserialize_byte_buf(IntegerListVisitor) - } -} - -#[cfg(any(test, feature = "arbitrary"))] -use arbitrary::{Arbitrary, Unstructured}; - -#[cfg(any(test, feature = "arbitrary"))] -impl<'a> Arbitrary<'a> for IntegerList { - fn arbitrary(u: &mut Unstructured<'a>) -> Result { - let mut nums: Vec = Vec::arbitrary(u)?; - nums.sort_unstable(); - Self::new(nums).map_err(|_| arbitrary::Error::IncorrectFormat) - } -} - -/// Primitives error type. -#[derive(Debug, derive_more::Display, derive_more::Error)] -pub enum IntegerListError { - /// The provided input is unsorted. - #[display("the provided input is unsorted")] - UnsortedInput, - /// Failed to deserialize data into type. - #[display("failed to deserialize data into type")] - FailedToDeserialize, -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn empty_list() { - assert_eq!(IntegerList::empty().len(), 0); - assert_eq!(IntegerList::new_pre_sorted(std::iter::empty()).len(), 0); - } - - #[test] - fn test_integer_list() { - let original_list = [1, 2, 3]; - let ef_list = IntegerList::new(original_list).unwrap(); - assert_eq!(ef_list.iter().collect::>(), original_list); - } - - #[test] - fn test_integer_list_serialization() { - let original_list = [1, 2, 3]; - let ef_list = IntegerList::new(original_list).unwrap(); - - let blist = ef_list.to_bytes(); - assert_eq!(IntegerList::from_bytes(&blist).unwrap(), ef_list) - } - - #[test] - fn serde_serialize_deserialize() { - let original_list = [1, 2, 3]; - let ef_list = IntegerList::new(original_list).unwrap(); - - let serde_out = serde_json::to_string(&ef_list).unwrap(); - let serde_ef_list = serde_json::from_str::(&serde_out).unwrap(); - assert_eq!(serde_ef_list, ef_list); - } -} diff --git 
a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 338f8f621e1a..04d02be0b7dd 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -31,9 +31,6 @@ pub use transaction::{ FullTransaction, Transaction, }; -mod integer_list; -pub use integer_list::{IntegerList, IntegerListError}; - pub mod block; pub use block::{ body::{BlockBody, FullBlockBody}, @@ -58,7 +55,7 @@ pub use storage::StorageEntry; pub mod header; #[cfg(any(test, feature = "arbitrary", feature = "test-utils"))] pub use header::test_utils; -pub use header::{BlockWithParent, Header, HeaderError, SealedHeader}; +pub use header::{Header, HeaderError, SealedHeader}; /// Bincode-compatible serde implementations for common abstracted types in Reth. /// @@ -68,9 +65,7 @@ pub use header::{BlockWithParent, Header, HeaderError, SealedHeader}; /// /// Read more: #[cfg(feature = "serde-bincode-compat")] -pub mod serde_bincode_compat { - pub use super::header::{serde_bincode_compat as header, serde_bincode_compat::*}; -} +pub mod serde_bincode_compat; /// Heuristic size trait pub mod size; @@ -118,3 +113,16 @@ pub trait MaybeCompact {} impl MaybeCompact for T where T: reth_codecs::Compact {} #[cfg(not(feature = "reth-codec"))] impl MaybeCompact for T {} + +/// Helper trait that requires serde bincode compatibility implementation. +#[cfg(feature = "serde-bincode-compat")] +pub trait MaybeSerdeBincodeCompat: crate::serde_bincode_compat::SerdeBincodeCompat {} +/// Noop. Helper trait that would require serde bincode compatibility implementation if +/// `serde-bincode-compat` feature were enabled. 
+#[cfg(not(feature = "serde-bincode-compat"))] +pub trait MaybeSerdeBincodeCompat {} + +#[cfg(feature = "serde-bincode-compat")] +impl MaybeSerdeBincodeCompat for T where T: crate::serde_bincode_compat::SerdeBincodeCompat {} +#[cfg(not(feature = "serde-bincode-compat"))] +impl MaybeSerdeBincodeCompat for T {} diff --git a/crates/primitives-traits/src/node.rs b/crates/primitives-traits/src/node.rs index 4adf258177e1..5b3691d2fdf7 100644 --- a/crates/primitives-traits/src/node.rs +++ b/crates/primitives-traits/src/node.rs @@ -1,9 +1,8 @@ -use core::fmt; - use crate::{ Block, BlockBody, BlockHeader, FullBlock, FullBlockBody, FullBlockHeader, FullReceipt, - FullSignedTx, FullTxType, MaybeArbitrary, MaybeSerde, + FullSignedTx, FullTxType, Receipt, SignedTransaction, TxType, }; +use core::fmt; /// Configures all the primitive types of the node. pub trait NodePrimitives: @@ -16,39 +15,11 @@ pub trait NodePrimitives: /// Block body primitive. type BlockBody: BlockBody; /// Signed version of the transaction type. - type SignedTx: Send - + Sync - + Unpin - + Clone - + fmt::Debug - + PartialEq - + Eq - + MaybeSerde - + MaybeArbitrary - + 'static; + type SignedTx: SignedTransaction + 'static; /// Transaction envelope type ID. - type TxType: Send - + Sync - + Unpin - + Clone - + Default - + fmt::Debug - + PartialEq - + Eq - + MaybeArbitrary - + 'static; + type TxType: TxType + 'static; /// A receipt. - type Receipt: Send - + Sync - + Unpin - + Clone - + Default - + fmt::Debug - + PartialEq - + Eq - + MaybeSerde - + MaybeArbitrary - + 'static; + type Receipt: Receipt; } /// Helper trait that sets trait bounds on [`NodePrimitives`]. 
pub trait FullNodePrimitives diff --git a/crates/primitives-traits/src/receipt.rs b/crates/primitives-traits/src/receipt.rs index e2af40c447ed..435748c4e1bd 100644 --- a/crates/primitives-traits/src/receipt.rs +++ b/crates/primitives-traits/src/receipt.rs @@ -22,7 +22,7 @@ pub trait Receipt: + Clone + Default + fmt::Debug - + TxReceipt + + TxReceipt + alloy_rlp::Encodable + alloy_rlp::Decodable + MaybeSerde diff --git a/crates/primitives-traits/src/serde_bincode_compat.rs b/crates/primitives-traits/src/serde_bincode_compat.rs new file mode 100644 index 000000000000..a1f7d42569e8 --- /dev/null +++ b/crates/primitives-traits/src/serde_bincode_compat.rs @@ -0,0 +1,14 @@ +use core::fmt::Debug; + +pub use super::header::{serde_bincode_compat as header, serde_bincode_compat::*}; +use serde::{de::DeserializeOwned, Serialize}; + +/// Trait for types that can be serialized and deserialized using bincode. +pub trait SerdeBincodeCompat: Sized + 'static { + /// Serde representation of the type for bincode serialization. + type BincodeRepr<'a>: Debug + Serialize + DeserializeOwned + From<&'a Self> + Into; +} + +impl SerdeBincodeCompat for alloy_consensus::Header { + type BincodeRepr<'a> = alloy_consensus::serde_bincode_compat::Header<'a>; +} diff --git a/crates/primitives-traits/src/size.rs b/crates/primitives-traits/src/size.rs index 4d721dd00b30..f9065cda24aa 100644 --- a/crates/primitives-traits/src/size.rs +++ b/crates/primitives-traits/src/size.rs @@ -46,6 +46,13 @@ macro_rules! 
impl_in_mem_size { impl_in_mem_size!(Header, TxLegacy, TxEip2930, TxEip1559, TxEip7702, TxEip4844); +#[cfg(feature = "op")] +impl InMemorySize for op_alloy_consensus::OpTxType { + fn size(&self) -> usize { + 1 + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/primitives-traits/src/transaction/signed.rs b/crates/primitives-traits/src/transaction/signed.rs index ae9a8f0d2acf..5e0a91b4da2b 100644 --- a/crates/primitives-traits/src/transaction/signed.rs +++ b/crates/primitives-traits/src/transaction/signed.rs @@ -1,7 +1,7 @@ //! API of a signed transaction. use crate::{FillTxEnv, InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde, TxType}; -use alloc::fmt; +use alloc::{fmt, vec::Vec}; use alloy_eips::eip2718::{Decodable2718, Encodable2718}; use alloy_primitives::{keccak256, Address, PrimitiveSignature, TxHash, B256}; use core::hash::Hash; @@ -61,7 +61,13 @@ pub trait SignedTransaction: /// /// Returns `None` if the transaction's signature is invalid, see also /// `reth_primitives::transaction::recover_signer_unchecked`. - fn recover_signer_unchecked(&self) -> Option
; + fn recover_signer_unchecked(&self) -> Option
{ + self.recover_signer_unchecked_with_buf(&mut Vec::new()) + } + + /// Same as [`Self::recover_signer_unchecked`] but receives a buffer to operate on. This is used + /// during batch recovery to avoid allocating a new buffer for each transaction. + fn recover_signer_unchecked_with_buf(&self, buf: &mut Vec) -> Option
; /// Calculate transaction hash, eip2728 transaction does not contain rlp header and start with /// tx type. diff --git a/crates/primitives-traits/src/transaction/tx_type.rs b/crates/primitives-traits/src/transaction/tx_type.rs index d2caebe4c9f1..c2f2e04899df 100644 --- a/crates/primitives-traits/src/transaction/tx_type.rs +++ b/crates/primitives-traits/src/transaction/tx_type.rs @@ -1,10 +1,8 @@ //! Abstraction of transaction envelope type ID. -use core::fmt; - -use alloy_primitives::{U64, U8}; - use crate::{InMemorySize, MaybeArbitrary, MaybeCompact}; +use alloy_primitives::{U64, U8}; +use core::fmt; /// Helper trait that unifies all behaviour required by transaction type ID to support full node /// operations. @@ -60,3 +58,26 @@ pub trait TxType: !self.is_eip4844() } } + +#[cfg(feature = "op")] +impl TxType for op_alloy_consensus::OpTxType { + fn is_legacy(&self) -> bool { + matches!(self, Self::Legacy) + } + + fn is_eip2930(&self) -> bool { + matches!(self, Self::Eip2930) + } + + fn is_eip1559(&self) -> bool { + matches!(self, Self::Eip1559) + } + + fn is_eip4844(&self) -> bool { + false + } + + fn is_eip7702(&self) -> bool { + matches!(self, Self::Eip7702) + } +} diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 9787c9f3a6a9..2f8f37bcd356 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -103,7 +103,12 @@ std = [ "revm-primitives/std", "secp256k1?/std", "serde/std", - "alloy-trie/std" + "alloy-trie/std", + "serde_with?/std", + "alloy-rlp/std", + "reth-ethereum-forks/std", + "bytes/std", + "derive_more/std" ] reth-codec = [ "dep:reth-codecs", @@ -141,7 +146,7 @@ c-kzg = [ ] optimism = [ "dep:op-alloy-consensus", - "reth-codecs?/optimism", + "reth-codecs?/op", "revm-primitives/optimism", ] alloy-compat = [ diff --git a/crates/primitives/src/alloy_compat.rs b/crates/primitives/src/alloy_compat.rs index a72c83996c01..06451c30b9e5 100644 --- a/crates/primitives/src/alloy_compat.rs +++ 
b/crates/primitives/src/alloy_compat.rs @@ -58,6 +58,7 @@ impl TryFrom for Block { excess_blob_gas, parent_beacon_block_root, requests_hash, + target_blobs_per_block, } = block.header.inner; Ok(Self { @@ -84,6 +85,7 @@ impl TryFrom for Block { excess_blob_gas, parent_beacon_block_root, requests_hash, + target_blobs_per_block, }, body: BlockBody { transactions, diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 5618d81bd8fc..9edbb2471efb 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -1,6 +1,6 @@ use crate::{ traits::BlockExt, transaction::SignedTransactionIntoRecoveredExt, BlockBodyTxExt, GotExpected, - SealedHeader, TransactionSigned, TransactionSignedEcRecovered, + RecoveredTx, SealedHeader, TransactionSigned, }; use alloc::vec::Vec; use alloy_consensus::Header; @@ -10,7 +10,7 @@ use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable}; use derive_more::{Deref, DerefMut}; #[cfg(any(test, feature = "arbitrary"))] pub use reth_primitives_traits::test_utils::{generate_valid_header, valid_header_strategy}; -use reth_primitives_traits::{BlockBody as _, InMemorySize, SignedTransaction}; +use reth_primitives_traits::{BlockBody as _, InMemorySize, SignedTransaction, Transaction}; use serde::{Deserialize, Serialize}; /// Ethereum full block. @@ -206,11 +206,7 @@ impl BlockWithSenders { #[inline] pub fn into_transactions_ecrecovered( self, - ) -> impl Iterator< - Item = TransactionSignedEcRecovered< - ::Transaction, - >, - > + ) -> impl Iterator::Transaction>> where ::Transaction: SignedTransaction, { @@ -273,11 +269,6 @@ impl SealedBlock { self.body.blob_transactions_iter() } - /// Calculates the total gas used by blob transactions in the sealed block. - pub fn blob_gas_used(&self) -> u64 { - self.blob_transactions().iter().filter_map(|tx| tx.blob_gas_used()).sum() - } - /// Returns whether or not the block contains any blob transactions. 
#[inline] pub fn has_blob_transactions(&self) -> bool { @@ -422,13 +413,14 @@ where Ok(()) } - /// Returns a vector of transactions RLP encoded with - /// [`alloy_eips::eip2718::Encodable2718::encoded_2718`]. - pub fn raw_transactions(&self) -> Vec - where - B::Transaction: Encodable2718, - { - self.body.transactions().iter().map(|tx| tx.encoded_2718().into()).collect() + /// Returns a vector of encoded 2718 transactions. + /// + /// This is also known as `raw transactions`. + /// + /// See also [`Encodable2718`]. + #[doc(alias = "raw_transactions")] + pub fn encoded_2718_transactions(&self) -> Vec { + self.body.encoded_2718_transactions() } } @@ -457,9 +449,8 @@ where impl reth_primitives_traits::Block for SealedBlock where - H: reth_primitives_traits::BlockHeader + 'static, - B: reth_primitives_traits::BlockBody + 'static, - Self: Serialize + for<'a> Deserialize<'a>, + H: reth_primitives_traits::BlockHeader, + B: reth_primitives_traits::BlockBody, { type Header = H; type Body = B; @@ -565,11 +556,7 @@ impl SealedBlockWithSenders { #[inline] pub fn into_transactions_ecrecovered( self, - ) -> impl Iterator< - Item = TransactionSignedEcRecovered< - ::Transaction, - >, - > + ) -> impl Iterator::Transaction>> where ::Transaction: SignedTransaction, { @@ -606,9 +593,9 @@ impl<'a> arbitrary::Arbitrary<'a> for SealedBlockWithSenders { Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize, RlpEncodable, RlpDecodable, )] #[rlp(trailing)] -pub struct BlockBody { +pub struct BlockBody { /// Transactions in the block - pub transactions: Vec, + pub transactions: Vec, /// Uncle headers for the given block pub ommers: Vec
, /// Withdrawals in the block. @@ -621,6 +608,22 @@ impl BlockBody { Block { header, body: self } } + /// Returns an iterator over all blob versioned hashes from the block body. + #[inline] + pub fn blob_versioned_hashes_iter(&self) -> impl Iterator + '_ { + self.blob_transactions_iter() + .filter_map(|tx| tx.as_eip4844().map(|blob_tx| &blob_tx.blob_versioned_hashes)) + .flatten() + } + + /// Returns all blob versioned hashes from the block body. + #[inline] + pub fn blob_versioned_hashes(&self) -> Vec<&B256> { + self.blob_versioned_hashes_iter().collect() + } +} + +impl BlockBody { /// Calculate the ommers root for the block body. pub fn calculate_ommers_root(&self) -> B256 { crate::proofs::calculate_ommers_root(&self.ommers) @@ -631,7 +634,9 @@ impl BlockBody { pub fn calculate_withdrawals_root(&self) -> Option { self.withdrawals.as_ref().map(|w| crate::proofs::calculate_withdrawals_root(w)) } +} +impl BlockBody { /// Returns whether or not the block body contains any blob transactions. #[inline] pub fn has_blob_transactions(&self) -> bool { @@ -646,37 +651,23 @@ impl BlockBody { /// Returns an iterator over all blob transactions of the block #[inline] - pub fn blob_transactions_iter(&self) -> impl Iterator + '_ { + pub fn blob_transactions_iter(&self) -> impl Iterator + '_ { self.transactions.iter().filter(|tx| tx.is_eip4844()) } /// Returns only the blob transactions, if any, from the block body. #[inline] - pub fn blob_transactions(&self) -> Vec<&TransactionSigned> { + pub fn blob_transactions(&self) -> Vec<&T> { self.blob_transactions_iter().collect() } - - /// Returns an iterator over all blob versioned hashes from the block body. - #[inline] - pub fn blob_versioned_hashes_iter(&self) -> impl Iterator + '_ { - self.blob_transactions_iter() - .filter_map(|tx| tx.as_eip4844().map(|blob_tx| &blob_tx.blob_versioned_hashes)) - .flatten() - } - - /// Returns all blob versioned hashes from the block body. 
- #[inline] - pub fn blob_versioned_hashes(&self) -> Vec<&B256> { - self.blob_versioned_hashes_iter().collect() - } } -impl InMemorySize for BlockBody { +impl InMemorySize for BlockBody { /// Calculates a heuristic for the in-memory size of the [`BlockBody`]. #[inline] fn size(&self) -> usize { - self.transactions.iter().map(TransactionSigned::size).sum::() + - self.transactions.capacity() * core::mem::size_of::() + + self.transactions.iter().map(T::size).sum::() + + self.transactions.capacity() * core::mem::size_of::() + self.ommers.iter().map(Header::size).sum::() + self.ommers.capacity() * core::mem::size_of::
() + self.withdrawals @@ -744,7 +735,7 @@ pub(super) mod serde_bincode_compat { use alloy_consensus::serde_bincode_compat::Header; use alloy_eips::eip4895::Withdrawals; use alloy_primitives::Address; - use reth_primitives_traits::serde_bincode_compat::SealedHeader; + use reth_primitives_traits::serde_bincode_compat::{SealedHeader, SerdeBincodeCompat}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; @@ -810,6 +801,10 @@ pub(super) mod serde_bincode_compat { } } + impl SerdeBincodeCompat for super::BlockBody { + type BincodeRepr<'a> = BlockBody<'a>; + } + /// Bincode-compatible [`super::SealedBlock`] serde implementation. /// /// Intended to use with the [`serde_with::serde_as`] macro in the following way: @@ -826,19 +821,34 @@ pub(super) mod serde_bincode_compat { /// } /// ``` #[derive(Debug, Serialize, Deserialize)] - pub struct SealedBlock<'a> { - header: SealedHeader<'a>, - body: BlockBody<'a>, + pub struct SealedBlock<'a, H = super::Header, B = super::BlockBody> + where + H: SerdeBincodeCompat, + B: SerdeBincodeCompat, + { + header: SealedHeader<'a, H>, + body: B::BincodeRepr<'a>, } - impl<'a> From<&'a super::SealedBlock> for SealedBlock<'a> { - fn from(value: &'a super::SealedBlock) -> Self { - Self { header: SealedHeader::from(&value.header), body: BlockBody::from(&value.body) } + impl<'a, H, B> From<&'a super::SealedBlock> for SealedBlock<'a, H, B> + where + H: SerdeBincodeCompat, + B: SerdeBincodeCompat, + { + fn from(value: &'a super::SealedBlock) -> Self { + Self { + header: SealedHeader::from(&value.header), + body: B::BincodeRepr::from(&value.body), + } } } - impl<'a> From> for super::SealedBlock { - fn from(value: SealedBlock<'a>) -> Self { + impl<'a, H, B> From> for super::SealedBlock + where + H: SerdeBincodeCompat, + B: SerdeBincodeCompat, + { + fn from(value: SealedBlock<'a, H, B>) -> Self { Self { header: value.header.into(), body: value.body.into() } } } @@ -877,19 +887,28 @@ pub(super) 
mod serde_bincode_compat { /// } /// ``` #[derive(Debug, Serialize, Deserialize)] - pub struct SealedBlockWithSenders<'a> { - block: SealedBlock<'a>, + pub struct SealedBlockWithSenders<'a, B = super::Block> + where + B: reth_primitives_traits::Block, + { + block: SealedBlock<'a, B::Header, B::Body>, senders: Cow<'a, Vec
>, } - impl<'a> From<&'a super::SealedBlockWithSenders> for SealedBlockWithSenders<'a> { - fn from(value: &'a super::SealedBlockWithSenders) -> Self { + impl<'a, B> From<&'a super::SealedBlockWithSenders> for SealedBlockWithSenders<'a, B> + where + B: reth_primitives_traits::Block, + { + fn from(value: &'a super::SealedBlockWithSenders) -> Self { Self { block: SealedBlock::from(&value.block), senders: Cow::Borrowed(&value.senders) } } } - impl<'a> From> for super::SealedBlockWithSenders { - fn from(value: SealedBlockWithSenders<'a>) -> Self { + impl<'a, B> From> for super::SealedBlockWithSenders + where + B: reth_primitives_traits::Block, + { + fn from(value: SealedBlockWithSenders<'a, B>) -> Self { Self { block: value.block.into(), senders: value.senders.into_owned() } } } @@ -1187,7 +1206,7 @@ mod tests { #[test] fn empty_block_rlp() { - let body = BlockBody::default(); + let body: BlockBody = BlockBody::default(); let mut buf = Vec::new(); body.encode(&mut buf); let decoded = BlockBody::decode(&mut buf.as_slice()).unwrap(); diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 224e025f39d5..2844c9397b83 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -52,8 +52,8 @@ pub use static_file::StaticFileSegment; pub use transaction::{ util::secp256k1::{public_key_to_address, recover_signer_unchecked, sign_message}, BlobTransaction, InvalidTransactionError, PooledTransactionsElement, - PooledTransactionsElementEcRecovered, Transaction, TransactionMeta, TransactionSigned, - TransactionSignedEcRecovered, TransactionSignedNoHash, TxType, + PooledTransactionsElementEcRecovered, RecoveredTx, Transaction, TransactionMeta, + TransactionSigned, TransactionSignedEcRecovered, TxType, }; // Re-exports diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index 95d707d1b2d5..79e15b89d7d6 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -73,6 +73,8 @@ impl 
Receipt { } impl TxReceipt for Receipt { + type Log = Log; + fn status_or_post_state(&self) -> Eip658Value { self.success.into() } diff --git a/crates/primitives/src/traits.rs b/crates/primitives/src/traits.rs index ec4e75c8c6d0..73eabd8ec986 100644 --- a/crates/primitives/src/traits.rs +++ b/crates/primitives/src/traits.rs @@ -3,8 +3,8 @@ use crate::{ BlockWithSenders, SealedBlock, }; use alloc::vec::Vec; -use alloy_eips::{eip2718::Encodable2718, BlockNumHash}; -use reth_primitives_traits::{Block, BlockBody, BlockHeader, SealedHeader, SignedTransaction}; +use alloy_eips::eip2718::Encodable2718; +use reth_primitives_traits::{Block, BlockBody, SealedHeader, SignedTransaction}; use revm_primitives::{Address, B256}; /// Extension trait for [`reth_primitives_traits::Block`] implementations @@ -121,17 +121,3 @@ pub trait BlockBodyTxExt: BlockBody { } impl BlockBodyTxExt for T {} - -/// Extension trait for [`BlockHeader`] adding useful helper methods. -pub trait HeaderExt: BlockHeader { - /// TODO: remove once is released - /// - /// Returns the parent block's number and hash - /// - /// Note: for the genesis block the parent number is 0 and the parent hash is the zero hash. - fn parent_num_hash(&self) -> BlockNumHash { - BlockNumHash::new(self.number().saturating_sub(1), self.parent_hash()) - } -} - -impl HeaderExt for T {} diff --git a/crates/primitives/src/transaction/error.rs b/crates/primitives/src/transaction/error.rs index 790292cd82b7..78f6cf5e5fd3 100644 --- a/crates/primitives/src/transaction/error.rs +++ b/crates/primitives/src/transaction/error.rs @@ -76,7 +76,7 @@ pub enum TransactionConversionError { } /// Represents error variants than can happen when trying to convert a -/// [`TransactionSignedEcRecovered`](crate::TransactionSignedEcRecovered) transaction. +/// [`RecoveredTx`](crate::RecoveredTx) transaction. 
#[derive(Debug, Clone, Eq, PartialEq, derive_more::Display)] pub enum TryFromRecoveredTransactionError { /// Thrown if the transaction type is unsupported. diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index db789d1f6de8..d0b88c4b179d 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -42,6 +42,11 @@ pub use sidecar::BlobTransaction; pub use signature::{recover_signer, recover_signer_unchecked}; pub use tx_type::TxType; +/// Handling transaction signature operations, including signature recovery, +/// applying chain IDs, and EIP-2 validation. +pub mod signature; +pub mod util; + pub(crate) mod access_list; mod compat; mod error; @@ -50,12 +55,6 @@ mod pooled; mod sidecar; mod tx_type; -/// Handling transaction signature operations, including signature recovery, -/// applying chain IDs, and EIP-2 validation. -pub mod signature; - -pub(crate) mod util; - #[cfg(any(test, feature = "reth-codec"))] pub use tx_type::{ COMPACT_EXTENDED_IDENTIFIER_FLAG, COMPACT_IDENTIFIER_EIP1559, COMPACT_IDENTIFIER_EIP2930, @@ -308,7 +307,7 @@ impl Transaction { set_code_tx.eip2718_encode(signature, out); } #[cfg(feature = "optimism")] - Self::Deposit(deposit_tx) => deposit_tx.eip2718_encode(out), + Self::Deposit(deposit_tx) => deposit_tx.encode_2718(out), } } @@ -675,6 +674,18 @@ impl alloy_consensus::Transaction for Transaction { } } + fn is_create(&self) -> bool { + match self { + Self::Legacy(tx) => tx.is_create(), + Self::Eip2930(tx) => tx.is_create(), + Self::Eip1559(tx) => tx.is_create(), + Self::Eip4844(tx) => tx.is_create(), + Self::Eip7702(tx) => tx.is_create(), + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.is_create(), + } + } + fn value(&self) -> U256 { match self { Self::Legacy(tx) => tx.value(), @@ -769,230 +780,6 @@ impl From for Transaction { } } -/// Signed transaction without its Hash. Used type for inserting into the DB. 
-/// -/// This can by converted to [`TransactionSigned`] by calling [`TransactionSignedNoHash::hash`]. -#[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref, Serialize, Deserialize)] -#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] -pub struct TransactionSignedNoHash { - /// The transaction signature values - pub signature: Signature, - /// Raw transaction info - #[deref] - #[as_ref] - pub transaction: Transaction, -} - -impl TransactionSignedNoHash { - /// Calculates the transaction hash. If used more than once, it's better to convert it to - /// [`TransactionSigned`] first. - pub fn hash(&self) -> B256 { - // pre-allocate buffer for the transaction - let mut buf = Vec::with_capacity(128 + self.transaction.input().len()); - self.transaction.eip2718_encode(&self.signature, &mut buf); - keccak256(&buf) - } - - /// Recover signer from signature and hash. - /// - /// Returns `None` if the transaction's signature is invalid, see also [`Self::recover_signer`]. - pub fn recover_signer(&self) -> Option
{ - // Optimism's Deposit transaction does not have a signature. Directly return the - // `from` address. - #[cfg(feature = "optimism")] - if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction { - return Some(from) - } - - let signature_hash = self.signature_hash(); - recover_signer(&self.signature, signature_hash) - } - - /// Recover signer from signature and hash _without ensuring that the signature has a low `s` - /// value_. - /// - /// Reuses a given buffer to avoid numerous reallocations when recovering batches. **Clears the - /// buffer before use.** - /// - /// Returns `None` if the transaction's signature is invalid, see also - /// [`recover_signer_unchecked`]. - /// - /// # Optimism - /// - /// For optimism this will return [`Address::ZERO`] if the Signature is empty, this is because pre bedrock (on OP mainnet), relay messages to the L2 Cross Domain Messenger were sent as legacy transactions from the zero address with an empty signature, e.g.: - /// This makes it possible to import pre bedrock transactions via the sender recovery stage. - pub fn encode_and_recover_unchecked(&self, buffer: &mut Vec) -> Option
{ - buffer.clear(); - self.transaction.encode_for_signing(buffer); - - // Optimism's Deposit transaction does not have a signature. Directly return the - // `from` address. - #[cfg(feature = "optimism")] - { - if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction { - return Some(from) - } - - // pre bedrock system transactions were sent from the zero address as legacy - // transactions with an empty signature - // - // NOTE: this is very hacky and only relevant for op-mainnet pre bedrock - if self.is_legacy() && self.signature == TxDeposit::signature() { - return Some(Address::ZERO) - } - } - - recover_signer_unchecked(&self.signature, keccak256(buffer)) - } - - /// Converts into a transaction type with its hash: [`TransactionSigned`]. - /// - /// Note: This will recalculate the hash of the transaction. - #[inline] - pub fn with_hash(self) -> TransactionSigned { - let Self { signature, transaction } = self; - TransactionSigned::new_unhashed(transaction, signature) - } - - /// Recovers a list of signers from a transaction list iterator - /// - /// Returns `None`, if some transaction's signature is invalid, see also - /// [`Self::recover_signer`]. 
- pub fn recover_signers<'a, T>(txes: T, num_txes: usize) -> Option> - where - T: IntoParallelIterator + IntoIterator + Send, - { - if num_txes < *PARALLEL_SENDER_RECOVERY_THRESHOLD { - txes.into_iter().map(|tx| tx.recover_signer()).collect() - } else { - txes.into_par_iter().map(|tx| tx.recover_signer()).collect() - } - } -} - -impl Default for TransactionSignedNoHash { - fn default() -> Self { - Self { signature: Signature::test_signature(), transaction: Default::default() } - } -} - -#[cfg(any(test, feature = "arbitrary"))] -impl<'a> arbitrary::Arbitrary<'a> for TransactionSignedNoHash { - fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let tx_signed = TransactionSigned::arbitrary(u)?; - - Ok(Self { signature: tx_signed.signature, transaction: tx_signed.transaction }) - } -} - -#[cfg(any(test, feature = "reth-codec"))] -impl reth_codecs::Compact for TransactionSignedNoHash { - fn to_compact(&self, buf: &mut B) -> usize - where - B: bytes::BufMut + AsMut<[u8]>, - { - let start = buf.as_mut().len(); - - // Placeholder for bitflags. 
- // The first byte uses 4 bits as flags: IsCompressed[1bit], TxType[2bits], Signature[1bit] - buf.put_u8(0); - - let sig_bit = self.signature.to_compact(buf) as u8; - let zstd_bit = self.transaction.input().len() >= 32; - - let tx_bits = if zstd_bit { - let mut tmp = Vec::with_capacity(256); - if cfg!(feature = "std") { - crate::compression::TRANSACTION_COMPRESSOR.with(|compressor| { - let mut compressor = compressor.borrow_mut(); - let tx_bits = self.transaction.to_compact(&mut tmp); - buf.put_slice(&compressor.compress(&tmp).expect("Failed to compress")); - tx_bits as u8 - }) - } else { - let mut compressor = crate::compression::create_tx_compressor(); - let tx_bits = self.transaction.to_compact(&mut tmp); - buf.put_slice(&compressor.compress(&tmp).expect("Failed to compress")); - tx_bits as u8 - } - } else { - self.transaction.to_compact(buf) as u8 - }; - - // Replace bitflags with the actual values - buf.as_mut()[start] = sig_bit | (tx_bits << 1) | ((zstd_bit as u8) << 3); - - buf.as_mut().len() - start - } - - fn from_compact(mut buf: &[u8], _len: usize) -> (Self, &[u8]) { - use bytes::Buf; - - // The first byte uses 4 bits as flags: IsCompressed[1], TxType[2], Signature[1] - let bitflags = buf.get_u8() as usize; - - let sig_bit = bitflags & 1; - let (signature, buf) = Signature::from_compact(buf, sig_bit); - - let zstd_bit = bitflags >> 3; - let (transaction, buf) = if zstd_bit != 0 { - if cfg!(feature = "std") { - crate::compression::TRANSACTION_DECOMPRESSOR.with(|decompressor| { - let mut decompressor = decompressor.borrow_mut(); - - // TODO: enforce that zstd is only present at a "top" level type - - let transaction_type = (bitflags & 0b110) >> 1; - let (transaction, _) = - Transaction::from_compact(decompressor.decompress(buf), transaction_type); - - (transaction, buf) - }) - } else { - let mut decompressor = crate::compression::create_tx_decompressor(); - let transaction_type = (bitflags & 0b110) >> 1; - let (transaction, _) = - 
Transaction::from_compact(decompressor.decompress(buf), transaction_type); - - (transaction, buf) - } - } else { - let transaction_type = bitflags >> 1; - Transaction::from_compact(buf, transaction_type) - }; - - (Self { signature, transaction }, buf) - } -} - -#[cfg(any(test, feature = "reth-codec"))] -impl reth_codecs::Compact for TransactionSigned { - fn to_compact(&self, buf: &mut B) -> usize - where - B: bytes::BufMut + AsMut<[u8]>, - { - let tx: TransactionSignedNoHash = self.clone().into(); - tx.to_compact(buf) - } - - fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { - let (tx, buf) = TransactionSignedNoHash::from_compact(buf, len); - (tx.into(), buf) - } -} - -impl From for TransactionSigned { - fn from(tx: TransactionSignedNoHash) -> Self { - tx.with_hash() - } -} - -impl From for TransactionSignedNoHash { - fn from(tx: TransactionSigned) -> Self { - Self { signature: tx.signature, transaction: tx.transaction } - } -} - /// Signed transaction. #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(rlp))] #[derive(Debug, Clone, Eq, AsRef, Deref, Serialize, Deserialize)] @@ -1095,56 +882,39 @@ impl TransactionSigned { } } - /// Returns the [`TransactionSignedEcRecovered`] transaction with the given sender. + /// Returns the [`RecoveredTx`] transaction with the given sender. #[inline] - pub const fn with_signer(self, signer: Address) -> TransactionSignedEcRecovered { - TransactionSignedEcRecovered::from_signed_transaction(self, signer) + pub const fn with_signer(self, signer: Address) -> RecoveredTx { + RecoveredTx::from_signed_transaction(self, signer) } - /// Consumes the type, recover signer and return [`TransactionSignedEcRecovered`] + /// Consumes the type, recover signer and return [`RecoveredTx`] /// /// Returns `None` if the transaction's signature is invalid, see also [`Self::recover_signer`]. 
- pub fn into_ecrecovered(self) -> Option { + pub fn into_ecrecovered(self) -> Option { let signer = self.recover_signer()?; - Some(TransactionSignedEcRecovered { signed_transaction: self, signer }) + Some(RecoveredTx { signed_transaction: self, signer }) } - /// Consumes the type, recover signer and return [`TransactionSignedEcRecovered`] _without + /// Consumes the type, recover signer and return [`RecoveredTx`] _without /// ensuring that the signature has a low `s` value_ (EIP-2). /// /// Returns `None` if the transaction's signature is invalid, see also /// [`Self::recover_signer_unchecked`]. - pub fn into_ecrecovered_unchecked(self) -> Option { + pub fn into_ecrecovered_unchecked(self) -> Option { let signer = self.recover_signer_unchecked()?; - Some(TransactionSignedEcRecovered { signed_transaction: self, signer }) + Some(RecoveredTx { signed_transaction: self, signer }) } - /// Tries to recover signer and return [`TransactionSignedEcRecovered`] by cloning the type. - pub fn try_ecrecovered(&self) -> Option { - let signer = self.recover_signer()?; - Some(TransactionSignedEcRecovered { signed_transaction: self.clone(), signer }) - } - - /// Tries to recover signer and return [`TransactionSignedEcRecovered`]. - /// - /// Returns `Err(Self)` if the transaction's signature is invalid, see also - /// [`Self::recover_signer`]. - pub fn try_into_ecrecovered(self) -> Result { - match self.recover_signer() { - None => Err(self), - Some(signer) => Ok(TransactionSignedEcRecovered { signed_transaction: self, signer }), - } - } - - /// Tries to recover signer and return [`TransactionSignedEcRecovered`]. _without ensuring that + /// Tries to recover signer and return [`RecoveredTx`]. _without ensuring that /// the signature has a low `s` value_ (EIP-2). /// /// Returns `Err(Self)` if the transaction's signature is invalid, see also /// [`Self::recover_signer_unchecked`]. 
- pub fn try_into_ecrecovered_unchecked(self) -> Result { + pub fn try_into_ecrecovered_unchecked(self) -> Result { match self.recover_signer_unchecked() { None => Err(self), - Some(signer) => Ok(TransactionSignedEcRecovered { signed_transaction: self, signer }), + Some(signer) => Ok(RecoveredTx { signed_transaction: self, signer }), } } @@ -1166,7 +936,7 @@ impl TransactionSigned { /// /// Refer to the docs for [`Self::decode_rlp_legacy_transaction`] for details on the exact /// format expected. - pub(crate) fn decode_rlp_legacy_transaction_tuple( + pub fn decode_rlp_legacy_transaction_tuple( data: &mut &[u8], ) -> alloy_rlp::Result<(TxLegacy, TxHash, Signature)> { // keep this around, so we can use it to calculate the hash @@ -1245,14 +1015,15 @@ impl SignedTransaction for TransactionSigned { recover_signer(&self.signature, signature_hash) } - fn recover_signer_unchecked(&self) -> Option
{ + fn recover_signer_unchecked_with_buf(&self, buf: &mut Vec) -> Option
{ // Optimism's Deposit transaction does not have a signature. Directly return the // `from` address. #[cfg(feature = "optimism")] if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction { return Some(from) } - let signature_hash = self.signature_hash(); + self.encode_for_signing(buf); + let signature_hash = keccak256(buf); recover_signer_unchecked(&self.signature, signature_hash) } } @@ -1391,6 +1162,10 @@ impl alloy_consensus::Transaction for TransactionSigned { self.deref().kind() } + fn is_create(&self) -> bool { + self.deref().is_create() + } + fn value(&self) -> U256 { self.deref().value() } @@ -1416,8 +1191,8 @@ impl alloy_consensus::Transaction for TransactionSigned { } } -impl From for TransactionSigned { - fn from(recovered: TransactionSignedEcRecovered) -> Self { +impl From for TransactionSigned { + fn from(recovered: RecoveredTx) -> Self { recovered.signed_transaction } } @@ -1544,6 +1319,86 @@ impl Decodable2718 for TransactionSigned { } } +#[cfg(any(test, feature = "reth-codec"))] +impl reth_codecs::Compact for TransactionSigned { + fn to_compact(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + let start = buf.as_mut().len(); + + // Placeholder for bitflags. 
+ // The first byte uses 4 bits as flags: IsCompressed[1bit], TxType[2bits], Signature[1bit] + buf.put_u8(0); + + let sig_bit = self.signature.to_compact(buf) as u8; + let zstd_bit = self.transaction.input().len() >= 32; + + let tx_bits = if zstd_bit { + let mut tmp = Vec::with_capacity(256); + if cfg!(feature = "std") { + crate::compression::TRANSACTION_COMPRESSOR.with(|compressor| { + let mut compressor = compressor.borrow_mut(); + let tx_bits = self.transaction.to_compact(&mut tmp); + buf.put_slice(&compressor.compress(&tmp).expect("Failed to compress")); + tx_bits as u8 + }) + } else { + let mut compressor = crate::compression::create_tx_compressor(); + let tx_bits = self.transaction.to_compact(&mut tmp); + buf.put_slice(&compressor.compress(&tmp).expect("Failed to compress")); + tx_bits as u8 + } + } else { + self.transaction.to_compact(buf) as u8 + }; + + // Replace bitflags with the actual values + buf.as_mut()[start] = sig_bit | (tx_bits << 1) | ((zstd_bit as u8) << 3); + + buf.as_mut().len() - start + } + + fn from_compact(mut buf: &[u8], _len: usize) -> (Self, &[u8]) { + use bytes::Buf; + + // The first byte uses 4 bits as flags: IsCompressed[1], TxType[2], Signature[1] + let bitflags = buf.get_u8() as usize; + + let sig_bit = bitflags & 1; + let (signature, buf) = Signature::from_compact(buf, sig_bit); + + let zstd_bit = bitflags >> 3; + let (transaction, buf) = if zstd_bit != 0 { + if cfg!(feature = "std") { + crate::compression::TRANSACTION_DECOMPRESSOR.with(|decompressor| { + let mut decompressor = decompressor.borrow_mut(); + + // TODO: enforce that zstd is only present at a "top" level type + + let transaction_type = (bitflags & 0b110) >> 1; + let (transaction, _) = + Transaction::from_compact(decompressor.decompress(buf), transaction_type); + + (transaction, buf) + }) + } else { + let mut decompressor = crate::compression::create_tx_decompressor(); + let transaction_type = (bitflags & 0b110) >> 1; + let (transaction, _) = + 
Transaction::from_compact(decompressor.decompress(buf), transaction_type); + + (transaction, buf) + } + } else { + let transaction_type = bitflags >> 1; + Transaction::from_compact(buf, transaction_type) + }; + + (Self { signature, transaction, hash: Default::default() }, buf) + } +} + macro_rules! impl_from_signed { ($($tx:ident),*) => { $( @@ -1603,9 +1458,12 @@ impl<'a> arbitrary::Arbitrary<'a> for TransactionSigned { } } +/// Type alias kept for backward compatibility. +pub type TransactionSignedEcRecovered = RecoveredTx; + /// Signed transaction with recovered signer. #[derive(Debug, Clone, PartialEq, Hash, Eq, AsRef, Deref)] -pub struct TransactionSignedEcRecovered { +pub struct RecoveredTx { /// Signer of the transaction signer: Address, /// Signed transaction @@ -1614,14 +1472,19 @@ pub struct TransactionSignedEcRecovered { signed_transaction: T, } -// === impl TransactionSignedEcRecovered === +// === impl RecoveredTx === -impl TransactionSignedEcRecovered { +impl RecoveredTx { /// Signer of transaction recovered from signature pub const fn signer(&self) -> Address { self.signer } + /// Reference to the signer of transaction recovered from signature + pub const fn signer_ref(&self) -> &Address { + &self.signer + } + /// Returns a reference to [`TransactionSigned`] pub const fn as_signed(&self) -> &T { &self.signed_transaction @@ -1637,7 +1500,7 @@ impl TransactionSignedEcRecovered { (self.signed_transaction, self.signer) } - /// Create [`TransactionSignedEcRecovered`] from [`TransactionSigned`] and [`Address`] of the + /// Create [`RecoveredTx`] from [`TransactionSigned`] and [`Address`] of the /// signer. #[inline] pub const fn from_signed_transaction(signed_transaction: T, signer: Address) -> Self { @@ -1645,7 +1508,7 @@ impl TransactionSignedEcRecovered { } } -impl Encodable for TransactionSignedEcRecovered { +impl Encodable for RecoveredTx { /// This encodes the transaction _with_ the signature, and an rlp header. 
/// /// Refer to docs for [`TransactionSigned::encode`] for details on the exact format. @@ -1658,7 +1521,7 @@ impl Encodable for TransactionSignedEcRecovered { } } -impl Decodable for TransactionSignedEcRecovered { +impl Decodable for RecoveredTx { fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { let signed_transaction = T::decode(buf)?; let signer = signed_transaction @@ -1668,20 +1531,55 @@ impl Decodable for TransactionSignedEcRecovered { } } -/// Extension trait for [`SignedTransaction`] to convert it into [`TransactionSignedEcRecovered`]. +impl Encodable2718 for RecoveredTx { + fn type_flag(&self) -> Option { + self.signed_transaction.type_flag() + } + + fn encode_2718_len(&self) -> usize { + self.signed_transaction.encode_2718_len() + } + + fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { + self.signed_transaction.encode_2718(out) + } + + fn trie_hash(&self) -> B256 { + self.signed_transaction.trie_hash() + } +} + +/// Extension trait for [`SignedTransaction`] to convert it into [`RecoveredTx`]. pub trait SignedTransactionIntoRecoveredExt: SignedTransaction { - /// Consumes the type, recover signer and return [`TransactionSignedEcRecovered`] _without + /// Tries to recover signer and return [`RecoveredTx`] by cloning the type. + fn try_ecrecovered(&self) -> Option> { + let signer = self.recover_signer()?; + Some(RecoveredTx { signed_transaction: self.clone(), signer }) + } + + /// Tries to recover signer and return [`RecoveredTx`]. + /// + /// Returns `Err(Self)` if the transaction's signature is invalid, see also + /// [`SignedTransaction::recover_signer`]. + fn try_into_ecrecovered(self) -> Result, Self> { + match self.recover_signer() { + None => Err(self), + Some(signer) => Ok(RecoveredTx { signed_transaction: self, signer }), + } + } + + /// Consumes the type, recover signer and return [`RecoveredTx`] _without /// ensuring that the signature has a low `s` value_ (EIP-2). /// /// Returns `None` if the transaction's signature is invalid. 
- fn into_ecrecovered_unchecked(self) -> Option> { + fn into_ecrecovered_unchecked(self) -> Option> { let signer = self.recover_signer_unchecked()?; - Some(TransactionSignedEcRecovered::from_signed_transaction(self, signer)) + Some(RecoveredTx::from_signed_transaction(self, signer)) } - /// Returns the [`TransactionSignedEcRecovered`] transaction with the given sender. - fn with_signer(self, signer: Address) -> TransactionSignedEcRecovered { - TransactionSignedEcRecovered::from_signed_transaction(self, signer) + /// Returns the [`RecoveredTx`] transaction with the given sender. + fn with_signer(self, signer: Address) -> RecoveredTx { + RecoveredTx::from_signed_transaction(self, signer) } } @@ -1927,7 +1825,7 @@ where mod tests { use crate::{ transaction::{TxEip1559, TxKind, TxLegacy}, - Transaction, TransactionSigned, TransactionSignedEcRecovered, + RecoveredTx, Transaction, TransactionSigned, }; use alloy_consensus::Transaction as _; use alloy_eips::eip2718::{Decodable2718, Encodable2718}; @@ -2188,8 +2086,7 @@ mod tests { let tx = TransactionSigned::decode(&mut &input[..]).unwrap(); let recovered = tx.into_ecrecovered().unwrap(); - let decoded = - TransactionSignedEcRecovered::decode(&mut &alloy_rlp::encode(&recovered)[..]).unwrap(); + let decoded = RecoveredTx::decode(&mut &alloy_rlp::encode(&recovered)[..]).unwrap(); assert_eq!(recovered, decoded) } diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index 145660f44c71..cdcc6b808dd7 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -5,13 +5,12 @@ use super::{ error::TransactionConversionError, recover_signer_unchecked, signature::recover_signer, TxEip7702, }; -use crate::{ - BlobTransaction, Transaction, TransactionSigned, TransactionSignedEcRecovered, TxType, -}; +use crate::{BlobTransaction, RecoveredTx, Transaction, TransactionSigned, TxType}; +use alloc::vec::Vec; use alloy_consensus::{ 
constants::EIP4844_TX_TYPE_ID, transaction::{TxEip1559, TxEip2930, TxEip4844, TxLegacy}, - Signed, TxEip4844WithSidecar, + SignableTransaction, Signed, TxEip4844WithSidecar, }; use alloy_eips::{ eip2718::{Decodable2718, Eip2718Result, Encodable2718}, @@ -25,8 +24,8 @@ use alloy_primitives::{ use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header}; use bytes::Buf; use core::hash::{Hash, Hasher}; -use derive_more::{AsRef, Deref}; use reth_primitives_traits::{InMemorySize, SignedTransaction}; +use revm_primitives::keccak256; use serde::{Deserialize, Serialize}; /// A response to `GetPooledTransactions`. This can include either a blob transaction, or a @@ -75,7 +74,7 @@ impl PooledTransactionsElement { } } - /// Converts from an EIP-4844 [`TransactionSignedEcRecovered`] to a + /// Converts from an EIP-4844 [`RecoveredTx`] to a /// [`PooledTransactionsElementEcRecovered`] with the given sidecar. /// /// Returns an `Err` containing the original `TransactionSigned` if the transaction is not @@ -149,14 +148,26 @@ impl PooledTransactionsElement { pub fn try_into_ecrecovered(self) -> Result { match self.recover_signer() { None => Err(self), - Some(signer) => Ok(PooledTransactionsElementEcRecovered { transaction: self, signer }), + Some(signer) => Ok(RecoveredTx { signed_transaction: self, signer }), + } + } + + /// This encodes the transaction _without_ the signature, and is only suitable for creating a + /// hash intended for signing. 
+ pub fn encode_for_signing(&self, out: &mut dyn bytes::BufMut) { + match self { + Self::Legacy(tx) => tx.tx().encode_for_signing(out), + Self::Eip2930(tx) => tx.tx().encode_for_signing(out), + Self::Eip1559(tx) => tx.tx().encode_for_signing(out), + Self::BlobTransaction(tx) => tx.tx().encode_for_signing(out), + Self::Eip7702(tx) => tx.tx().encode_for_signing(out), } } - /// Create [`TransactionSignedEcRecovered`] by converting this transaction into + /// Create [`RecoveredTx`] by converting this transaction into /// [`TransactionSigned`] and [`Address`] of the signer. - pub fn into_ecrecovered_transaction(self, signer: Address) -> TransactionSignedEcRecovered { - TransactionSignedEcRecovered::from_signed_transaction(self.into_transaction(), signer) + pub fn into_ecrecovered_transaction(self, signer: Address) -> RecoveredTx { + RecoveredTx::from_signed_transaction(self.into_transaction(), signer) } /// Returns the inner [`TransactionSigned`]. @@ -511,6 +522,16 @@ impl alloy_consensus::Transaction for PooledTransactionsElement { } } + fn is_create(&self) -> bool { + match self { + Self::Legacy(tx) => tx.tx().is_create(), + Self::Eip2930(tx) => tx.tx().is_create(), + Self::Eip1559(tx) => tx.tx().is_create(), + Self::Eip7702(tx) => tx.tx().is_create(), + Self::BlobTransaction(tx) => tx.tx().is_create(), + } + } + fn value(&self) -> U256 { match self { Self::Legacy(tx) => tx.tx().value(), @@ -600,8 +621,9 @@ impl SignedTransaction for PooledTransactionsElement { recover_signer(self.signature(), signature_hash) } - fn recover_signer_unchecked(&self) -> Option
{ - let signature_hash = self.signature_hash(); + fn recover_signer_unchecked_with_buf(&self, buf: &mut Vec) -> Option
{ + self.encode_for_signing(buf); + let signature_hash = keccak256(buf); recover_signer_unchecked(self.signature(), signature_hash) } } @@ -620,7 +642,7 @@ impl InMemorySize for PooledTransactionsElement { impl From for PooledTransactionsElement { fn from(recovered: PooledTransactionsElementEcRecovered) -> Self { - recovered.into_transaction() + recovered.into_signed() } } @@ -666,92 +688,45 @@ impl<'a> arbitrary::Arbitrary<'a> for PooledTransactionsElement { } /// A signed pooled transaction with recovered signer. -#[derive(Debug, Clone, PartialEq, Eq, AsRef, Deref)] -pub struct PooledTransactionsElementEcRecovered { - /// Signer of the transaction - signer: Address, - /// Signed transaction - #[deref] - #[as_ref] - transaction: T, -} - -impl PooledTransactionsElementEcRecovered { - /// Create an instance from the given transaction and the [`Address`] of the signer. - pub const fn from_signed_transaction(transaction: T, signer: Address) -> Self { - Self { transaction, signer } - } - - /// Signer of transaction recovered from signature - pub const fn signer(&self) -> Address { - self.signer - } - - /// Consume the type and return the transaction - pub fn into_transaction(self) -> T { - self.transaction - } +pub type PooledTransactionsElementEcRecovered = RecoveredTx; - /// Dissolve Self to its component - pub fn into_components(self) -> (T, Address) { - (self.transaction, self.signer) - } -} impl PooledTransactionsElementEcRecovered { - /// Transform back to [`TransactionSignedEcRecovered`] - pub fn into_ecrecovered_transaction(self) -> TransactionSignedEcRecovered { - let (tx, signer) = self.into_components(); + /// Transform back to [`RecoveredTx`] + pub fn into_ecrecovered_transaction(self) -> RecoveredTx { + let (tx, signer) = self.to_components(); tx.into_ecrecovered_transaction(signer) } - /// Converts from an EIP-4844 [`TransactionSignedEcRecovered`] to a + /// Converts from an EIP-4844 [`RecoveredTx`] to a /// [`PooledTransactionsElementEcRecovered`] with 
the given sidecar. /// /// Returns the transaction is not an EIP-4844 transaction. pub fn try_from_blob_transaction( - tx: TransactionSignedEcRecovered, + tx: RecoveredTx, sidecar: BlobTransactionSidecar, - ) -> Result { - let TransactionSignedEcRecovered { signer, signed_transaction } = tx; + ) -> Result { + let RecoveredTx { signer, signed_transaction } = tx; let transaction = PooledTransactionsElement::try_from_blob_transaction(signed_transaction, sidecar) - .map_err(|tx| TransactionSignedEcRecovered { signer, signed_transaction: tx })?; - Ok(Self { transaction, signer }) + .map_err(|tx| RecoveredTx { signer, signed_transaction: tx })?; + Ok(Self::from_signed_transaction(transaction, signer)) } } -/// Converts a `TransactionSignedEcRecovered` into a `PooledTransactionsElementEcRecovered`. -impl TryFrom for PooledTransactionsElementEcRecovered { +/// Converts a `Recovered` into a `PooledTransactionsElementEcRecovered`. +impl TryFrom for PooledTransactionsElementEcRecovered { type Error = TransactionConversionError; - fn try_from(tx: TransactionSignedEcRecovered) -> Result { + fn try_from(tx: RecoveredTx) -> Result { match PooledTransactionsElement::try_from(tx.signed_transaction) { Ok(pooled_transaction) => { - Ok(Self { transaction: pooled_transaction, signer: tx.signer }) + Ok(Self::from_signed_transaction(pooled_transaction, tx.signer)) } Err(_) => Err(TransactionConversionError::UnsupportedForP2P), } } } -impl Encodable2718 for PooledTransactionsElementEcRecovered { - fn type_flag(&self) -> Option { - self.transaction.type_flag() - } - - fn encode_2718_len(&self) -> usize { - self.transaction.encode_2718_len() - } - - fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { - self.transaction.encode_2718(out) - } - - fn trie_hash(&self) -> B256 { - self.transaction.trie_hash() - } -} - #[cfg(test)] mod tests { use super::*; diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index 784a976ab792..1d709b902b53 
100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -29,7 +29,7 @@ pub const COMPACT_EXTENDED_IDENTIFIER_FLAG: usize = 3; /// Transaction Type /// /// Currently being used as 2-bit type when encoding it to `reth_codecs::Compact` on -/// [`crate::TransactionSignedNoHash`]. Adding more transaction types will break the codec and +/// [`crate::TransactionSigned`]. Adding more transaction types will break the codec and /// database format. /// /// Other required changes when adding a new type can be seen on [PR#3953](https://github.com/paradigmxyz/reth/pull/3953/files). diff --git a/crates/primitives/src/transaction/util.rs b/crates/primitives/src/transaction/util.rs index 7964cc1c5f00..8eb1a639d965 100644 --- a/crates/primitives/src/transaction/util.rs +++ b/crates/primitives/src/transaction/util.rs @@ -1,7 +1,10 @@ +//! Utility functions for signature. + use alloy_primitives::{Address, PrimitiveSignature as Signature}; +/// Secp256k1 utility functions. 
#[cfg(feature = "secp256k1")] -pub(crate) mod secp256k1 { +pub mod secp256k1 { pub use super::impl_secp256k1::*; } diff --git a/crates/prune/prune/Cargo.toml b/crates/prune/prune/Cargo.toml index 41156d3e56b8..f772ff546691 100644 --- a/crates/prune/prune/Cargo.toml +++ b/crates/prune/prune/Cargo.toml @@ -22,9 +22,11 @@ reth-provider.workspace = true reth-tokio-util.workspace = true reth-config.workspace = true reth-prune-types.workspace = true +reth-primitives-traits.workspace = true reth-static-file-types.workspace = true # ethereum +alloy-consensus.workspace = true alloy-eips.workspace = true # metrics diff --git a/crates/prune/prune/src/builder.rs b/crates/prune/prune/src/builder.rs index 8088bd7e12b2..4fd56617121a 100644 --- a/crates/prune/prune/src/builder.rs +++ b/crates/prune/prune/src/builder.rs @@ -2,8 +2,9 @@ use crate::{segments::SegmentSet, Pruner}; use alloy_eips::eip2718::Encodable2718; use reth_chainspec::MAINNET; use reth_config::PruneConfig; -use reth_db::transaction::DbTxMut; +use reth_db::{table::Value, transaction::DbTxMut}; use reth_exex_types::FinishedExExHeight; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ providers::StaticFileProvider, BlockReader, DBProvider, DatabaseProviderFactory, NodePrimitivesProvider, PruneCheckpointWriter, StaticFileProviderFactory, @@ -80,7 +81,9 @@ impl PrunerBuilder { PF: DatabaseProviderFactory< ProviderRW: PruneCheckpointWriter + BlockReader - + StaticFileProviderFactory, + + StaticFileProviderFactory< + Primitives: NodePrimitives, + >, > + StaticFileProviderFactory< Primitives = ::Primitives, >, @@ -104,7 +107,7 @@ impl PrunerBuilder { static_file_provider: StaticFileProvider, ) -> Pruner where - Provider: StaticFileProviderFactory + Provider: StaticFileProviderFactory> + DBProvider + BlockReader + PruneCheckpointWriter, diff --git a/crates/prune/prune/src/segments/receipts.rs b/crates/prune/prune/src/segments/receipts.rs index c081bf88c7d2..a365738a777d 100644 --- 
a/crates/prune/prune/src/segments/receipts.rs +++ b/crates/prune/prune/src/segments/receipts.rs @@ -6,10 +6,11 @@ //! node after static file producer has finished use crate::{db_ext::DbTxPruneExt, segments::PruneInput, PrunerError}; -use reth_db::{tables, transaction::DbTxMut}; +use reth_db::{table::Value, tables, transaction::DbTxMut}; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ - errors::provider::ProviderResult, BlockReader, DBProvider, PruneCheckpointWriter, - TransactionsProvider, + errors::provider::ProviderResult, BlockReader, DBProvider, NodePrimitivesProvider, + PruneCheckpointWriter, TransactionsProvider, }; use reth_prune_types::{ PruneCheckpoint, PruneProgress, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, @@ -21,7 +22,10 @@ pub(crate) fn prune( input: PruneInput, ) -> Result where - Provider: DBProvider + TransactionsProvider + BlockReader, + Provider: DBProvider + + TransactionsProvider + + BlockReader + + NodePrimitivesProvider>, { let tx_range = match input.get_next_tx_num_range(provider)? 
{ Some(range) => range, @@ -35,7 +39,9 @@ where let mut limiter = input.limiter; let mut last_pruned_transaction = tx_range_end; - let (pruned, done) = provider.tx_ref().prune_table_with_range::( + let (pruned, done) = provider.tx_ref().prune_table_with_range::::Receipt, + >>( tx_range, &mut limiter, |_| false, diff --git a/crates/prune/prune/src/segments/set.rs b/crates/prune/prune/src/segments/set.rs index 198d01ce44dc..d7bbee1042ba 100644 --- a/crates/prune/prune/src/segments/set.rs +++ b/crates/prune/prune/src/segments/set.rs @@ -3,7 +3,8 @@ use crate::segments::{ UserReceipts, }; use alloy_eips::eip2718::Encodable2718; -use reth_db::transaction::DbTxMut; +use reth_db::{table::Value, transaction::DbTxMut}; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ providers::StaticFileProvider, BlockReader, DBProvider, PruneCheckpointWriter, StaticFileProviderFactory, @@ -46,7 +47,7 @@ impl SegmentSet { impl SegmentSet where - Provider: StaticFileProviderFactory + Provider: StaticFileProviderFactory> + DBProvider + PruneCheckpointWriter + BlockReader, diff --git a/crates/prune/prune/src/segments/static_file/receipts.rs b/crates/prune/prune/src/segments/static_file/receipts.rs index 5221418674aa..6cdc53759904 100644 --- a/crates/prune/prune/src/segments/static_file/receipts.rs +++ b/crates/prune/prune/src/segments/static_file/receipts.rs @@ -2,7 +2,8 @@ use crate::{ segments::{PruneInput, Segment}, PrunerError, }; -use reth_db::transaction::DbTxMut; +use reth_db::{table::Value, transaction::DbTxMut}; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ errors::provider::ProviderResult, providers::StaticFileProvider, BlockReader, DBProvider, PruneCheckpointWriter, StaticFileProviderFactory, TransactionsProvider, @@ -23,7 +24,7 @@ impl Receipts { impl Segment for Receipts where - Provider: StaticFileProviderFactory + Provider: StaticFileProviderFactory> + DBProvider + PruneCheckpointWriter + TransactionsProvider diff --git 
a/crates/prune/prune/src/segments/static_file/transactions.rs b/crates/prune/prune/src/segments/static_file/transactions.rs index 7dc7a23191a0..20274e5dc706 100644 --- a/crates/prune/prune/src/segments/static_file/transactions.rs +++ b/crates/prune/prune/src/segments/static_file/transactions.rs @@ -3,7 +3,8 @@ use crate::{ segments::{PruneInput, Segment}, PrunerError, }; -use reth_db::{tables, transaction::DbTxMut}; +use reth_db::{table::Value, tables, transaction::DbTxMut}; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ providers::StaticFileProvider, BlockReader, DBProvider, StaticFileProviderFactory, TransactionsProvider, @@ -27,8 +28,10 @@ impl Transactions { impl Segment for Transactions where - Provider: - DBProvider + TransactionsProvider + BlockReader + StaticFileProviderFactory, + Provider: DBProvider + + TransactionsProvider + + BlockReader + + StaticFileProviderFactory>, { fn segment(&self) -> PruneSegment { PruneSegment::Transactions @@ -56,7 +59,9 @@ where let mut limiter = input.limiter; let mut last_pruned_transaction = *tx_range.end(); - let (pruned, done) = provider.tx_ref().prune_table_with_range::( + let (pruned, done) = provider.tx_ref().prune_table_with_range::::SignedTx, + >>( tx_range, &mut limiter, |_| false, diff --git a/crates/prune/prune/src/segments/user/receipts.rs b/crates/prune/prune/src/segments/user/receipts.rs index 5bc9feaf023d..97708ad6de18 100644 --- a/crates/prune/prune/src/segments/user/receipts.rs +++ b/crates/prune/prune/src/segments/user/receipts.rs @@ -2,10 +2,11 @@ use crate::{ segments::{PruneInput, Segment}, PrunerError, }; -use reth_db::transaction::DbTxMut; +use reth_db::{table::Value, transaction::DbTxMut}; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ - errors::provider::ProviderResult, BlockReader, DBProvider, PruneCheckpointWriter, - TransactionsProvider, + errors::provider::ProviderResult, BlockReader, DBProvider, NodePrimitivesProvider, + PruneCheckpointWriter, 
TransactionsProvider, }; use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, SegmentOutput}; use tracing::instrument; @@ -23,7 +24,11 @@ impl Receipts { impl Segment for Receipts where - Provider: DBProvider + PruneCheckpointWriter + TransactionsProvider + BlockReader, + Provider: DBProvider + + PruneCheckpointWriter + + TransactionsProvider + + BlockReader + + NodePrimitivesProvider>, { fn segment(&self) -> PruneSegment { PruneSegment::Receipts diff --git a/crates/prune/prune/src/segments/user/receipts_by_logs.rs b/crates/prune/prune/src/segments/user/receipts_by_logs.rs index ee404b074c3c..778aac1e7b9b 100644 --- a/crates/prune/prune/src/segments/user/receipts_by_logs.rs +++ b/crates/prune/prune/src/segments/user/receipts_by_logs.rs @@ -3,8 +3,12 @@ use crate::{ segments::{PruneInput, Segment}, PrunerError, }; -use reth_db::{tables, transaction::DbTxMut}; -use reth_provider::{BlockReader, DBProvider, PruneCheckpointWriter, TransactionsProvider}; +use alloy_consensus::TxReceipt; +use reth_db::{table::Value, tables, transaction::DbTxMut}; +use reth_primitives_traits::NodePrimitives; +use reth_provider::{ + BlockReader, DBProvider, NodePrimitivesProvider, PruneCheckpointWriter, TransactionsProvider, +}; use reth_prune_types::{ PruneCheckpoint, PruneMode, PruneProgress, PrunePurpose, PruneSegment, ReceiptsLogPruneConfig, SegmentOutput, MINIMUM_PRUNING_DISTANCE, @@ -23,7 +27,11 @@ impl ReceiptsByLogs { impl Segment for ReceiptsByLogs where - Provider: DBProvider + PruneCheckpointWriter + TransactionsProvider + BlockReader, + Provider: DBProvider + + PruneCheckpointWriter + + TransactionsProvider + + BlockReader + + NodePrimitivesProvider>, { fn segment(&self) -> PruneSegment { PruneSegment::ContractLogs @@ -141,12 +149,14 @@ where // Delete receipts, except the ones in the inclusion list let mut last_skipped_transaction = 0; let deleted; - (deleted, done) = provider.tx_ref().prune_table_with_range::( + (deleted, done) = 
provider.tx_ref().prune_table_with_range::::Receipt, + >>( tx_range, &mut limiter, |(tx_num, receipt)| { let skip = num_addresses > 0 && - receipt.logs.iter().any(|log| { + receipt.logs().iter().any(|log| { filtered_addresses[..num_addresses].contains(&&log.address) }); diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 95def23a4432..cc1c8edcb8db 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -37,12 +37,13 @@ alloy-consensus.workspace = true [features] default = ["std"] std = [ - "reth-primitives/std", - "alloy-primitives/std", - "revm/std", - "alloy-eips/std", - "alloy-consensus/std", - "reth-primitives-traits/std", + "reth-primitives/std", + "alloy-primitives/std", + "revm/std", + "alloy-eips/std", + "alloy-consensus/std", + "reth-primitives-traits/std", + "reth-ethereum-forks/std" ] witness = ["dep:reth-trie"] test-utils = [ @@ -54,10 +55,11 @@ test-utils = [ "reth-primitives-traits/test-utils", ] serde = [ - "revm/serde", - "alloy-eips/serde", - "alloy-primitives/serde", - "alloy-consensus/serde", - "reth-primitives-traits/serde", - "reth-trie?/serde", + "revm/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "alloy-consensus/serde", + "reth-primitives-traits/serde", + "reth-trie?/serde", + "reth-ethereum-forks/serde" ] diff --git a/crates/revm/src/batch.rs b/crates/revm/src/batch.rs index 15ba049250f5..c980bdc987c3 100644 --- a/crates/revm/src/batch.rs +++ b/crates/revm/src/batch.rs @@ -3,7 +3,7 @@ use alloc::vec::Vec; use alloy_eips::eip7685::Requests; -use alloy_primitives::{map::HashSet, Address, BlockNumber}; +use alloy_primitives::{map::HashSet, Address, BlockNumber, Log}; use reth_execution_errors::{BlockExecutionError, InternalBlockExecutionError}; use reth_primitives::Receipts; use reth_primitives_traits::Receipt; @@ -14,7 +14,7 @@ use revm::db::states::bundle_state::BundleRetention; /// - recording receipts during execution of multiple blocks. /// - pruning receipts according to the pruning configuration. 
/// - batch range if known -#[derive(Debug, Default)] +#[derive(Debug)] pub struct BlockBatchRecord { /// Pruning configuration. prune_modes: PruneModes, @@ -43,6 +43,19 @@ pub struct BlockBatchRecord { tip: Option, } +impl Default for BlockBatchRecord { + fn default() -> Self { + Self { + prune_modes: Default::default(), + receipts: Default::default(), + requests: Default::default(), + pruning_address_filter: Default::default(), + first_block: Default::default(), + tip: Default::default(), + } + } +} + impl BlockBatchRecord { /// Create a new receipts recorder with the given pruning configuration. pub fn new(prune_modes: PruneModes) -> Self @@ -83,10 +96,7 @@ impl BlockBatchRecord { } /// Returns all recorded receipts. - pub fn take_receipts(&mut self) -> Receipts - where - T: Default, - { + pub fn take_receipts(&mut self) -> Receipts { core::mem::take(&mut self.receipts) } @@ -102,7 +112,7 @@ impl BlockBatchRecord { /// Returns the [`BundleRetention`] for the given block based on the configured prune modes. pub fn bundle_retention(&self, block_number: BlockNumber) -> BundleRetention { - if self.tip.map_or(true, |tip| { + if self.tip.is_none_or(|tip| { !self .prune_modes .account_history @@ -121,7 +131,7 @@ impl BlockBatchRecord { /// Save receipts to the executor. pub fn save_receipts(&mut self, receipts: Vec) -> Result<(), BlockExecutionError> where - T: Receipt, + T: Receipt, { let mut receipts = receipts.into_iter().map(Some).collect(); // Prune receipts if necessary. @@ -134,7 +144,7 @@ impl BlockBatchRecord { /// Prune receipts according to the pruning configuration. 
fn prune_receipts(&mut self, receipts: &mut Vec>) -> Result<(), PruneSegmentError> where - T: Receipt, + T: Receipt, { let (Some(first_block), Some(tip)) = (self.first_block, self.tip) else { return Ok(()) }; diff --git a/crates/revm/src/test_utils.rs b/crates/revm/src/test_utils.rs index 443d1d5ebcf2..9460d3e1c784 100644 --- a/crates/revm/src/test_utils.rs +++ b/crates/revm/src/test_utils.rs @@ -6,13 +6,13 @@ use alloy_primitives::{ }; use reth_primitives::{Account, Bytecode}; use reth_storage_api::{ - AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider, - StorageRootProvider, + AccountReader, BlockHashReader, HashedPostStateProvider, StateProofProvider, StateProvider, + StateRootProvider, StorageRootProvider, }; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ - updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, - StorageMultiProof, StorageProof, TrieInput, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, KeccakKeyHasher, + MultiProof, StorageMultiProof, StorageProof, TrieInput, }; /// Mock state for testing @@ -150,6 +150,12 @@ impl StateProofProvider for StateProviderTest { } } +impl HashedPostStateProvider for StateProviderTest { + fn hashed_post_state(&self, bundle_state: &revm::db::BundleState) -> HashedPostState { + HashedPostState::from_bundle_state::(bundle_state.state()) + } +} + impl StateProvider for StateProviderTest { fn storage( &self, diff --git a/crates/rpc/rpc-api/src/lib.rs b/crates/rpc/rpc-api/src/lib.rs index ac39b4802a87..098214f103f8 100644 --- a/crates/rpc/rpc-api/src/lib.rs +++ b/crates/rpc/rpc-api/src/lib.rs @@ -21,6 +21,7 @@ mod engine; mod ganache; mod hardhat; mod mev; +mod miner; mod net; mod otterscan; mod reth; @@ -40,6 +41,7 @@ pub mod servers { debug::{DebugApiServer, DebugExecutionWitnessApiServer}, engine::{EngineApiServer, EngineEthApiServer}, mev::{MevFullApiServer, MevSimApiServer}, + miner::MinerApiServer, net::NetApiServer, 
otterscan::OtterscanServer, reth::RethApiServer, @@ -70,6 +72,7 @@ pub mod clients { ganache::GanacheApiClient, hardhat::HardhatApiClient, mev::{MevFullApiClient, MevSimApiClient}, + miner::MinerApiClient, net::NetApiClient, otterscan::OtterscanClient, reth::RethApiClient, diff --git a/crates/rpc/rpc-api/src/miner.rs b/crates/rpc/rpc-api/src/miner.rs new file mode 100644 index 000000000000..3673b51c6eb8 --- /dev/null +++ b/crates/rpc/rpc-api/src/miner.rs @@ -0,0 +1,21 @@ +use alloy_primitives::{Bytes, U128}; +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; + +/// Miner namespace rpc interface that can control miner/builder settings +#[cfg_attr(not(feature = "client"), rpc(server, namespace = "miner"))] +#[cfg_attr(feature = "client", rpc(server, client, namespace = "miner"))] +pub trait MinerApi { + /// Sets the extra data string that is included when this miner mines a block. + /// + /// Returns an error if the extra data is too long. + #[method(name = "setExtra")] + fn set_extra(&self, record: Bytes) -> RpcResult; + + /// Sets the minimum accepted gas price for the miner. + #[method(name = "setGasPrice")] + fn set_gas_price(&self, gas_price: U128) -> RpcResult; + + /// Sets the gaslimit to target towards during mining. + #[method(name = "setGasLimit")] + fn set_gas_limit(&self, gas_price: U128) -> RpcResult; +} diff --git a/crates/rpc/rpc-builder/src/auth.rs b/crates/rpc/rpc-builder/src/auth.rs index 25626e4f12d3..f22fd554ca6d 100644 --- a/crates/rpc/rpc-builder/src/auth.rs +++ b/crates/rpc/rpc-builder/src/auth.rs @@ -221,6 +221,30 @@ impl AuthRpcModule { self.module_mut().merge(other.into()).map(|_| true) } + /// Removes the method with the given name from the configured authenticated methods. + /// + /// Returns `true` if the method was found and removed, `false` otherwise. 
+ pub fn remove_auth_method(&mut self, method_name: &'static str) -> bool { + self.module_mut().remove_method(method_name).is_some() + } + + /// Removes the given methods from the configured authenticated methods. + pub fn remove_auth_methods(&mut self, methods: impl IntoIterator) { + for name in methods { + self.remove_auth_method(name); + } + } + + /// Replace the given [Methods] in the configured authenticated methods. + pub fn replace_auth_methods( + &mut self, + other: impl Into, + ) -> Result { + let other = other.into(); + self.remove_auth_methods(other.method_names()); + self.merge_auth_methods(other) + } + /// Convenience function for starting a server pub async fn start_server( self, diff --git a/crates/rpc/rpc-builder/src/config.rs b/crates/rpc/rpc-builder/src/config.rs index daff81fa2ae1..967f5840c011 100644 --- a/crates/rpc/rpc-builder/src/config.rs +++ b/crates/rpc/rpc-builder/src/config.rs @@ -113,7 +113,7 @@ impl RethRpcServerConfig for RpcServerArgs { EthStateCacheConfig { max_blocks: self.rpc_state_cache.max_blocks, max_receipts: self.rpc_state_cache.max_receipts, - max_envs: self.rpc_state_cache.max_envs, + max_headers: self.rpc_state_cache.max_headers, max_concurrent_db_requests: self.rpc_state_cache.max_concurrent_db_requests, } } diff --git a/crates/rpc/rpc-builder/src/eth.rs b/crates/rpc/rpc-builder/src/eth.rs index 59b3ef870fe2..283fba6e957f 100644 --- a/crates/rpc/rpc-builder/src/eth.rs +++ b/crates/rpc/rpc-builder/src/eth.rs @@ -29,8 +29,11 @@ pub struct EthHandlers { impl EthHandlers where Provider: StateProviderFactory - + BlockReader - + EvmEnvProvider + + BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + > + EvmEnvProvider + Clone + Unpin + 'static, @@ -65,12 +68,7 @@ where EvmConfig: ConfigureEvm
, Tasks: TaskSpawner + Clone + 'static, { - let cache = EthStateCache::spawn_with( - provider.clone(), - config.cache, - executor.clone(), - evm_config.clone(), - ); + let cache = EthStateCache::spawn_with(provider.clone(), config.cache, executor.clone()); let new_canonical_blocks = events.canonical_state_stream(); let c = cache.clone(); diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 8f5c84835aaa..031f960096fa 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -26,7 +26,7 @@ //! RethRpcModule, RpcModuleBuilder, RpcServerConfig, ServerBuilder, TransportRpcModuleConfig, //! }; //! use reth_tasks::TokioTaskExecutor; -//! use reth_transaction_pool::TransactionPool; +//! use reth_transaction_pool::{PoolTransaction, TransactionPool}; //! //! pub async fn launch( //! provider: Provider, @@ -41,15 +41,18 @@ //! Transaction = TransactionSigned, //! Block = reth_primitives::Block, //! Receipt = reth_primitives::Receipt, +//! Header = reth_primitives::Header, //! > + AccountReader //! + ChangeSetReader, -//! Pool: TransactionPool + Unpin + 'static, +//! Pool: TransactionPool> +//! + Unpin +//! + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, //! Events: //! CanonStateSubscriptions + Clone + 'static, -//! EvmConfig: ConfigureEvm
, -//! BlockExecutor: BlockExecutorProvider, -//! Consensus: reth_consensus::Consensus + Clone + 'static, +//! EvmConfig: ConfigureEvm
, +//! BlockExecutor: BlockExecutorProvider, +//! Consensus: reth_consensus::FullConsensus + Clone + 'static, //! { //! // configure the rpc module per transport //! let transports = TransportRpcModuleConfig::default().with_http(vec![ @@ -94,7 +97,7 @@ //! }; //! use reth_rpc_layer::JwtSecret; //! use reth_tasks::TokioTaskExecutor; -//! use reth_transaction_pool::TransactionPool; +//! use reth_transaction_pool::{PoolTransaction, TransactionPool}; //! use tokio::try_join; //! //! pub async fn launch< @@ -121,17 +124,20 @@ //! Transaction = TransactionSigned, //! Block = reth_primitives::Block, //! Receipt = reth_primitives::Receipt, +//! Header = reth_primitives::Header, //! > + AccountReader //! + ChangeSetReader, -//! Pool: TransactionPool + Unpin + 'static, +//! Pool: TransactionPool> +//! + Unpin +//! + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, //! Events: //! CanonStateSubscriptions + Clone + 'static, //! EngineApi: EngineApiServer, //! EngineT: EngineTypes, -//! EvmConfig: ConfigureEvm
, -//! BlockExecutor: BlockExecutorProvider, -//! Consensus: reth_consensus::Consensus + Clone + 'static, +//! EvmConfig: ConfigureEvm
, +//! BlockExecutor: BlockExecutorProvider, +//! Consensus: reth_consensus::FullConsensus + Clone + 'static, //! { //! // configure the rpc module per transport //! let transports = TransportRpcModuleConfig::default().with_http(vec![ @@ -194,18 +200,18 @@ use jsonrpsee::{ Methods, RpcModule, }; use reth_chainspec::EthereumHardforks; -use reth_consensus::Consensus; +use reth_consensus::FullConsensus; use reth_engine_primitives::EngineTypes; use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; -use reth_primitives::EthPrimitives; +use reth_primitives::{EthPrimitives, NodePrimitives}; use reth_provider::{ AccountReader, BlockReader, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, - EvmEnvProvider, FullRpcProvider, ReceiptProvider, StateProviderFactory, + EvmEnvProvider, FullRpcProvider, HeaderProvider, ReceiptProvider, StateProviderFactory, }; use reth_rpc::{ - AdminApi, DebugApi, EngineEthApi, EthBundle, NetApi, OtterscanApi, RPCApi, RethApi, TraceApi, - TxPoolApi, ValidationApi, ValidationApiConfig, Web3Api, + AdminApi, DebugApi, EngineEthApi, EthBundle, MinerApi, NetApi, OtterscanApi, RPCApi, RethApi, + TraceApi, TxPoolApi, ValidationApi, ValidationApiConfig, Web3Api, }; use reth_rpc_api::servers::*; use reth_rpc_eth_api::{ @@ -266,19 +272,27 @@ pub async fn launch, block_executor: BlockExecutor, - consensus: Arc, + consensus: Arc, ) -> Result where - Provider: FullRpcProvider - + AccountReader + Provider: FullRpcProvider< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + > + AccountReader + ChangeSetReader, - Pool: TransactionPool + 'static, + Pool: TransactionPool::Transaction> + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, Events: CanonStateSubscriptions + Clone + 'static, EvmConfig: ConfigureEvm
, EthApi: FullEthApiServer, - BlockExecutor: BlockExecutorProvider, + BlockExecutor: BlockExecutorProvider< + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, { let module_config = module_config.into(); server_config @@ -630,8 +644,13 @@ where Tasks: TaskSpawner + Clone + 'static, Events: CanonStateSubscriptions + Clone + 'static, EvmConfig: ConfigureEvm
, - BlockExecutor: BlockExecutorProvider, - Consensus: reth_consensus::Consensus + Clone + 'static, + BlockExecutor: BlockExecutorProvider< + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, + Consensus: reth_consensus::FullConsensus + Clone + 'static, { /// Configures all [`RpcModule`]s specific to the given [`TransportRpcModuleConfig`] which can /// be used to start the transport server(s). @@ -657,7 +676,9 @@ where Provider: BlockReader< Block = ::Block, Receipt = ::Receipt, + Header = ::Header, >, + Pool: TransactionPool::Transaction>, { let Self { provider, @@ -733,7 +754,11 @@ where ) -> RpcRegistryInner where EthApi: EthApiTypes + 'static, - Provider: BlockReader, + Provider: BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + >, { let Self { provider, @@ -771,7 +796,9 @@ where Provider: BlockReader< Block = ::Block, Receipt = ::Receipt, + Header = ::Header, >, + Pool: TransactionPool::Transaction>, { let mut modules = TransportRpcModules::default(); @@ -930,8 +957,11 @@ impl RpcRegistryInner where Provider: StateProviderFactory - + BlockReader - + EvmEnvProvider + + BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + > + EvmEnvProvider + Clone + Unpin + 'static, @@ -1104,7 +1134,8 @@ where RpcBlock, RpcReceipt, > + EthApiTypes, - BlockExecutor: BlockExecutorProvider, + BlockExecutor: + BlockExecutorProvider>, { /// Register Eth Namespace /// @@ -1250,7 +1281,8 @@ where pub fn debug_api(&self) -> DebugApi where EthApi: EthApiSpec + EthTransactions + TraceExt, - BlockExecutor: BlockExecutorProvider, + BlockExecutor: + BlockExecutorProvider>, { DebugApi::new( self.provider.clone(), @@ -1281,7 +1313,7 @@ where /// Instantiates `ValidationApi` pub fn validation_api(&self) -> ValidationApi where - Consensus: reth_consensus::Consensus + Clone + 'static, + 
Consensus: reth_consensus::FullConsensus + Clone + 'static, { ValidationApi::new( self.provider.clone(), @@ -1299,15 +1331,21 @@ where Provider: FullRpcProvider< Block = ::Block, Receipt = ::Receipt, + Header = ::Header, > + AccountReader + ChangeSetReader, - Pool: TransactionPool + 'static, + Pool: TransactionPool::Transaction> + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, Events: CanonStateSubscriptions + Clone + 'static, EthApi: FullEthApiServer, - BlockExecutor: BlockExecutorProvider, - Consensus: reth_consensus::Consensus + Clone + 'static, + BlockExecutor: BlockExecutorProvider< + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, + Consensus: reth_consensus::FullConsensus + Clone + 'static, { /// Configures the auth module that includes the /// * `engine_` namespace @@ -1461,6 +1499,7 @@ where ) .into_rpc() .into(), + RethRpcModule::Miner => MinerApi::default().into_rpc().into(), }) .clone() }) diff --git a/crates/rpc/rpc-builder/tests/it/http.rs b/crates/rpc/rpc-builder/tests/it/http.rs index 8393d9427a6b..a8393b0a92e4 100644 --- a/crates/rpc/rpc-builder/tests/it/http.rs +++ b/crates/rpc/rpc-builder/tests/it/http.rs @@ -259,7 +259,7 @@ where Some(block_number.into()), ) .await - .unwrap(); + .unwrap_err(); EthApiClient::::estimate_gas( client, call_request.clone(), @@ -267,7 +267,7 @@ where None, ) .await - .unwrap(); + .unwrap_err(); EthApiClient::::call( client, call_request.clone(), @@ -276,7 +276,7 @@ where None, ) .await - .unwrap(); + .unwrap_err(); EthApiClient::::syncing(client).await.unwrap(); EthApiClient::::send_transaction( client, @@ -368,13 +368,15 @@ where .unwrap_err(); TraceApiClient::trace_call_many(client, vec![], Some(BlockNumberOrTag::Latest.into())) .await - .unwrap(); + .unwrap_err(); TraceApiClient::replay_transaction(client, B256::default(), HashSet::default()) .await .err() .unwrap(); - TraceApiClient::trace_block(client, 
block_id).await.unwrap(); - TraceApiClient::replay_block_transactions(client, block_id, HashSet::default()).await.unwrap(); + TraceApiClient::trace_block(client, block_id).await.unwrap_err(); + TraceApiClient::replay_block_transactions(client, block_id, HashSet::default()) + .await + .unwrap_err(); TraceApiClient::trace_filter(client, trace_filter).await.unwrap(); } diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index 4854ac44dc59..f9f05da33d3c 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -54,7 +54,6 @@ reth-provider = { workspace = true, features = ["test-utils"] } reth-payload-builder = { workspace = true, features = ["test-utils"] } reth-tokio-util.workspace = true reth-testing-utils.workspace = true - alloy-rlp.workspace = true assert_matches.workspace = true \ No newline at end of file diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 1062363eafb8..8b57cb1f19ee 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -1,12 +1,17 @@ use crate::{ capabilities::EngineCapabilities, metrics::EngineApiMetrics, EngineApiError, EngineApiResult, }; -use alloy_eips::{eip1898::BlockHashOrNumber, eip4844::BlobAndProofV1, eip7685::Requests}; +use alloy_eips::{ + eip1898::BlockHashOrNumber, + eip4844::BlobAndProofV1, + eip7685::{Requests, RequestsOrHash}, +}; use alloy_primitives::{BlockHash, BlockNumber, B256, U64}; use alloy_rpc_types_engine::{ CancunPayloadFields, ClientVersionV1, ExecutionPayload, ExecutionPayloadBodiesV1, ExecutionPayloadInputV2, ExecutionPayloadSidecar, ExecutionPayloadV1, ExecutionPayloadV3, - ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, TransitionConfiguration, + ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, PraguePayloadFields, + TransitionConfiguration, }; use async_trait::async_trait; use 
jsonrpsee_core::RpcResult; @@ -279,7 +284,11 @@ where payload, ExecutionPayloadSidecar::v4( CancunPayloadFields { versioned_hashes, parent_beacon_block_root }, - execution_requests, + PraguePayloadFields { + requests: RequestsOrHash::Requests(execution_requests), + // TODO: add as an argument and handle in `try_into_block` + target_blobs_per_block: 0, + }, ), ) .await diff --git a/crates/rpc/rpc-engine-api/src/lib.rs b/crates/rpc/rpc-engine-api/src/lib.rs index a2da00eee702..a9305a00820b 100644 --- a/crates/rpc/rpc-engine-api/src/lib.rs +++ b/crates/rpc/rpc-engine-api/src/lib.rs @@ -15,9 +15,6 @@ mod engine_api; /// Engine API capabilities. pub mod capabilities; -/// The Engine API message type. -mod message; - /// Engine API error. mod error; @@ -26,7 +23,6 @@ mod metrics; pub use engine_api::{EngineApi, EngineApiSender}; pub use error::*; -pub use message::EngineApiMessageVersion; // re-export server trait for convenience pub use reth_rpc_api::EngineApiServer; diff --git a/crates/rpc/rpc-engine-api/src/message.rs b/crates/rpc/rpc-engine-api/src/message.rs deleted file mode 100644 index c0d6b85d5118..000000000000 --- a/crates/rpc/rpc-engine-api/src/message.rs +++ /dev/null @@ -1,14 +0,0 @@ -/// The version of Engine API message. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum EngineApiMessageVersion { - /// Version 1 - V1, - /// Version 2 - /// - /// Added for shanghai hardfork. - V2, - /// Version 3 - /// - /// Added for cancun hardfork. 
- V3, -} diff --git a/crates/rpc/rpc-eth-api/Cargo.toml b/crates/rpc/rpc-eth-api/Cargo.toml index e4b1b28074f0..bffd4fa308ec 100644 --- a/crates/rpc/rpc-eth-api/Cargo.toml +++ b/crates/rpc/rpc-eth-api/Cargo.toml @@ -16,6 +16,7 @@ workspace = true revm.workspace = true revm-inspectors.workspace = true revm-primitives = { workspace = true, features = ["dev"] } +reth-primitives-traits.workspace = true reth-errors.workspace = true reth-evm.workspace = true reth-primitives.workspace = true @@ -29,8 +30,8 @@ reth-execution-types.workspace = true reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true reth-network-api.workspace = true -reth-trie.workspace = true reth-node-api.workspace = true +reth-trie-common = { workspace = true, features = ["eip1186"] } # ethereum alloy-serde.workspace = true diff --git a/crates/rpc/rpc-eth-api/src/core.rs b/crates/rpc/rpc-eth-api/src/core.rs index 9cd9ba2921ae..6500c3049781 100644 --- a/crates/rpc/rpc-eth-api/src/core.rs +++ b/crates/rpc/rpc-eth-api/src/core.rs @@ -625,6 +625,7 @@ where block_number: Option, ) -> RpcResult>>> { trace!(target: "rpc::eth", ?block_number, "Serving eth_simulateV1"); + let _permit = self.tracing_task_guard().clone().acquire_owned().await; Ok(EthCall::simulate_v1(self, payload, block_number).await?) 
} diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index c7f346e951e3..aaa2ce131c9c 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -19,7 +19,7 @@ use futures::Future; use reth_chainspec::EthChainSpec; use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; use reth_node_api::BlockBody; -use reth_primitives::TransactionSigned; +use reth_primitives_traits::SignedTransaction; use reth_provider::{BlockIdReader, ChainSpecProvider, HeaderProvider}; use reth_revm::{ database::StateProviderDatabase, @@ -201,7 +201,6 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock { parent_hash, total_difficulty, return_full_transactions, - &db, this.tx_resp_builder(), )?; @@ -665,14 +664,15 @@ pub trait Call: LoadState> + SpawnBlocking { where DB: Database + DatabaseCommit, EthApiError: From, - I: IntoIterator, + I: IntoIterator::Transaction)>, + ::Transaction: SignedTransaction, { let env = EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()); let mut evm = self.evm_config().evm_with_env(db, env); let mut index = 0; for (sender, tx) in transactions { - if tx.hash() == target_tx_hash { + if *tx.tx_hash() == target_tx_hash { // reached the target transaction break } diff --git a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs index 465c33ada387..f9d62855be12 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs @@ -57,7 +57,7 @@ pub trait EstimateCall: Call { request.nonce = None; // Keep a copy of gas related request values - let tx_request_gas_limit = request.gas; + let tx_request_gas_limit = request.gas.map(U256::from); let tx_request_gas_price = request.gas_price; // the gas limit of the corresponding block let block_env_gas_limit = block.gas_limit; @@ -65,7 +65,13 @@ pub trait EstimateCall: Call { // Determine the highest possible gas limit, 
considering both the request's specified limit // and the block's limit. let mut highest_gas_limit = tx_request_gas_limit - .map(|tx_gas_limit| U256::from(tx_gas_limit).max(block_env_gas_limit)) + .map(|mut tx_gas_limit| { + if block_env_gas_limit < tx_gas_limit { + // requested gas limit is higher than the allowed gas limit, capping + tx_gas_limit = block_env_gas_limit; + } + tx_gas_limit + }) .unwrap_or(block_env_gas_limit); // Configure the evm env diff --git a/crates/rpc/rpc-eth-api/src/helpers/fee.rs b/crates/rpc/rpc-eth-api/src/helpers/fee.rs index 0099e0f6b160..5843e945b8c3 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/fee.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/fee.rs @@ -163,11 +163,11 @@ pub trait EthFees: LoadFee { for header in &headers { - base_fee_per_gas.push(header.base_fee_per_gas.unwrap_or_default() as u128); - gas_used_ratio.push(header.gas_used as f64 / header.gas_limit as f64); + base_fee_per_gas.push(header.base_fee_per_gas().unwrap_or_default() as u128); + gas_used_ratio.push(header.gas_used() as f64 / header.gas_limit() as f64); base_fee_per_blob_gas.push(header.blob_fee().unwrap_or_default()); blob_gas_used_ratio.push( - header.blob_gas_used.unwrap_or_default() as f64 + header.blob_gas_used().unwrap_or_default() as f64 / alloy_eips::eip4844::MAX_DATA_GAS_PER_BLOCK as f64, ); @@ -181,8 +181,8 @@ pub trait EthFees: LoadFee { rewards.push( calculate_reward_percentiles_for_block( percentiles, - header.gas_used, - header.base_fee_per_gas.unwrap_or_default(), + header.gas_used(), + header.base_fee_per_gas().unwrap_or_default(), &block.body.transactions, &receipts, ) @@ -198,14 +198,10 @@ pub trait EthFees: LoadFee { // The unwrap is safe since we checked earlier that we got at least 1 header. 
let last_header = headers.last().expect("is present"); base_fee_per_gas.push( + last_header.next_block_base_fee( self.provider() .chain_spec() - .base_fee_params_at_timestamp(last_header.timestamp) - .next_block_base_fee( - last_header.gas_used , - last_header.gas_limit, - last_header.base_fee_per_gas.unwrap_or_default() , - ) as u128, + .base_fee_params_at_timestamp(last_header.timestamp())).unwrap_or_default() as u128 ); // Same goes for the `base_fee_per_blob_gas`: diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 36ba2c1e84eb..8a6e5c84be1a 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -3,37 +3,40 @@ use super::SpawnBlocking; use crate::{EthApiTypes, FromEthApiError, FromEvmError, RpcNodeCore}; -use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; +use alloy_consensus::{BlockHeader, Header, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::{ eip4844::MAX_DATA_GAS_PER_BLOCK, eip7685::EMPTY_REQUESTS_HASH, merge::BEACON_NONCE, }; use alloy_primitives::{BlockNumber, B256, U256}; -use alloy_rpc_types_eth::BlockNumberOrTag; +use alloy_rpc_types_eth::{BlockNumberOrTag, Withdrawals}; use futures::Future; use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_errors::RethError; use reth_evm::{ state_change::post_block_withdrawals_balance_increments, system_calls::SystemCaller, - ConfigureEvm, ConfigureEvmEnv, + ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes, }; use reth_execution_types::ExecutionOutcome; use reth_primitives::{ - proofs::calculate_transaction_root, Block, BlockBody, BlockExt, Receipt, - SealedBlockWithSenders, SealedHeader, TransactionSignedEcRecovered, + proofs::calculate_transaction_root, Block, BlockBody, BlockExt, InvalidTransactionError, + Receipt, RecoveredTx, SealedBlockWithSenders, }; use reth_provider::{ BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderError, - 
ProviderReceipt, ReceiptProvider, StateProviderFactory, + ProviderReceipt, ProviderTx, ReceiptProvider, StateProviderFactory, }; use reth_revm::{ database::StateProviderDatabase, primitives::{ - BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, EVMError, Env, ExecutionResult, InvalidTransaction, + BlockEnv, CfgEnvWithHandlerCfg, EVMError, Env, ExecutionResult, InvalidTransaction, ResultAndState, SpecId, }, }; use reth_rpc_eth_types::{EthApiError, PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin}; -use reth_transaction_pool::{BestTransactionsAttributes, TransactionPool}; -use reth_trie::HashedPostState; +use reth_transaction_pool::{ + error::InvalidPoolTransactionError, BestTransactionsAttributes, PoolTransaction, + TransactionPool, +}; use revm::{db::states::bundle_state::BundleRetention, DatabaseCommit, State}; use std::time::{Duration, Instant}; use tokio::sync::Mutex; @@ -46,13 +49,15 @@ pub trait LoadPendingBlock: EthApiTypes + RpcNodeCore< Provider: BlockReaderIdExt< + Transaction = reth_primitives::TransactionSigned, Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, > + EvmEnvProvider + ChainSpecProvider + StateProviderFactory, - Pool: TransactionPool, - Evm: ConfigureEvm
, + Pool: TransactionPool>>, + Evm: ConfigureEvm
>, > { /// Returns a handle to the pending block. @@ -64,55 +69,56 @@ pub trait LoadPendingBlock: /// /// If no pending block is available, this will derive it from the `latest` block fn pending_block_env_and_cfg(&self) -> Result { - let origin: PendingBlockEnvOrigin = if let Some(pending) = + if let Some(block) = self.provider().pending_block_with_senders().map_err(Self::Error::from_eth_err)? { - PendingBlockEnvOrigin::ActualPending(pending) - } else { - // no pending block from the CL yet, so we use the latest block and modify the env - // values that we can - let latest = self + if let Some(receipts) = self .provider() - .latest_header() + .receipts_by_block(block.hash().into()) .map_err(Self::Error::from_eth_err)? - .ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()))?; - - let (mut latest_header, block_hash) = latest.split(); - // child block - latest_header.number += 1; - // assumed child block is in the next slot: 12s - latest_header.timestamp += 12; - // base fee of the child block - let chain_spec = self.provider().chain_spec(); - - latest_header.base_fee_per_gas = latest_header.next_block_base_fee( - chain_spec.base_fee_params_at_timestamp(latest_header.timestamp), - ); - - // update excess blob gas consumed above target - latest_header.excess_blob_gas = latest_header.next_block_excess_blob_gas(); - - // we're reusing the same block hash because we need this to lookup the block's state - let latest = SealedHeader::new(latest_header, block_hash); - - PendingBlockEnvOrigin::DerivedFromLatest(latest) - }; + { + // Note: for the PENDING block we assume it is past the known merge block and + // thus this will not fail when looking up the total + // difficulty value for the blockenv. 
+ let (cfg, block_env) = self + .provider() + .env_with_header(block.header(), self.evm_config().clone()) + .map_err(Self::Error::from_eth_err)?; + + return Ok(PendingBlockEnv::new( + cfg, + block_env, + PendingBlockEnvOrigin::ActualPending(block, receipts), + )); + } + } - let mut cfg = CfgEnvWithHandlerCfg::new_with_spec_id(CfgEnv::default(), SpecId::LATEST); - - let mut block_env = BlockEnv::default(); - // Note: for the PENDING block we assume it is past the known merge block and thus this will - // not fail when looking up the total difficulty value for the blockenv. - self.provider() - .fill_env_with_header( - &mut cfg, - &mut block_env, - origin.header(), - self.evm_config().clone(), + // no pending block from the CL yet, so we use the latest block and modify the env + // values that we can + let latest = self + .provider() + .latest_header() + .map_err(Self::Error::from_eth_err)? + .ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()))?; + + let (cfg, block_env) = self + .evm_config() + .next_cfg_and_block_env( + &latest, + NextBlockEnvAttributes { + timestamp: latest.timestamp() + 12, + suggested_fee_recipient: latest.beneficiary(), + prev_randao: B256::random(), + }, ) + .map_err(RethError::other) .map_err(Self::Error::from_eth_err)?; - Ok(PendingBlockEnv::new(cfg, block_env, origin)) + Ok(PendingBlockEnv::new( + cfg, + block_env, + PendingBlockEnvOrigin::DerivedFromLatest(latest.hash()), + )) } /// Returns the locally built pending block @@ -133,18 +139,12 @@ pub trait LoadPendingBlock: { async move { let pending = self.pending_block_env_and_cfg()?; - if pending.origin.is_actual_pending() { - if let Some(block) = pending.origin.clone().into_actual_pending() { - // we have the real pending block, so we should also have its receipts - if let Some(receipts) = self - .provider() - .receipts_by_block(block.hash().into()) - .map_err(Self::Error::from_eth_err)? 
- { - return Ok(Some((block, receipts))) - } + let parent_hash = match pending.origin { + PendingBlockEnvOrigin::ActualPending(block, receipts) => { + return Ok(Some((block, receipts))); } - } + PendingBlockEnvOrigin::DerivedFromLatest(parent_hash) => parent_hash, + }; // we couldn't find the real pending block, so we need to build it ourselves let mut lock = self.pending_block().lock().await; @@ -155,7 +155,7 @@ pub trait LoadPendingBlock: if let Some(pending_block) = lock.as_ref() { // this is guaranteed to be the `latest` header if pending.block_env.number.to::() == pending_block.block.number && - pending.origin.header().hash() == pending_block.block.parent_hash && + parent_hash == pending_block.block.parent_hash && now <= pending_block.expires_at { return Ok(Some((pending_block.block.clone(), pending_block.receipts.clone()))); @@ -166,7 +166,7 @@ pub trait LoadPendingBlock: let (sealed_block, receipts) = match self .spawn_blocking_io(move |this| { // we rebuild the block - this.build_block(pending) + this.build_block(pending.cfg, pending.block_env, parent_hash) }) .await { @@ -191,7 +191,7 @@ pub trait LoadPendingBlock: /// Assembles a [`Receipt`] for a transaction, based on its [`ExecutionResult`]. fn assemble_receipt( &self, - tx: &TransactionSignedEcRecovered, + tx: &RecoveredTx, result: ExecutionResult, cumulative_gas_used: u64, ) -> Receipt { @@ -225,14 +225,13 @@ pub trait LoadPendingBlock: /// block contract call using the parent beacon block root received from the CL. 
fn build_block( &self, - env: PendingBlockEnv, + cfg: CfgEnvWithHandlerCfg, + block_env: BlockEnv, + parent_hash: B256, ) -> Result<(SealedBlockWithSenders, Vec), Self::Error> where EthApiError: From, { - let PendingBlockEnv { cfg, block_env, origin } = env; - - let parent_hash = origin.build_target_hash(); let state_provider = self .provider() .history_by_block_hash(parent_hash) @@ -254,34 +253,16 @@ pub trait LoadPendingBlock: block_env.get_blob_gasprice().map(|gasprice| gasprice as u64), )); - let (withdrawals, withdrawals_root) = match origin { - PendingBlockEnvOrigin::ActualPending(ref block) => { - (block.body.withdrawals.clone(), block.withdrawals_root) - } - PendingBlockEnvOrigin::DerivedFromLatest(_) => (None, None), - }; + let withdrawals: Option = None; + let withdrawals_root = None; let chain_spec = self.provider().chain_spec(); let mut system_caller = SystemCaller::new(self.evm_config().clone(), chain_spec.clone()); - let parent_beacon_block_root = if origin.is_actual_pending() { - // apply eip-4788 pre block contract call if we got the block from the CL with the real - // parent beacon block root - system_caller - .pre_block_beacon_root_contract_call( - &mut db, - &cfg, - &block_env, - origin.header().parent_beacon_block_root, - ) - .map_err(|err| EthApiError::Internal(err.into()))?; - origin.header().parent_beacon_block_root - } else { - None - }; + let parent_beacon_block_root = None; system_caller - .pre_block_blockhashes_contract_call(&mut db, &cfg, &block_env, origin.header().hash()) + .pre_block_blockhashes_contract_call(&mut db, &cfg, &block_env, parent_hash) .map_err(|err| EthApiError::Internal(err.into()))?; let mut receipts = Vec::new(); @@ -292,7 +273,13 @@ pub trait LoadPendingBlock: // we can't fit this transaction into the block, so we need to mark it as invalid // which also removes all dependent transaction from the iterator before we can // continue - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid( + &pool_tx, + 
InvalidPoolTransactionError::ExceedsGasLimit( + pool_tx.gas_limit(), + block_gas_limit, + ), + ); continue } @@ -300,12 +287,17 @@ pub trait LoadPendingBlock: // we don't want to leak any state changes made by private transactions, so we mark // them as invalid here which removes all dependent transactions from the iterator // before we can continue - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid( + &pool_tx, + InvalidPoolTransactionError::Consensus( + InvalidTransactionError::TxTypeNotSupported, + ), + ); continue } // convert tx to a signed transaction - let tx = pool_tx.to_recovered_transaction(); + let tx = pool_tx.to_consensus(); // There's only limited amount of blob space available per block, so we need to check if // the EIP-4844 can still fit in the block @@ -316,7 +308,13 @@ pub trait LoadPendingBlock: // invalid, which removes its dependent transactions from // the iterator. This is similar to the gas limit condition // for regular transactions above. - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid( + &pool_tx, + InvalidPoolTransactionError::ExceedsGasLimit( + tx_blob_gas, + MAX_DATA_GAS_PER_BLOCK, + ), + ); continue } } @@ -340,7 +338,12 @@ pub trait LoadPendingBlock: } else { // if the transaction is invalid, we can skip it and all of its // descendants - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid( + &pool_tx, + InvalidPoolTransactionError::Consensus( + InvalidTransactionError::TxTypeNotSupported, + ), + ); } continue } @@ -400,7 +403,7 @@ pub trait LoadPendingBlock: block_number, Vec::new(), ); - let hashed_state = HashedPostState::from_bundle_state(&execution_outcome.state().state); + let hashed_state = db.database.hashed_post_state(execution_outcome.state()); let receipts_root = self.receipts_root(&block_env, &execution_outcome, block_number); @@ -443,6 +446,7 @@ pub trait LoadPendingBlock: extra_data: Default::default(), parent_beacon_block_root, requests_hash, + target_blobs_per_block: None, }; // Convert Vec> 
to Vec diff --git a/crates/rpc/rpc-eth-api/src/helpers/state.rs b/crates/rpc/rpc-eth-api/src/helpers/state.rs index 7ff9fa4deff5..50ff1b557b52 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/state.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/state.rs @@ -1,6 +1,7 @@ //! Loads a pending block from database. Helper trait for `eth_` block, transaction, call and trace //! RPC methods. - +use super::{EthApiSpec, LoadPendingBlock, SpawnBlocking}; +use crate::{EthApiTypes, FromEthApiError, RpcNodeCore, RpcNodeCoreExt}; use alloy_consensus::{constants::KECCAK_EMPTY, Header}; use alloy_eips::BlockId; use alloy_primitives::{Address, Bytes, B256, U256}; @@ -11,18 +12,13 @@ use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_errors::RethError; use reth_evm::ConfigureEvmEnv; use reth_provider::{ - BlockIdReader, BlockNumReader, ChainSpecProvider, StateProvider, StateProviderBox, - StateProviderFactory, + BlockIdReader, BlockNumReader, ChainSpecProvider, EvmEnvProvider as _, StateProvider, + StateProviderBox, StateProviderFactory, }; use reth_rpc_eth_types::{EthApiError, PendingBlockEnv, RpcInvalidTransactionError}; -use reth_rpc_types_compat::proof::from_primitive_account_proof; use reth_transaction_pool::TransactionPool; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, SpecId}; -use crate::{EthApiTypes, FromEthApiError, RpcNodeCore, RpcNodeCoreExt}; - -use super::{EthApiSpec, LoadPendingBlock, SpawnBlocking}; - /// Helper methods for `eth_` methods relating to state (accounts). pub trait EthState: LoadState + SpawnBlocking { /// Returns the maximum number of blocks into the past for generating state proofs. 
@@ -122,7 +118,7 @@ pub trait EthState: LoadState + SpawnBlocking { let proof = state .proof(Default::default(), address, &storage_keys) .map_err(Self::Error::from_eth_err)?; - Ok(from_primitive_account_proof(proof, keys)) + Ok(proof.into_eip1186_response(keys)) }) .await }) @@ -233,12 +229,15 @@ pub trait LoadState: .block_hash_for_id(at) .map_err(Self::Error::from_eth_err)? .ok_or(EthApiError::HeaderNotFound(at))?; - let (cfg, env) = self - .cache() - .get_evm_env(block_hash) - .await + + let header = + self.cache().get_header(block_hash).await.map_err(Self::Error::from_eth_err)?; + let evm_config = self.evm_config().clone(); + let (cfg, block_env) = self + .provider() + .env_with_header(&header, evm_config) .map_err(Self::Error::from_eth_err)?; - Ok((cfg, env, block_hash.into())) + Ok((cfg, block_env, block_hash.into())) } } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 6ad8f8fd6ec0..3b4ecb9de273 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -8,7 +8,10 @@ use alloy_network::TransactionBuilder; use alloy_primitives::{Address, Bytes, TxHash, B256}; use alloy_rpc_types_eth::{transaction::TransactionRequest, BlockNumberOrTag, TransactionInfo}; use futures::Future; -use reth_primitives::{SealedBlockWithSenders, TransactionMeta, TransactionSigned}; +use reth_primitives::{ + transaction::SignedTransactionIntoRecoveredExt, SealedBlockWithSenders, TransactionMeta, + TransactionSigned, +}; use reth_provider::{ BlockNumReader, BlockReaderIdExt, ProviderReceipt, ProviderTx, ReceiptProvider, TransactionsProvider, @@ -120,10 +123,13 @@ pub trait EthTransactions: LoadTransaction { } /// Returns the _historical_ transaction and the block it was mined in + #[expect(clippy::type_complexity)] fn historical_transaction_by_hash_at( &self, hash: B256, - ) -> impl Future, Self::Error>> + Send { + ) -> impl Future< + Output = Result>, 
B256)>, Self::Error>, + > + Send { async move { match self.transaction_by_hash_at(hash).await? { None => Ok(None), @@ -239,8 +245,8 @@ pub trait EthTransactions: LoadTransaction { if let Some(tx) = RpcNodeCore::pool(self).get_transaction_by_sender_and_nonce(sender, nonce) { - let transaction = tx.transaction.clone().into_consensus(); - return Ok(Some(from_recovered(transaction.into(), self.tx_resp_builder())?)); + let transaction = tx.transaction.clone_into_consensus(); + return Ok(Some(from_recovered(transaction, self.tx_resp_builder())?)); } } @@ -282,7 +288,7 @@ pub trait EthTransactions: LoadTransaction { block .transactions_with_sender() .enumerate() - .find(|(_, (signer, tx))| **signer == sender && tx.nonce() == nonce) + .find(|(_, (signer, tx))| **signer == sender && (*tx).nonce() == nonce) .map(|(index, (signer, tx))| { let tx_info = TransactionInfo { hash: Some(tx.hash()), @@ -385,7 +391,7 @@ pub trait EthTransactions: LoadTransaction { let pool_transaction = <::Pool as TransactionPool>::Transaction::try_from_consensus( - transaction.into(), + transaction, ) .map_err(|_| EthApiError::TransactionConversionError)?; @@ -475,11 +481,7 @@ pub trait EthTransactions: LoadTransaction { /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` transactions RPC /// methods. -pub trait LoadTransaction: - SpawnBlocking - + FullEthApiTypes - + RpcNodeCoreExt -{ +pub trait LoadTransaction: SpawnBlocking + FullEthApiTypes + RpcNodeCoreExt { /// Returns the transaction by hash. /// /// Checks the pool and state. @@ -539,11 +541,16 @@ pub trait LoadTransaction: /// Returns the transaction by including its corresponding [`BlockId`]. 
/// /// Note: this supports pending transactions + #[expect(clippy::type_complexity)] fn transaction_by_hash_at( &self, transaction_hash: B256, - ) -> impl Future, Self::Error>> + Send - { + ) -> impl Future< + Output = Result< + Option<(TransactionSource>, BlockId)>, + Self::Error, + >, + > + Send { async move { Ok(self.transaction_by_hash(transaction_hash).await?.map(|tx| match tx { tx @ TransactionSource::Pool(_) => (tx, BlockId::pending()), @@ -555,11 +562,15 @@ pub trait LoadTransaction: } /// Fetches the transaction and the transaction's block + #[expect(clippy::type_complexity)] fn transaction_and_block( &self, hash: B256, ) -> impl Future< - Output = Result)>, Self::Error>, + Output = Result< + Option<(TransactionSource>, Arc)>, + Self::Error, + >, > + Send { async move { let (transaction, at) = match self.transaction_by_hash_at(hash).await? { diff --git a/crates/rpc/rpc-eth-api/src/lib.rs b/crates/rpc/rpc-eth-api/src/lib.rs index cb97a03e8b80..c4a255985cbf 100644 --- a/crates/rpc/rpc-eth-api/src/lib.rs +++ b/crates/rpc/rpc-eth-api/src/lib.rs @@ -20,16 +20,15 @@ pub mod node; pub mod pubsub; pub mod types; -pub use reth_rpc_eth_types::error::{ - AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError, -}; -pub use reth_rpc_types_compat::TransactionCompat; - pub use bundle::{EthBundleApiServer, EthCallBundleApiServer}; pub use core::{EthApiServer, FullEthApiServer}; pub use filter::EthFilterApiServer; pub use node::{RpcNodeCore, RpcNodeCoreExt}; pub use pubsub::EthPubSubApiServer; +pub use reth_rpc_eth_types::error::{ + AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError, +}; +pub use reth_rpc_types_compat::TransactionCompat; pub use types::{EthApiTypes, FullEthApiTypes, RpcBlock, RpcReceipt, RpcTransaction}; #[cfg(feature = "client")] @@ -38,3 +37,5 @@ pub use bundle::{EthBundleApiClient, EthCallBundleApiClient}; pub use core::EthApiClient; #[cfg(feature = "client")] pub use filter::EthFilterApiClient; + +use reth_trie_common as _; diff 
--git a/crates/rpc/rpc-eth-api/src/types.rs b/crates/rpc/rpc-eth-api/src/types.rs index 2bac068483c7..c97ea5735ee9 100644 --- a/crates/rpc/rpc-eth-api/src/types.rs +++ b/crates/rpc/rpc-eth-api/src/types.rs @@ -7,9 +7,9 @@ use std::{ use alloy_network::Network; use alloy_rpc_types_eth::Block; -use reth_primitives::TransactionSigned; -use reth_provider::{ReceiptProvider, TransactionsProvider}; +use reth_provider::{ProviderTx, ReceiptProvider, TransactionsProvider}; use reth_rpc_types_compat::TransactionCompat; +use reth_transaction_pool::{PoolTransaction, TransactionPool}; use crate::{AsEthApiError, FromEthApiError, FromEvmError, RpcNodeCore}; @@ -48,8 +48,10 @@ pub type RpcError = ::Error; pub trait FullEthApiTypes where Self: RpcNodeCore< - Provider: TransactionsProvider - + ReceiptProvider, + Provider: TransactionsProvider + ReceiptProvider, + Pool: TransactionPool< + Transaction: PoolTransaction>, + >, > + EthApiTypes< TransactionCompat: TransactionCompat< ::Transaction, @@ -62,8 +64,10 @@ where impl FullEthApiTypes for T where T: RpcNodeCore< - Provider: TransactionsProvider - + ReceiptProvider, + Provider: TransactionsProvider + ReceiptProvider, + Pool: TransactionPool< + Transaction: PoolTransaction>, + >, > + EthApiTypes< TransactionCompat: TransactionCompat< ::Transaction, diff --git a/crates/rpc/rpc-eth-types/Cargo.toml b/crates/rpc/rpc-eth-types/Cargo.toml index 11bf6c6231d2..72b153ab0845 100644 --- a/crates/rpc/rpc-eth-types/Cargo.toml +++ b/crates/rpc/rpc-eth-types/Cargo.toml @@ -15,7 +15,6 @@ workspace = true reth-chainspec.workspace = true reth-chain-state.workspace = true reth-errors.workspace = true -reth-evm.workspace = true reth-execution-types.workspace = true reth-metrics.workspace = true reth-primitives = { workspace = true, features = ["secp256k1"] } diff --git a/crates/rpc/rpc-eth-types/src/cache/config.rs b/crates/rpc/rpc-eth-types/src/cache/config.rs index 64999bd6bf3e..001a5b4d4d59 100644 --- a/crates/rpc/rpc-eth-types/src/cache/config.rs 
+++ b/crates/rpc/rpc-eth-types/src/cache/config.rs @@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize}; use reth_rpc_server_types::constants::cache::{ - DEFAULT_BLOCK_CACHE_MAX_LEN, DEFAULT_CONCURRENT_DB_REQUESTS, DEFAULT_ENV_CACHE_MAX_LEN, + DEFAULT_BLOCK_CACHE_MAX_LEN, DEFAULT_CONCURRENT_DB_REQUESTS, DEFAULT_HEADER_CACHE_MAX_LEN, DEFAULT_RECEIPT_CACHE_MAX_LEN, }; @@ -19,10 +19,10 @@ pub struct EthStateCacheConfig { /// /// Default is 2000. pub max_receipts: u32, - /// Max number of bytes for cached env data. + /// Max number of headers in cache. /// /// Default is 1000. - pub max_envs: u32, + pub max_headers: u32, /// Max number of concurrent database requests. /// /// Default is 512. @@ -34,7 +34,7 @@ impl Default for EthStateCacheConfig { Self { max_blocks: DEFAULT_BLOCK_CACHE_MAX_LEN, max_receipts: DEFAULT_RECEIPT_CACHE_MAX_LEN, - max_envs: DEFAULT_ENV_CACHE_MAX_LEN, + max_headers: DEFAULT_HEADER_CACHE_MAX_LEN, max_concurrent_db_requests: DEFAULT_CONCURRENT_DB_REQUESTS, } } diff --git a/crates/rpc/rpc-eth-types/src/cache/db.rs b/crates/rpc/rpc-eth-types/src/cache/db.rs index 1fbe16a2ed9c..ed107f3b0a9e 100644 --- a/crates/rpc/rpc-eth-types/src/cache/db.rs +++ b/crates/rpc/rpc-eth-types/src/cache/db.rs @@ -8,7 +8,7 @@ use alloy_primitives::{ }; use reth_errors::ProviderResult; use reth_revm::{database::StateProviderDatabase, db::CacheDB, DatabaseRef}; -use reth_storage_api::StateProvider; +use reth_storage_api::{HashedPostStateProvider, StateProvider}; use reth_trie::HashedStorage; use revm::Database; @@ -139,6 +139,15 @@ impl reth_storage_api::BlockHashReader for StateProviderTraitObjWrapper<'_> { } } +impl HashedPostStateProvider for StateProviderTraitObjWrapper<'_> { + fn hashed_post_state( + &self, + bundle_state: &revm::db::BundleState, + ) -> reth_trie::HashedPostState { + self.0.hashed_post_state(bundle_state) + } +} + impl StateProvider for StateProviderTraitObjWrapper<'_> { fn storage( &self, diff --git a/crates/rpc/rpc-eth-types/src/cache/mod.rs 
b/crates/rpc/rpc-eth-types/src/cache/mod.rs index 70c8b1a4f54f..7a0d9dfa0f01 100644 --- a/crates/rpc/rpc-eth-types/src/cache/mod.rs +++ b/crates/rpc/rpc-eth-types/src/cache/mod.rs @@ -1,17 +1,16 @@ //! Async caching support for eth RPC +use super::{EthStateCacheConfig, MultiConsumerLruCache}; use alloy_consensus::Header; use alloy_eips::BlockHashOrNumber; use alloy_primitives::B256; use futures::{future::Either, Stream, StreamExt}; use reth_chain_state::CanonStateNotification; use reth_errors::{ProviderError, ProviderResult}; -use reth_evm::{provider::EvmEnvProvider, ConfigureEvm}; use reth_execution_types::Chain; use reth_primitives::{Receipt, SealedBlockWithSenders, TransactionSigned}; use reth_storage_api::{BlockReader, StateProviderFactory, TransactionVariant}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; -use revm::primitives::{BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; use schnellru::{ByLength, Limiter}; use std::{ future::Future, @@ -25,8 +24,6 @@ use tokio::sync::{ }; use tokio_stream::wrappers::UnboundedReceiverStream; -use super::{EthStateCacheConfig, MultiConsumerLruCache}; - pub mod config; pub mod db; pub mod metrics; @@ -43,8 +40,8 @@ type BlockWithSendersResponseSender = /// The type that can send the response to the requested receipts of a block. 
type ReceiptsResponseSender = oneshot::Sender>>>>; -/// The type that can send the response to a requested env -type EnvResponseSender = oneshot::Sender>; +/// The type that can send the response to a requested header +type HeaderResponseSender = oneshot::Sender>; type BlockLruCache = MultiConsumerLruCache< B256, @@ -56,8 +53,7 @@ type BlockLruCache = MultiConsumerLruCache< type ReceiptsLruCache = MultiConsumerLruCache>, L, ReceiptsResponseSender>; -type EnvLruCache = - MultiConsumerLruCache; +type HeaderLruCache = MultiConsumerLruCache; /// Provides async access to cached eth data /// @@ -70,26 +66,24 @@ pub struct EthStateCache { impl EthStateCache { /// Creates and returns both [`EthStateCache`] frontend and the memory bound service. - fn create( + fn create( provider: Provider, action_task_spawner: Tasks, - evm_config: EvmConfig, max_blocks: u32, max_receipts: u32, - max_envs: u32, + max_headers: u32, max_concurrent_db_operations: usize, - ) -> (Self, EthStateCacheService) { + ) -> (Self, EthStateCacheService) { let (to_service, rx) = unbounded_channel(); let service = EthStateCacheService { provider, full_block_cache: BlockLruCache::new(max_blocks, "blocks"), receipts_cache: ReceiptsLruCache::new(max_receipts, "receipts"), - evm_env_cache: EnvLruCache::new(max_envs, "evm_env"), + headers_cache: HeaderLruCache::new(max_headers, "headers"), action_tx: to_service.clone(), action_rx: UnboundedReceiverStream::new(rx), action_task_spawner, rate_limiter: Arc::new(Semaphore::new(max_concurrent_db_operations)), - evm_config, }; let cache = Self { to_service }; (cache, service) @@ -99,52 +93,52 @@ impl EthStateCache { /// [`tokio::spawn`]. 
/// /// See also [`Self::spawn_with`] - pub fn spawn( - provider: Provider, - config: EthStateCacheConfig, - evm_config: EvmConfig, - ) -> Self + pub fn spawn(provider: Provider, config: EthStateCacheConfig) -> Self where Provider: StateProviderFactory - + BlockReader - + EvmEnvProvider - + Clone + + BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + > + Clone + Unpin + 'static, - EvmConfig: ConfigureEvm
, { - Self::spawn_with(provider, config, TokioTaskExecutor::default(), evm_config) + Self::spawn_with(provider, config, TokioTaskExecutor::default()) } /// Creates a new async LRU backed cache service task and spawns it to a new task via the given /// spawner. /// /// The cache is memory limited by the given max bytes values. - pub fn spawn_with( + pub fn spawn_with( provider: Provider, config: EthStateCacheConfig, executor: Tasks, - evm_config: EvmConfig, ) -> Self where Provider: StateProviderFactory - + BlockReader - + EvmEnvProvider - + Clone + + BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + > + Clone + Unpin + 'static, Tasks: TaskSpawner + Clone + 'static, - EvmConfig: ConfigureEvm
, { - let EthStateCacheConfig { max_blocks, max_receipts, max_envs, max_concurrent_db_requests } = - config; + let EthStateCacheConfig { + max_blocks, + max_receipts, + max_headers, + max_concurrent_db_requests, + } = config; let (this, service) = Self::create( provider, executor.clone(), - evm_config, max_blocks, max_receipts, - max_envs, + max_headers, max_concurrent_db_requests, ); executor.spawn_critical("eth state cache", Box::pin(service)); @@ -188,16 +182,12 @@ impl EthStateCache { Ok(block.zip(receipts)) } - /// Requests the evm env config for the block hash. + /// Requests the header for the given hash. /// - /// Returns an error if the corresponding header (required for populating the envs) was not - /// found. - pub async fn get_evm_env( - &self, - block_hash: B256, - ) -> ProviderResult<(CfgEnvWithHandlerCfg, BlockEnv)> { + /// Returns an error if the header is not found. + pub async fn get_header(&self, block_hash: B256) -> ProviderResult
{ let (response_tx, rx) = oneshot::channel(); - let _ = self.to_service.send(CacheAction::GetEnv { block_hash, response_tx }); + let _ = self.to_service.send(CacheAction::GetHeader { block_hash, response_tx }); rx.await.map_err(|_| ProviderError::CacheServiceUnavailable)? } } @@ -222,14 +212,13 @@ impl EthStateCache { pub(crate) struct EthStateCacheService< Provider, Tasks, - EvmConfig, LimitBlocks = ByLength, LimitReceipts = ByLength, - LimitEnvs = ByLength, + LimitHeaders = ByLength, > where LimitBlocks: Limiter>, LimitReceipts: Limiter>>, - LimitEnvs: Limiter, + LimitHeaders: Limiter, { /// The type used to lookup data from disk provider: Provider, @@ -237,8 +226,11 @@ pub(crate) struct EthStateCacheService< full_block_cache: BlockLruCache, /// The LRU cache for full blocks grouped by their hash. receipts_cache: ReceiptsLruCache, - /// The LRU cache for revm environments - evm_env_cache: EnvLruCache, + /// The LRU cache for headers. + /// + /// Headers are cached because they are required to populate the environment for execution + /// (evm). + headers_cache: HeaderLruCache, /// Sender half of the action channel. action_tx: UnboundedSender, /// Receiver half of the action channel. @@ -247,15 +239,12 @@ pub(crate) struct EthStateCacheService< action_task_spawner: Tasks, /// Rate limiter rate_limiter: Arc, - /// The type that determines how to configure the EVM. - evm_config: EvmConfig, } -impl EthStateCacheService +impl EthStateCacheService where - Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, + Provider: StateProviderFactory + BlockReader + Clone + Unpin + 'static, Tasks: TaskSpawner + Clone + 'static, - EvmConfig: ConfigureEvm
, { fn on_new_block( &mut self, @@ -341,20 +330,21 @@ where fn update_cached_metrics(&self) { self.full_block_cache.update_cached_metrics(); self.receipts_cache.update_cached_metrics(); - self.evm_env_cache.update_cached_metrics(); + self.headers_cache.update_cached_metrics(); } } -impl Future for EthStateCacheService +impl Future for EthStateCacheService where Provider: StateProviderFactory - + BlockReader - + EvmEnvProvider - + Clone + + BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + > + Clone + Unpin + 'static, Tasks: TaskSpawner + Clone + 'static, - EvmConfig: ConfigureEvm
, { type Output = (); @@ -421,39 +411,30 @@ where })); } } - CacheAction::GetEnv { block_hash, response_tx } => { - // check if env data is cached - if let Some(env) = this.evm_env_cache.get(&block_hash).cloned() { - let _ = response_tx.send(Ok(env)); + CacheAction::GetHeader { block_hash, response_tx } => { + // check if the header is cached + if let Some(header) = this.headers_cache.get(&block_hash).cloned() { + let _ = response_tx.send(Ok(header)); continue } - // env data is not in the cache, request it if this is the first + // header is not in the cache, request it if this is the first // consumer - if this.evm_env_cache.queue(block_hash, response_tx) { + if this.headers_cache.queue(block_hash, response_tx) { let provider = this.provider.clone(); let action_tx = this.action_tx.clone(); let rate_limiter = this.rate_limiter.clone(); - let evm_config = this.evm_config.clone(); this.action_task_spawner.spawn_blocking(Box::pin(async move { // Acquire permit let _permit = rate_limiter.acquire().await; - let mut cfg = CfgEnvWithHandlerCfg::new_with_spec_id( - CfgEnv::default(), - SpecId::LATEST, - ); - let mut block_env = BlockEnv::default(); - let res = provider - .fill_env_at( - &mut cfg, - &mut block_env, - block_hash.into(), - evm_config, - ) - .map(|_| (cfg, block_env)); - let _ = action_tx.send(CacheAction::EnvResult { + let header = provider.header(&block_hash).and_then(|header| { + header.ok_or_else(|| { + ProviderError::HeaderNotFound(block_hash.into()) + }) + }); + let _ = action_tx.send(CacheAction::HeaderResult { block_hash, - res: Box::new(res), + res: Box::new(header), }); })); } @@ -472,18 +453,18 @@ where this.on_new_block(block_hash, Err(e)); } }, - CacheAction::EnvResult { block_hash, res } => { + CacheAction::HeaderResult { block_hash, res } => { let res = *res; - if let Some(queued) = this.evm_env_cache.remove(&block_hash) { + if let Some(queued) = this.headers_cache.remove(&block_hash) { // send the response to queued senders for tx in queued { 
let _ = tx.send(res.clone()); } } - // cache good env data + // cache good header if let Ok(data) = res { - this.evm_env_cache.insert(block_hash, data); + this.headers_cache.insert(block_hash, data); } } CacheAction::CacheNewCanonicalChain { chain_change } => { @@ -528,9 +509,9 @@ enum CacheAction { block_hash: B256, response_tx: BlockWithSendersResponseSender, }, - GetEnv { + GetHeader { block_hash: B256, - response_tx: EnvResponseSender, + response_tx: HeaderResponseSender, }, GetReceipts { block_hash: B256, @@ -544,9 +525,9 @@ enum CacheAction { block_hash: B256, res: ProviderResult>>>, }, - EnvResult { + HeaderResult { block_hash: B256, - res: Box>, + res: Box>, }, CacheNewCanonicalChain { chain_change: ChainChange, diff --git a/crates/rpc/rpc-eth-types/src/fee_history.rs b/crates/rpc/rpc-eth-types/src/fee_history.rs index 922c3f9d474a..35233e6c2192 100644 --- a/crates/rpc/rpc-eth-types/src/fee_history.rs +++ b/crates/rpc/rpc-eth-types/src/fee_history.rs @@ -17,13 +17,13 @@ use metrics::atomics::AtomicU64; use reth_chain_state::CanonStateNotification; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_primitives::{NodePrimitives, Receipt, SealedBlock, TransactionSigned}; +use reth_primitives_traits::{Block, BlockBody}; +use reth_rpc_server_types::constants::gas_oracle::MAX_HEADER_HISTORY; use reth_storage_api::BlockReaderIdExt; use revm_primitives::{calc_blob_gasprice, calc_excess_blob_gas}; use serde::{Deserialize, Serialize}; use tracing::trace; -use reth_rpc_server_types::constants::gas_oracle::MAX_HEADER_HISTORY; - use super::{EthApiError, EthStateCache}; /// Contains cached fee history entries for blocks. 
@@ -366,7 +366,7 @@ impl FeeHistoryEntry { base_fee_per_gas: block.base_fee_per_gas.unwrap_or_default(), gas_used_ratio: block.gas_used as f64 / block.gas_limit as f64, base_fee_per_blob_gas: block.blob_fee(), - blob_gas_used_ratio: block.blob_gas_used() as f64 / + blob_gas_used_ratio: block.body().blob_gas_used() as f64 / alloy_eips::eip4844::MAX_DATA_GAS_PER_BLOCK as f64, excess_blob_gas: block.excess_blob_gas, blob_gas_used: block.blob_gas_used, diff --git a/crates/rpc/rpc-eth-types/src/gas_oracle.rs b/crates/rpc/rpc-eth-types/src/gas_oracle.rs index 3f8186ae1502..73cab209fd54 100644 --- a/crates/rpc/rpc-eth-types/src/gas_oracle.rs +++ b/crates/rpc/rpc-eth-types/src/gas_oracle.rs @@ -1,7 +1,7 @@ //! An implementation of the eth gas price oracle, used for providing gas price estimates based on //! previous blocks. -use alloy_consensus::constants::GWEI_TO_WEI; +use alloy_consensus::{constants::GWEI_TO_WEI, BlockHeader}; use alloy_eips::BlockNumberOrTag; use alloy_primitives::{B256, U256}; use alloy_rpc_types_eth::BlockId; @@ -142,8 +142,8 @@ where let mut populated_blocks = 0; // we only check a maximum of 2 * max_block_history, or the number of blocks in the chain - let max_blocks = if self.oracle_config.max_block_history * 2 > header.number { - header.number + let max_blocks = if self.oracle_config.max_block_history * 2 > header.number() { + header.number() } else { self.oracle_config.max_block_history * 2 }; diff --git a/crates/rpc/rpc-eth-types/src/pending_block.rs b/crates/rpc/rpc-eth-types/src/pending_block.rs index 116026c2ddde..bd23e3f42abf 100644 --- a/crates/rpc/rpc-eth-types/src/pending_block.rs +++ b/crates/rpc/rpc-eth-types/src/pending_block.rs @@ -8,7 +8,7 @@ use alloy_consensus::BlockHeader; use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::B256; use derive_more::Constructor; -use reth_primitives::{Receipt, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{Receipt, SealedBlockWithSenders}; use 
reth_primitives_traits::Block; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}; @@ -25,28 +25,28 @@ pub struct PendingBlockEnv { /// The origin for a configured [`PendingBlockEnv`] #[derive(Clone, Debug)] -pub enum PendingBlockEnvOrigin { +pub enum PendingBlockEnvOrigin { /// The pending block as received from the CL. - ActualPending(SealedBlockWithSenders), + ActualPending(SealedBlockWithSenders, Vec), /// The _modified_ header of the latest block. /// /// This derives the pending state based on the latest header by modifying: /// - the timestamp /// - the block number /// - fees - DerivedFromLatest(SealedHeader), + DerivedFromLatest(B256), } -impl PendingBlockEnvOrigin { +impl PendingBlockEnvOrigin { /// Returns true if the origin is the actual pending block as received from the CL. pub const fn is_actual_pending(&self) -> bool { - matches!(self, Self::ActualPending(_)) + matches!(self, Self::ActualPending(_, _)) } /// Consumes the type and returns the actual pending block. pub fn into_actual_pending(self) -> Option> { match self { - Self::ActualPending(block) => Some(block), + Self::ActualPending(block, _) => Some(block), _ => None, } } @@ -57,8 +57,8 @@ impl PendingBlockEnvOrigin { /// identify the block by its hash (latest block). pub fn state_block_id(&self) -> BlockId { match self { - Self::ActualPending(_) => BlockNumberOrTag::Pending.into(), - Self::DerivedFromLatest(header) => BlockId::Hash(header.hash().into()), + Self::ActualPending(_, _) => BlockNumberOrTag::Pending.into(), + Self::DerivedFromLatest(hash) => BlockId::Hash((*hash).into()), } } @@ -69,16 +69,8 @@ impl PendingBlockEnvOrigin { /// header. pub fn build_target_hash(&self) -> B256 { match self { - Self::ActualPending(block) => block.header().parent_hash(), - Self::DerivedFromLatest(header) => header.hash(), - } - } - - /// Returns the header this pending block is based on. 
- pub fn header(&self) -> &SealedHeader { - match self { - Self::ActualPending(block) => &block.header, - Self::DerivedFromLatest(header) => header, + Self::ActualPending(block, _) => block.header().parent_hash(), + Self::DerivedFromLatest(hash) => *hash, } } } diff --git a/crates/rpc/rpc-eth-types/src/receipt.rs b/crates/rpc/rpc-eth-types/src/receipt.rs index 3136d42e9580..b7f82782b0bc 100644 --- a/crates/rpc/rpc-eth-types/src/receipt.rs +++ b/crates/rpc/rpc-eth-types/src/receipt.rs @@ -14,7 +14,7 @@ pub fn build_receipt( meta: TransactionMeta, receipt: &Receipt, all_receipts: &[Receipt], - build_envelope: impl FnOnce(ReceiptWithBloom) -> T, + build_envelope: impl FnOnce(ReceiptWithBloom>) -> T, ) -> EthResult> { // Note: we assume this transaction is valid, because it's mined (or part of pending block) // and we don't need to check for pre EIP-2 diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index 5a0daa1b42f0..a10b4afff9d7 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -12,16 +12,12 @@ use reth_primitives::{ proofs::{calculate_receipt_root, calculate_transaction_root}, BlockBody, BlockWithSenders, Receipt, TransactionSigned, }; -use reth_revm::database::StateProviderDatabase; use reth_rpc_server_types::result::rpc_err; use reth_rpc_types_compat::{block::from_block, TransactionCompat}; -use reth_storage_api::StateRootProvider; -use reth_trie::{HashedPostState, HashedStorage}; -use revm::{db::CacheDB, Database}; -use revm_primitives::{keccak256, Address, BlockEnv, Bytes, ExecutionResult, TxKind, B256, U256}; +use revm::Database; +use revm_primitives::{Address, BlockEnv, Bytes, ExecutionResult, TxKind, B256, U256}; use crate::{ - cache::db::StateProviderTraitObjWrapper, error::{api::FromEthApiError, ToRpcError}, EthApiError, RevertError, RpcInvalidTransactionError, }; @@ -143,7 +139,6 @@ where } /// Handles outputs of the calls execution and builds a 
[`SimulatedBlock`]. -#[expect(clippy::complexity)] pub fn build_block>( results: Vec<(Address, ExecutionResult)>, transactions: Vec, @@ -151,7 +146,6 @@ pub fn build_block>( parent_hash: B256, total_difficulty: U256, full_transactions: bool, - db: &CacheDB>>, tx_resp_builder: &T, ) -> Result>, T::Error> { let mut calls: Vec = Vec::with_capacity(results.len()); @@ -229,24 +223,27 @@ pub fn build_block>( calls.push(call); } - let mut hashed_state = HashedPostState::default(); - for (address, account) in &db.accounts { - let hashed_address = keccak256(address); - hashed_state.accounts.insert(hashed_address, Some(account.info.clone().into())); - - let storage = hashed_state - .storages - .entry(hashed_address) - .or_insert_with(|| HashedStorage::new(account.account_state.is_storage_cleared())); - - for (slot, value) in &account.storage { - let slot = B256::from(*slot); - let hashed_slot = keccak256(slot); - storage.storage.insert(hashed_slot, *value); - } - } - - let state_root = db.db.state_root(hashed_state).map_err(T::Error::from_eth_err)?; + // TODO: uncomment once performance cost is acceptable + // + // let mut hashed_state = HashedPostState::default(); + // for (address, account) in &db.accounts { + // let hashed_address = keccak256(address); + // hashed_state.accounts.insert(hashed_address, Some(account.info.clone().into())); + + // let storage = hashed_state + // .storages + // .entry(hashed_address) + // .or_insert_with(|| HashedStorage::new(account.account_state.is_storage_cleared())); + + // for (slot, value) in &account.storage { + // let slot = B256::from(*slot); + // let hashed_slot = keccak256(slot); + // storage.storage.insert(hashed_slot, *value); + // } + // } + + // let state_root = db.db.state_root(hashed_state).map_err(T::Error::from_eth_err)?; + let state_root = B256::ZERO; let header = alloy_consensus::Header { beneficiary: block_env.coinbase, diff --git a/crates/rpc/rpc-eth-types/src/transaction.rs b/crates/rpc/rpc-eth-types/src/transaction.rs 
index 83ef97807de0..f994638d3af8 100644 --- a/crates/rpc/rpc-eth-types/src/transaction.rs +++ b/crates/rpc/rpc-eth-types/src/transaction.rs @@ -4,7 +4,7 @@ use alloy_primitives::B256; use alloy_rpc_types_eth::TransactionInfo; -use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered}; +use reth_primitives::{RecoveredTx, TransactionSigned}; use reth_primitives_traits::SignedTransaction; use reth_rpc_types_compat::{ transaction::{from_recovered, from_recovered_with_block_context}, @@ -15,13 +15,13 @@ use reth_rpc_types_compat::{ #[derive(Debug, Clone, Eq, PartialEq)] pub enum TransactionSource { /// Transaction exists in the pool (Pending) - Pool(TransactionSignedEcRecovered), + Pool(RecoveredTx), /// Transaction already included in a block /// /// This can be a historical block or a pending block (received from the CL) Block { /// Transaction fetched via provider - transaction: TransactionSignedEcRecovered, + transaction: RecoveredTx, /// Index of the transaction in the block index: u64, /// Hash of the block. @@ -37,7 +37,7 @@ pub enum TransactionSource { impl TransactionSource { /// Consumes the type and returns the wrapped transaction. 
- pub fn into_recovered(self) -> TransactionSignedEcRecovered { + pub fn into_recovered(self) -> RecoveredTx { self.into() } @@ -63,7 +63,7 @@ impl TransactionSource { } /// Returns the transaction and block related info, if not pending - pub fn split(self) -> (TransactionSignedEcRecovered, TransactionInfo) { + pub fn split(self) -> (RecoveredTx, TransactionInfo) { match self { Self::Pool(tx) => { let hash = tx.trie_hash(); @@ -86,7 +86,7 @@ impl TransactionSource { } } -impl From> for TransactionSignedEcRecovered { +impl From> for RecoveredTx { fn from(value: TransactionSource) -> Self { match value { TransactionSource::Pool(tx) => tx, diff --git a/crates/rpc/rpc-server-types/src/constants.rs b/crates/rpc/rpc-server-types/src/constants.rs index 48019745a34f..89b496da0fca 100644 --- a/crates/rpc/rpc-server-types/src/constants.rs +++ b/crates/rpc/rpc-server-types/src/constants.rs @@ -113,8 +113,8 @@ pub mod cache { /// Default cache size for the receipts cache: 2000 receipts. pub const DEFAULT_RECEIPT_CACHE_MAX_LEN: u32 = 2000; - /// Default cache size for the env cache: 1000 envs. - pub const DEFAULT_ENV_CACHE_MAX_LEN: u32 = 1000; + /// Default cache size for the header cache: 1000 headers. + pub const DEFAULT_HEADER_CACHE_MAX_LEN: u32 = 1000; /// Default number of concurrent database requests. 
pub const DEFAULT_CONCURRENT_DB_REQUESTS: usize = 512; diff --git a/crates/rpc/rpc-server-types/src/module.rs b/crates/rpc/rpc-server-types/src/module.rs index 43e4a9374369..3eb34b34a7f2 100644 --- a/crates/rpc/rpc-server-types/src/module.rs +++ b/crates/rpc/rpc-server-types/src/module.rs @@ -269,6 +269,8 @@ pub enum RethRpcModule { Ots, /// `flashbots_` module Flashbots, + /// `miner_` module + Miner, } // === impl RethRpcModule === diff --git a/crates/rpc/rpc-types-compat/Cargo.toml b/crates/rpc/rpc-types-compat/Cargo.toml index 887986ada122..d39443561175 100644 --- a/crates/rpc/rpc-types-compat/Cargo.toml +++ b/crates/rpc/rpc-types-compat/Cargo.toml @@ -14,10 +14,8 @@ workspace = true [dependencies] # reth reth-primitives.workspace = true -reth-trie-common.workspace = true # ethereum -alloy-serde.workspace = true alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index 46bc9502c579..f504d57addc7 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -5,7 +5,6 @@ use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, Header, EMPTY_OMMER_RO use alloy_eips::{ eip2718::{Decodable2718, Encodable2718}, eip4895::Withdrawals, - eip7685::Requests, }; use alloy_primitives::{B256, U256}; use alloy_rpc_types_engine::{ @@ -77,6 +76,7 @@ pub fn try_payload_v1_to_block(payload: ExecutionPayloadV1) -> Result ExecutionPayload { /// Converts [`SealedBlock`] to [`ExecutionPayloadV1`] pub fn block_to_payload_v1(value: SealedBlock) -> ExecutionPayloadV1 { - let transactions = value.raw_transactions(); + let transactions = value.encoded_2718_transactions(); ExecutionPayloadV1 { parent_hash: value.parent_hash, fee_recipient: value.beneficiary, @@ -145,7 +145,7 @@ pub fn block_to_payload_v1(value: SealedBlock) -> ExecutionPayloadV1 { /// Converts [`SealedBlock`] to 
[`ExecutionPayloadV2`] pub fn block_to_payload_v2(value: SealedBlock) -> ExecutionPayloadV2 { - let transactions = value.raw_transactions(); + let transactions = value.encoded_2718_transactions(); ExecutionPayloadV2 { payload_inner: ExecutionPayloadV1 { @@ -170,7 +170,7 @@ pub fn block_to_payload_v2(value: SealedBlock) -> ExecutionPayloadV2 { /// Converts [`SealedBlock`] to [`ExecutionPayloadV3`], and returns the parent beacon block root. pub fn block_to_payload_v3(value: SealedBlock) -> ExecutionPayloadV3 { - let transactions = value.raw_transactions(); + let transactions = value.encoded_2718_transactions(); ExecutionPayloadV3 { blob_gas_used: value.blob_gas_used.unwrap_or_default(), excess_blob_gas: value.excess_blob_gas.unwrap_or_default(), @@ -270,7 +270,7 @@ pub fn try_into_block( }; base_payload.header.parent_beacon_block_root = sidecar.parent_beacon_block_root(); - base_payload.header.requests_hash = sidecar.requests().map(Requests::requests_hash); + base_payload.header.requests_hash = sidecar.requests_hash(); Ok(base_payload) } @@ -334,7 +334,7 @@ pub fn convert_to_payload_body_v1(value: Block) -> ExecutionPayloadBodyV1 { /// Transforms a [`SealedBlock`] into a [`ExecutionPayloadV1`] pub fn execution_payload_from_sealed_block(value: SealedBlock) -> ExecutionPayloadV1 { - let transactions = value.raw_transactions(); + let transactions = value.encoded_2718_transactions(); ExecutionPayloadV1 { parent_hash: value.parent_hash, fee_recipient: value.beneficiary, diff --git a/crates/rpc/rpc-types-compat/src/lib.rs b/crates/rpc/rpc-types-compat/src/lib.rs index c324eebc8723..206d502f87d3 100644 --- a/crates/rpc/rpc-types-compat/src/lib.rs +++ b/crates/rpc/rpc-types-compat/src/lib.rs @@ -12,7 +12,5 @@ pub mod block; pub mod engine; -pub mod proof; pub mod transaction; - pub use transaction::TransactionCompat; diff --git a/crates/rpc/rpc-types-compat/src/proof.rs b/crates/rpc/rpc-types-compat/src/proof.rs deleted file mode 100644 index b860bc3491d7..000000000000 --- 
a/crates/rpc/rpc-types-compat/src/proof.rs +++ /dev/null @@ -1,37 +0,0 @@ -//! Compatibility functions for rpc proof related types. - -use alloy_rpc_types_eth::{EIP1186AccountProofResponse, EIP1186StorageProof}; -use alloy_serde::JsonStorageKey; -use reth_trie_common::{AccountProof, StorageProof}; - -/// Creates a new rpc storage proof from a primitive storage proof type. -pub fn from_primitive_storage_proof( - proof: StorageProof, - slot: JsonStorageKey, -) -> EIP1186StorageProof { - EIP1186StorageProof { key: slot, value: proof.value, proof: proof.proof } -} - -/// Creates a new rpc account proof from a primitive account proof type. -pub fn from_primitive_account_proof( - proof: AccountProof, - slots: Vec, -) -> EIP1186AccountProofResponse { - let info = proof.info.unwrap_or_default(); - EIP1186AccountProofResponse { - address: proof.address, - balance: info.balance, - code_hash: info.get_bytecode_hash(), - nonce: info.nonce, - storage_hash: proof.storage_root, - account_proof: proof.proof, - storage_proof: proof - .storage_proofs - .into_iter() - .filter_map(|proof| { - let input_slot = slots.iter().find(|s| s.as_b256() == proof.key)?; - Some(from_primitive_storage_proof(proof, *input_slot)) - }) - .collect(), - } -} diff --git a/crates/rpc/rpc-types-compat/src/transaction.rs b/crates/rpc/rpc-types-compat/src/transaction.rs index b439b61d44e8..d6180ca1ee20 100644 --- a/crates/rpc/rpc-types-compat/src/transaction.rs +++ b/crates/rpc/rpc-types-compat/src/transaction.rs @@ -8,7 +8,7 @@ use alloy_rpc_types_eth::{ request::{TransactionInput, TransactionRequest}, TransactionInfo, }; -use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered}; +use reth_primitives::{RecoveredTx, TransactionSigned}; use serde::{Deserialize, Serialize}; /// Create a new rpc transaction result for a mined transaction, using the given block hash, @@ -17,7 +17,7 @@ use serde::{Deserialize, Serialize}; /// The block hash, number, and tx index fields should be from the original 
block where the /// transaction was mined. pub fn from_recovered_with_block_context>( - tx: TransactionSignedEcRecovered, + tx: RecoveredTx, tx_info: TransactionInfo, resp_builder: &T, ) -> Result { @@ -27,7 +27,7 @@ pub fn from_recovered_with_block_context>( /// Create a new rpc transaction result for a _pending_ signed transaction, setting block /// environment related fields to `None`. pub fn from_recovered>( - tx: TransactionSignedEcRecovered, + tx: RecoveredTx, resp_builder: &T, ) -> Result { resp_builder.fill(tx, TransactionInfo::default()) @@ -53,7 +53,7 @@ pub trait TransactionCompat: /// environment related fields to `None`. fn fill( &self, - tx: TransactionSignedEcRecovered, + tx: RecoveredTx, tx_inf: TransactionInfo, ) -> Result; @@ -63,8 +63,8 @@ pub trait TransactionCompat: fn otterscan_api_truncate_input(tx: &mut Self::Transaction); } -/// Convert [`TransactionSignedEcRecovered`] to [`TransactionRequest`] -pub fn transaction_to_call_request(tx: TransactionSignedEcRecovered) -> TransactionRequest { +/// Convert [`RecoveredTx`] to [`TransactionRequest`] +pub fn transaction_to_call_request(tx: RecoveredTx) -> TransactionRequest { let from = tx.signer(); let to = Some(tx.transaction.to().into()); let gas = tx.transaction.gas_limit(); diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 834b1a963bfe..5efae46f0061 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -34,7 +34,6 @@ reth-evm.workspace = true reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true reth-network-types.workspace = true -reth-trie.workspace = true reth-consensus.workspace = true reth-payload-validator.workspace = true diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 9fc1be93a2f2..765b7e719b8f 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -18,17 +18,17 @@ use reth_evm::{ execute::{BlockExecutorProvider, Executor}, ConfigureEvmEnv, }; -use reth_primitives::{Block, 
BlockExt, SealedBlockWithSenders}; +use reth_primitives::{Block, BlockExt, NodePrimitives, SealedBlockWithSenders}; use reth_primitives_traits::SignedTransaction; use reth_provider::{ - BlockReaderIdExt, ChainSpecProvider, HeaderProvider, StateProofProvider, StateProviderFactory, - TransactionVariant, + BlockReader, BlockReaderIdExt, ChainSpecProvider, HeaderProvider, StateProofProvider, + StateProviderFactory, TransactionVariant, }; use reth_revm::{database::StateProviderDatabase, witness::ExecutionWitnessRecord}; use reth_rpc_api::DebugApiServer; use reth_rpc_eth_api::{ helpers::{EthApiSpec, EthTransactions, TraceExt}, - EthApiTypes, FromEthApiError, + EthApiTypes, FromEthApiError, RpcNodeCore, }; use reth_rpc_eth_types::{EthApiError, StateCacheDb}; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; @@ -81,7 +81,9 @@ where + StateProviderFactory + 'static, Eth: EthApiTypes + TraceExt + 'static, - BlockExecutor: BlockExecutorProvider, + BlockExecutor: BlockExecutorProvider< + Primitives: NodePrimitives::Provider as BlockReader>::Block>, + >, { /// Acquires a permit to execute a tracing call. async fn acquire_trace_permit(&self) -> Result { @@ -382,10 +384,22 @@ where // let db = db.0; + let tx_info = TransactionInfo { + block_number: Some( + env.block.number.try_into().unwrap_or_default(), + ), + base_fee: Some( + env.block.basefee.try_into().unwrap_or_default(), + ), + hash: None, + block_hash: None, + index: None, + }; + let (res, _) = this.eth_api().inspect(&mut *db, env, &mut inspector)?; let frame = inspector - .try_into_mux_frame(&res, db) + .try_into_mux_frame(&res, db, tx_info) .map_err(Eth::Error::from_eth_err)?; Ok(frame.into()) }) @@ -656,6 +670,17 @@ where ) -> Result<(GethTrace, revm_primitives::EvmState), Eth::Error> { let GethDebugTracingOptions { config, tracer, tracer_config, .. 
} = opts; + let tx_info = TransactionInfo { + hash: transaction_context.as_ref().map(|c| c.tx_hash).unwrap_or_default(), + index: transaction_context + .as_ref() + .map(|c| c.tx_index.map(|i| i as u64)) + .unwrap_or_default(), + block_hash: transaction_context.as_ref().map(|c| c.block_hash).unwrap_or_default(), + block_number: Some(env.block.number.try_into().unwrap_or_default()), + base_fee: Some(env.block.basefee.try_into().unwrap_or_default()), + }; + if let Some(tracer) = tracer { return match tracer { GethDebugTracerType::BuiltInTracer(tracer) => match tracer { @@ -721,7 +746,7 @@ where let (res, _) = self.eth_api().inspect(&mut *db, env, &mut inspector)?; let frame = inspector - .try_into_mux_frame(&res, db) + .try_into_mux_frame(&res, db, tx_info) .map_err(Eth::Error::from_eth_err)?; return Ok((frame.into(), res.state)) } @@ -736,14 +761,6 @@ where ); let (res, env) = self.eth_api().inspect(db, env, &mut inspector)?; - - let tx_info = TransactionInfo { - hash: transaction_context.unwrap().tx_hash, - index: transaction_context.unwrap().tx_index.map(|index| index as u64), - block_hash: transaction_context.unwrap().block_hash, - block_number: Some(env.block.number.try_into().unwrap_or_default()), - base_fee: Some(env.block.basefee.try_into().unwrap_or_default()), - }; let frame: FlatCallFrame = inspector .with_transaction_gas_limit(env.tx.gas_limit) .into_parity_builder() @@ -800,7 +817,9 @@ where + StateProviderFactory + 'static, Eth: EthApiSpec + EthTransactions + TraceExt + 'static, - BlockExecutor: BlockExecutorProvider, + BlockExecutor: BlockExecutorProvider< + Primitives: NodePrimitives::Provider as BlockReader>::Block>, + >, { /// Handler for `debug_getRawHeader` async fn raw_header(&self, block_id: BlockId) -> RpcResult { diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index 10eec4dbf974..2924e6ea25fc 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -55,11 +55,14 @@ where let 
EthCallBundle { txs, block_number, + coinbase, state_block_number, + timeout: _, timestamp, gas_limit, difficulty, base_fee, + .. } = bundle; if txs.is_empty() { return Err(EthApiError::InvalidParams( @@ -79,7 +82,7 @@ where .map(recover_raw_transaction) .collect::, _>>()? .into_iter() - .map(|tx| tx.into_components()) + .map(|tx| tx.to_components()) .collect::>(); // Validate that the bundle does not contain more than MAX_BLOB_NUMBER_PER_BLOCK blob @@ -106,6 +109,10 @@ where // Note: the block number is considered the `parent` block: let (cfg, mut block_env, at) = self.eth_api().evm_env_at(block_id).await?; + if let Some(coinbase) = coinbase { + block_env.coinbase = coinbase; + } + // need to adjust the timestamp for the next block if let Some(timestamp) = timestamp { block_env.timestamp = U256::from(timestamp); @@ -117,8 +124,16 @@ where block_env.difficulty = U256::from(difficulty); } + // default to call gas limit unless user requests a smaller limit + block_env.gas_limit = U256::from(self.inner.eth_api.call_gas_limit()); if let Some(gas_limit) = gas_limit { - block_env.gas_limit = U256::from(gas_limit); + let gas_limit = U256::from(gas_limit); + if gas_limit > block_env.gas_limit { + return Err( + EthApiError::InvalidTransaction(RpcInvalidTransactionError::GasTooHigh).into() + ) + } + block_env.gas_limit = gas_limit; } if let Some(base_fee) = base_fee { diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index b6b37c9f393e..092d94851622 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -3,6 +3,7 @@ use std::sync::Arc; +use alloy_consensus::BlockHeader; use alloy_eips::BlockNumberOrTag; use alloy_network::Ethereum; use alloy_primitives::U256; @@ -286,7 +287,7 @@ where .header_by_number_or_tag(BlockNumberOrTag::Latest) .ok() .flatten() - .map(|header| header.number) + .map(|header| header.number()) .unwrap_or_default(), ); @@ -438,8 +439,11 @@ mod tests { use crate::EthApi; fn build_test_eth_api< - P: 
BlockReaderIdExt - + BlockReader + P: BlockReaderIdExt< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + > + BlockReader + ChainSpecProvider + EvmEnvProvider + StateProviderFactory @@ -450,7 +454,7 @@ mod tests { provider: P, ) -> EthApi { let evm_config = EthEvmConfig::new(provider.chain_spec()); - let cache = EthStateCache::spawn(provider.clone(), Default::default(), evm_config.clone()); + let cache = EthStateCache::spawn(provider.clone(), Default::default()); let fee_history_cache = FeeHistoryCache::new(cache.clone(), FeeHistoryCacheConfig::default()); diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index 132d99a5c1a3..6ed72b6ca746 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -1,14 +1,6 @@ //! `eth_` `Filter` RPC handler implementation -use std::{ - collections::HashMap, - fmt, - iter::StepBy, - ops::RangeInclusive, - sync::Arc, - time::{Duration, Instant}, -}; - +use alloy_consensus::BlockHeader; use alloy_primitives::TxHash; use alloy_rpc_types_eth::{ BlockNumHash, Filter, FilterBlockOption, FilterChanges, FilterId, FilteredParams, Log, @@ -17,7 +9,7 @@ use alloy_rpc_types_eth::{ use async_trait::async_trait; use jsonrpsee::{core::RpcResult, server::IdProvider}; use reth_chainspec::ChainInfo; -use reth_primitives::{Receipt, SealedBlockWithSenders, TransactionSignedEcRecovered}; +use reth_primitives::{Receipt, SealedBlockWithSenders}; use reth_provider::{BlockIdReader, BlockReader, ProviderError}; use reth_rpc_eth_api::{ EthApiTypes, EthFilterApiServer, FullEthApiTypes, RpcTransaction, TransactionCompat, @@ -30,6 +22,14 @@ use reth_rpc_server_types::{result::rpc_error_with_code, ToRpcResult}; use reth_rpc_types_compat::transaction::from_recovered; use reth_tasks::TaskSpawner; use reth_transaction_pool::{NewSubpoolTransactionStream, PoolTransaction, TransactionPool}; +use std::{ + collections::HashMap, + fmt, + iter::StepBy, + 
ops::RangeInclusive, + sync::Arc, + time::{Duration, Instant}, +}; use tokio::{ sync::{mpsc::Receiver, Mutex}, time::MissedTickBehavior, @@ -145,7 +145,7 @@ where impl EthFilter where Provider: BlockReader + BlockIdReader + 'static, - Pool: TransactionPool + 'static, + Pool: TransactionPool::Transaction> + 'static, Eth: FullEthApiTypes, { /// Returns all the filter changes for the given id, if any @@ -245,7 +245,7 @@ impl EthFilterApiServer> for EthFilter where Provider: BlockReader + BlockIdReader + 'static, - Pool: TransactionPool + 'static, + Pool: TransactionPool::Transaction> + 'static, Eth: FullEthApiTypes + 'static, { /// Handler for `eth_newFilter` @@ -381,7 +381,7 @@ where .header_by_hash_or_number(block_hash.into())? .ok_or_else(|| ProviderError::HeaderNotFound(block_hash.into()))?; - let block_num_hash = BlockNumHash::new(header.number, block_hash); + let block_num_hash = BlockNumHash::new(header.number(), block_hash); // we also need to ensure that the receipts are available and return an error if // not, in case the block hash been reorged @@ -403,7 +403,7 @@ where block_num_hash, &receipts, false, - header.timestamp, + header.timestamp(), )?; Ok(all_logs) @@ -484,20 +484,20 @@ where for (idx, header) in headers.iter().enumerate() { // only if filter matches - if FilteredParams::matches_address(header.logs_bloom, &address_filter) && - FilteredParams::matches_topics(header.logs_bloom, &topics_filter) + if FilteredParams::matches_address(header.logs_bloom(), &address_filter) && + FilteredParams::matches_topics(header.logs_bloom(), &topics_filter) { // these are consecutive headers, so we can use the parent hash of the next // block to get the current header's hash let block_hash = match headers.get(idx + 1) { - Some(parent) => parent.parent_hash, + Some(parent) => parent.parent_hash(), None => self .provider - .block_hash(header.number)? - .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?, + .block_hash(header.number())? 
+ .ok_or_else(|| ProviderError::HeaderNotFound(header.number().into()))?, }; - let num_hash = BlockNumHash::new(header.number, block_hash); + let num_hash = BlockNumHash::new(header.number(), block_hash); if let Some((receipts, maybe_block)) = self.receipts_and_maybe_block(&num_hash, chain_info.best_number).await? { @@ -510,16 +510,18 @@ where num_hash, &receipts, false, - header.timestamp, + header.timestamp(), )?; // size check but only if range is multiple blocks, so we always return all // logs of a single block let is_multi_block_range = from_block != to_block; if is_multi_block_range && all_logs.len() > self.max_logs_per_response { - return Err(EthFilterError::QueryExceedsMaxResults( - self.max_logs_per_response, - )) + return Err(EthFilterError::QueryExceedsMaxResults { + max_logs: self.max_logs_per_response, + from_block, + to_block: num_hash.number.saturating_sub(1), + }); } } } @@ -609,7 +611,7 @@ struct FullTransactionsReceiver { impl FullTransactionsReceiver where T: PoolTransaction + 'static, - TxCompat: TransactionCompat, + TxCompat: TransactionCompat, { /// Creates a new `FullTransactionsReceiver` encapsulating the provided transaction stream. fn new(stream: NewSubpoolTransactionStream, tx_resp_builder: TxCompat) -> Self { @@ -617,15 +619,12 @@ where } /// Returns all new pending transactions received since the last poll. 
- async fn drain(&self) -> FilterChanges - where - T: PoolTransaction>, - { + async fn drain(&self) -> FilterChanges { let mut pending_txs = Vec::new(); let mut prepared_stream = self.txs_stream.lock().await; while let Ok(tx) = prepared_stream.try_recv() { - match from_recovered(tx.transaction.to_recovered_transaction(), &self.tx_resp_builder) { + match from_recovered(tx.transaction.to_consensus(), &self.tx_resp_builder) { Ok(tx) => pending_txs.push(tx), Err(err) => { error!(target: "rpc", @@ -649,8 +648,8 @@ trait FullTransactionsFilter: fmt::Debug + Send + Sync + Unpin + 'static { impl FullTransactionsFilter for FullTransactionsReceiver where - T: PoolTransaction> + 'static, - TxCompat: TransactionCompat + 'static, + T: PoolTransaction + 'static, + TxCompat: TransactionCompat + 'static, { async fn drain(&self) -> FilterChanges { Self::drain(self).await @@ -724,8 +723,15 @@ pub enum EthFilterError { #[error("query exceeds max block range {0}")] QueryExceedsMaxBlocks(u64), /// Query result is too large. - #[error("query exceeds max results {0}")] - QueryExceedsMaxResults(usize), + #[error("query exceeds max results {max_logs}, retry with the range {from_block}-{to_block}")] + QueryExceedsMaxResults { + /// Maximum number of logs allowed per response + max_logs: usize, + /// Start block of the suggested retry range + from_block: u64, + /// End block of the suggested retry range (last successfully processed block) + to_block: u64, + }, /// Error serving request in `eth_` namespace. #[error(transparent)] EthAPIError(#[from] EthApiError), @@ -747,7 +753,7 @@ impl From for jsonrpsee::types::error::ErrorObject<'static> { EthFilterError::EthAPIError(err) => err.into(), err @ (EthFilterError::InvalidBlockRangeParams | EthFilterError::QueryExceedsMaxBlocks(_) | - EthFilterError::QueryExceedsMaxResults(_)) => { + EthFilterError::QueryExceedsMaxResults { .. 
}) => { rpc_error_with_code(jsonrpsee::types::error::INVALID_PARAMS_CODE, err.to_string()) } } diff --git a/crates/rpc/rpc/src/eth/helpers/pending_block.rs b/crates/rpc/rpc/src/eth/helpers/pending_block.rs index a67522ce0326..afd69a2f4041 100644 --- a/crates/rpc/rpc/src/eth/helpers/pending_block.rs +++ b/crates/rpc/rpc/src/eth/helpers/pending_block.rs @@ -3,13 +3,15 @@ use alloy_consensus::Header; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; -use reth_provider::{BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; +use reth_provider::{ + BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderTx, StateProviderFactory, +}; use reth_rpc_eth_api::{ helpers::{LoadPendingBlock, SpawnBlocking}, RpcNodeCore, }; use reth_rpc_eth_types::PendingBlock; -use reth_transaction_pool::TransactionPool; +use reth_transaction_pool::{PoolTransaction, TransactionPool}; use crate::EthApi; @@ -19,13 +21,17 @@ where Self: SpawnBlocking + RpcNodeCore< Provider: BlockReaderIdExt< + Transaction = reth_primitives::TransactionSigned, Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, > + EvmEnvProvider + ChainSpecProvider + StateProviderFactory, - Pool: TransactionPool, - Evm: ConfigureEvm
, + Pool: TransactionPool< + Transaction: PoolTransaction>, + >, + Evm: ConfigureEvm
>, >, { #[inline] diff --git a/crates/rpc/rpc/src/eth/helpers/state.rs b/crates/rpc/rpc/src/eth/helpers/state.rs index a3e909cf6f6f..1b9c5bffd219 100644 --- a/crates/rpc/rpc/src/eth/helpers/state.rs +++ b/crates/rpc/rpc/src/eth/helpers/state.rs @@ -52,8 +52,7 @@ mod tests { let pool = testing_pool(); let evm_config = EthEvmConfig::new(MAINNET.clone()); - let cache = - EthStateCache::spawn(NoopProvider::default(), Default::default(), evm_config.clone()); + let cache = EthStateCache::spawn(NoopProvider::default(), Default::default()); EthApi::new( NoopProvider::default(), pool, @@ -79,8 +78,7 @@ mod tests { let evm_config = EthEvmConfig::new(mock_provider.chain_spec()); mock_provider.extend_accounts(accounts); - let cache = - EthStateCache::spawn(mock_provider.clone(), Default::default(), evm_config.clone()); + let cache = EthStateCache::spawn(mock_provider.clone(), Default::default()); EthApi::new( mock_provider.clone(), pool, diff --git a/crates/rpc/rpc/src/eth/helpers/transaction.rs b/crates/rpc/rpc/src/eth/helpers/transaction.rs index 8ac0785b2620..7f2ca4f772a8 100644 --- a/crates/rpc/rpc/src/eth/helpers/transaction.rs +++ b/crates/rpc/rpc/src/eth/helpers/transaction.rs @@ -57,7 +57,7 @@ mod tests { let pool = testing_pool(); let evm_config = EthEvmConfig::new(noop_provider.chain_spec()); - let cache = EthStateCache::spawn(noop_provider, Default::default(), evm_config.clone()); + let cache = EthStateCache::spawn(noop_provider, Default::default()); let fee_history_cache = FeeHistoryCache::new(cache.clone(), FeeHistoryCacheConfig::default()); let eth_api = EthApi::new( diff --git a/crates/rpc/rpc/src/eth/helpers/types.rs b/crates/rpc/rpc/src/eth/helpers/types.rs index 157213b54e66..79fb6fcc907f 100644 --- a/crates/rpc/rpc/src/eth/helpers/types.rs +++ b/crates/rpc/rpc/src/eth/helpers/types.rs @@ -3,7 +3,7 @@ use alloy_consensus::{Signed, Transaction as _, TxEip4844Variant, TxEnvelope}; use alloy_network::{Ethereum, Network}; use alloy_rpc_types_eth::{Transaction, 
TransactionInfo}; -use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered}; +use reth_primitives::{RecoveredTx, TransactionSigned}; use reth_rpc_eth_api::EthApiTypes; use reth_rpc_eth_types::EthApiError; use reth_rpc_types_compat::TransactionCompat; @@ -37,7 +37,7 @@ where fn fill( &self, - tx: TransactionSignedEcRecovered, + tx: RecoveredTx, tx_info: TransactionInfo, ) -> Result { let from = tx.signer(); diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index 8ad809b8b186..58c62133730d 100644 --- a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -23,7 +23,7 @@ use reth_rpc_eth_types::logs_utils; use reth_rpc_server_types::result::{internal_rpc_err, invalid_params_rpc_err}; use reth_rpc_types_compat::transaction::from_recovered; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; -use reth_transaction_pool::{NewTransactionEvent, TransactionPool}; +use reth_transaction_pool::{NewTransactionEvent, PoolConsensusTx, TransactionPool}; use serde::Serialize; use tokio_stream::{ wrappers::{BroadcastStream, ReceiverStream}, @@ -95,7 +95,7 @@ where > + Clone + 'static, Network: NetworkInfo + Clone + 'static, - Eth: TransactionCompat + 'static, + Eth: TransactionCompat> + 'static, { /// Handler for `eth_subscribe` async fn subscribe( @@ -135,7 +135,7 @@ where > + Clone + 'static, Network: NetworkInfo + Clone + 'static, - Eth: TransactionCompat, + Eth: TransactionCompat>, { match kind { SubscriptionKind::NewHeads => { @@ -165,7 +165,7 @@ where // full transaction objects requested let stream = pubsub.full_pending_transaction_stream().filter_map(|tx| { let tx_value = match from_recovered( - tx.transaction.to_recovered_transaction(), + tx.transaction.to_consensus(), &tx_resp_builder, ) { Ok(tx) => { diff --git a/crates/rpc/rpc/src/eth/sim_bundle.rs b/crates/rpc/rpc/src/eth/sim_bundle.rs index f77b7e79da0c..87778ec6e650 100644 --- a/crates/rpc/rpc/src/eth/sim_bundle.rs +++ 
b/crates/rpc/rpc/src/eth/sim_bundle.rs @@ -172,7 +172,7 @@ where BundleItem::Tx { tx, can_revert } => { let recovered_tx = recover_raw_transaction(tx.clone()).map_err(EthApiError::from)?; - let (tx, signer) = recovered_tx.into_components(); + let (tx, signer) = recovered_tx.to_components(); let tx = tx.into_transaction(); let refund_percent = diff --git a/crates/rpc/rpc/src/lib.rs b/crates/rpc/rpc/src/lib.rs index 76fb96f91629..d957913dffb9 100644 --- a/crates/rpc/rpc/src/lib.rs +++ b/crates/rpc/rpc/src/lib.rs @@ -36,6 +36,7 @@ mod admin; mod debug; mod engine; pub mod eth; +mod miner; mod net; mod otterscan; mod reth; @@ -49,6 +50,7 @@ pub use admin::AdminApi; pub use debug::DebugApi; pub use engine::{EngineApi, EngineEthApi}; pub use eth::{EthApi, EthBundle, EthFilter, EthPubSub}; +pub use miner::MinerApi; pub use net::NetApi; pub use otterscan::OtterscanApi; pub use reth::RethApi; diff --git a/crates/rpc/rpc/src/miner.rs b/crates/rpc/rpc/src/miner.rs new file mode 100644 index 000000000000..ab8fa5e0cd28 --- /dev/null +++ b/crates/rpc/rpc/src/miner.rs @@ -0,0 +1,25 @@ +use alloy_primitives::{Bytes, U128}; +use async_trait::async_trait; +use jsonrpsee::core::RpcResult; +use reth_rpc_api::MinerApiServer; + +/// `miner` API implementation. +/// +/// This type provides the functionality for handling `miner` related requests. 
+#[derive(Clone, Debug, Default)] +pub struct MinerApi {} + +#[async_trait] +impl MinerApiServer for MinerApi { + fn set_extra(&self, _record: Bytes) -> RpcResult { + Ok(false) + } + + fn set_gas_price(&self, _gas_price: U128) -> RpcResult { + Ok(false) + } + + fn set_gas_limit(&self, _gas_price: U128) -> RpcResult { + Ok(false) + } +} diff --git a/crates/rpc/rpc/src/txpool.rs b/crates/rpc/rpc/src/txpool.rs index 3e46183b4661..4709c9878faf 100644 --- a/crates/rpc/rpc/src/txpool.rs +++ b/crates/rpc/rpc/src/txpool.rs @@ -8,10 +8,11 @@ use alloy_rpc_types_txpool::{ }; use async_trait::async_trait; use jsonrpsee::core::RpcResult; -use reth_primitives::TransactionSignedEcRecovered; use reth_rpc_api::TxPoolApiServer; use reth_rpc_types_compat::{transaction::from_recovered, TransactionCompat}; -use reth_transaction_pool::{AllPoolTransactions, PoolTransaction, TransactionPool}; +use reth_transaction_pool::{ + AllPoolTransactions, PoolConsensusTx, PoolTransaction, TransactionPool, +}; use tracing::trace; /// `txpool` API implementation. 
@@ -33,8 +34,8 @@ impl TxPoolApi { impl TxPoolApi where - Pool: TransactionPool + 'static, - Eth: TransactionCompat, + Pool: TransactionPool> + 'static, + Eth: TransactionCompat>, { fn content(&self) -> Result, Eth::Error> { #[inline] @@ -44,12 +45,12 @@ where resp_builder: &RpcTxB, ) -> Result<(), RpcTxB::Error> where - Tx: PoolTransaction>, - RpcTxB: TransactionCompat, + Tx: PoolTransaction, + RpcTxB: TransactionCompat, { content.entry(tx.sender()).or_default().insert( tx.nonce().to_string(), - from_recovered(tx.clone().into_consensus().into(), resp_builder)?, + from_recovered(tx.clone_into_consensus(), resp_builder)?, ); Ok(()) @@ -72,8 +73,8 @@ where #[async_trait] impl TxPoolApiServer for TxPoolApi where - Pool: TransactionPool + 'static, - Eth: TransactionCompat + 'static, + Pool: TransactionPool> + 'static, + Eth: TransactionCompat> + 'static, { /// Returns the number of transactions currently pending for inclusion in the next block(s), as /// well as the ones that are being scheduled for future execution only. 
@@ -96,19 +97,19 @@ where trace!(target: "rpc::eth", "Serving txpool_inspect"); #[inline] - fn insert>>( + fn insert>( tx: &T, inspect: &mut BTreeMap>, ) { let entry = inspect.entry(tx.sender()).or_default(); - let tx: TransactionSignedEcRecovered = tx.clone().into_consensus().into(); + let tx = tx.clone_into_consensus(); entry.insert( tx.nonce().to_string(), TxpoolInspectSummary { to: tx.to(), value: tx.value(), gas: tx.gas_limit() as u128, - gas_price: tx.transaction.max_fee_per_gas(), + gas_price: tx.max_fee_per_gas(), }, ); } diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index a5e29bb739f9..b72a5d35769f 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -1,21 +1,22 @@ -use alloy_consensus::{BlobTransactionValidationError, EnvKzgSettings, Transaction}; -use alloy_eips::eip4844::kzg_to_versioned_hash; +use alloy_consensus::{BlobTransactionValidationError, EnvKzgSettings, Transaction, TxReceipt}; +use alloy_eips::{eip4844::kzg_to_versioned_hash, eip7685::RequestsOrHash}; use alloy_rpc_types_beacon::relay::{ BidTrace, BuilderBlockValidationRequest, BuilderBlockValidationRequestV2, BuilderBlockValidationRequestV3, BuilderBlockValidationRequestV4, }; use alloy_rpc_types_engine::{ BlobsBundleV1, CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, PayloadError, + PraguePayloadFields, }; use async_trait::async_trait; use jsonrpsee::core::RpcResult; use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; -use reth_consensus::{Consensus, PostExecutionInput}; +use reth_consensus::{Consensus, FullConsensus, PostExecutionInput}; use reth_errors::{BlockExecutionError, ConsensusError, ProviderError}; use reth_ethereum_consensus::GAS_LIMIT_BOUND_DIVISOR; use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_payload_validator::ExecutionPayloadValidator; -use reth_primitives::{Block, GotExpected, Receipt, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{Block, 
GotExpected, NodePrimitives, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ AccountReader, BlockExecutionInput, BlockExecutionOutput, BlockReaderIdExt, HeaderProvider, StateProviderFactory, WithdrawalsProvider, @@ -24,7 +25,6 @@ use reth_revm::{cached::CachedReads, database::StateProviderDatabase}; use reth_rpc_api::BlockSubmissionValidationApiServer; use reth_rpc_server_types::result::internal_rpc_err; use reth_tasks::TaskSpawner; -use reth_trie::HashedPostState; use revm_primitives::{Address, B256, U256}; use serde::{Deserialize, Serialize}; use std::{collections::HashSet, sync::Arc}; @@ -44,7 +44,7 @@ where /// Create a new instance of the [`ValidationApi`] pub fn new( provider: Provider, - consensus: Arc, + consensus: Arc, executor_provider: E, config: ValidationApiConfig, task_spawner: Box, @@ -88,14 +88,19 @@ where impl ValidationApi where - Provider: BlockReaderIdExt + Provider: BlockReaderIdExt
+ ChainSpecProvider + StateProviderFactory + HeaderProvider + AccountReader + WithdrawalsProvider + 'static, - E: BlockExecutorProvider, + E: BlockExecutorProvider< + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, { /// Validates the given block and a [`BidTrace`] against it. pub async fn validate_message_against_block( @@ -180,7 +185,7 @@ where self.ensure_payment(&block, &output, &message)?; let state_root = - state_provider.state_root(HashedPostState::from_bundle_state(&output.state.state))?; + state_provider.state_root(state_provider.hashed_post_state(&output.state))?; if state_root != block.state_root { return Err(ConsensusError::BodyStateRootDiff( @@ -258,7 +263,7 @@ where fn ensure_payment( &self, block: &Block, - output: &BlockExecutionOutput, + output: &BlockExecutionOutput<::Receipt>, message: &BidTrace, ) -> Result<(), ValidationApiError> { let (mut balance_before, balance_after) = if let Some(acc) = @@ -292,7 +297,7 @@ where .zip(block.body.transactions.last()) .ok_or(ValidationApiError::ProposerPayment)?; - if !receipt.success { + if !receipt.status() { return Err(ValidationApiError::ProposerPayment) } @@ -381,7 +386,12 @@ where versioned_hashes: self .validate_blobs_bundle(request.request.blobs_bundle)?, }, - request.request.execution_requests.into(), + PraguePayloadFields { + requests: RequestsOrHash::Requests( + request.request.execution_requests.into(), + ), + target_blobs_per_block: request.request.target_blobs_per_block, + }, ), )? .try_seal_with_senders() @@ -399,7 +409,7 @@ where #[async_trait] impl BlockSubmissionValidationApiServer for ValidationApi where - Provider: BlockReaderIdExt + Provider: BlockReaderIdExt
+ ChainSpecProvider + StateProviderFactory + HeaderProvider @@ -407,7 +417,12 @@ where + WithdrawalsProvider + Clone + 'static, - E: BlockExecutorProvider, + E: BlockExecutorProvider< + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, { async fn validate_builder_submission_v1( &self, @@ -465,7 +480,7 @@ pub struct ValidationApiInner { /// The provider that can interact with the chain. provider: Provider, /// Consensus implementation. - consensus: Arc, + consensus: Arc, /// Execution payload validator. payload_validator: ExecutionPayloadValidator, /// Block executor factory. diff --git a/crates/stages/stages/src/sets.rs b/crates/stages/stages/src/sets.rs index d04a96470a03..df5a4c542bfa 100644 --- a/crates/stages/stages/src/sets.rs +++ b/crates/stages/stages/src/sets.rs @@ -20,8 +20,9 @@ //! # use reth_static_file::StaticFileProducer; //! # use reth_config::config::StageConfig; //! # use reth_evm::execute::BlockExecutorProvider; +//! # use reth_primitives::EthPrimitives; //! -//! # fn create(exec: impl BlockExecutorProvider) { +//! # fn create(exec: impl BlockExecutorProvider) { //! //! let provider_factory = create_test_provider_factory(); //! let static_file_producer = diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index c1fde11c2354..0f90ff69e46f 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -75,7 +75,9 @@ impl BodyStage { unwind_block: Option, ) -> Result<(), StageError> where - Provider: DBProvider + BlockReader + StaticFileProviderFactory, + Provider: DBProvider + + BlockReader
+ + StaticFileProviderFactory, { // Get id for the next tx_num of zero if there are no transactions. let next_tx_num = provider @@ -152,7 +154,7 @@ where Provider: DBProvider + StaticFileProviderFactory + StatsReader - + BlockReader + + BlockReader
+ BlockWriter>, D: BodyDownloader>, { diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index ce969f2577d8..c8cc89080867 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -67,7 +67,10 @@ use super::missing_static_data_error; /// values to [`tables::PlainStorageState`] // false positive, we cannot derive it if !DB: Debug. #[allow(missing_debug_implementations)] -pub struct ExecutionStage { +pub struct ExecutionStage +where + E: BlockExecutorProvider, +{ /// The stage's internal block executor executor_provider: E, /// The commit thresholds of the execution stage. @@ -82,25 +85,28 @@ pub struct ExecutionStage { /// Input for the post execute commit hook. /// Set after every [`ExecutionStage::execute`] and cleared after /// [`ExecutionStage::post_execute_commit`]. - post_execute_commit_input: Option, + post_execute_commit_input: Option>, /// Input for the post unwind commit hook. /// Set after every [`ExecutionStage::unwind`] and cleared after /// [`ExecutionStage::post_unwind_commit`]. - post_unwind_commit_input: Option, + post_unwind_commit_input: Option>, /// Handle to communicate with `ExEx` manager. - exex_manager_handle: ExExManagerHandle, + exex_manager_handle: ExExManagerHandle, /// Executor metrics. metrics: ExecutorMetrics, } -impl ExecutionStage { +impl ExecutionStage +where + E: BlockExecutorProvider, +{ /// Create new execution stage with specified config. 
pub fn new( executor_provider: E, thresholds: ExecutionStageThresholds, external_clean_threshold: u64, prune_modes: PruneModes, - exex_manager_handle: ExExManagerHandle, + exex_manager_handle: ExExManagerHandle, ) -> Self { Self { external_clean_threshold, @@ -187,7 +193,10 @@ impl ExecutionStage { unwind_to: Option, ) -> Result<(), StageError> where - Provider: StaticFileProviderFactory + DBProvider + BlockReader + HeaderProvider, + Provider: StaticFileProviderFactory + + DBProvider + + BlockReader + + HeaderProvider
, { // If thre's any receipts pruning configured, receipts are written directly to database and // inconsistencies are expected. @@ -257,13 +266,15 @@ impl ExecutionStage { impl Stage for ExecutionStage where - E: BlockExecutorProvider, + E: BlockExecutorProvider>, Provider: DBProvider - + BlockReader - + StaticFileProviderFactory + + BlockReader< + Block = ::Block, + Header = ::BlockHeader, + > + StaticFileProviderFactory + StatsReader + BlockHashReader - + StateWriter + + StateWriter::Receipt> + StateCommitmentProvider, { /// Return the id of the stage @@ -373,7 +384,7 @@ where } stage_progress = block_number; - stage_checkpoint.progress.processed += block.gas_used(); + stage_checkpoint.progress.processed += block.header().gas_used(); // If we have ExExes we need to save the block in memory for later if self.exex_manager_handle.has_exexs() { @@ -512,7 +523,8 @@ where stage_checkpoint.progress.processed -= provider .block_by_number(block_number)? .ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))? 
- .gas_used; + .header() + .gas_used(); } } let checkpoint = if let Some(stage_checkpoint) = stage_checkpoint { diff --git a/crates/stages/stages/src/stages/hashing_account.rs b/crates/stages/stages/src/stages/hashing_account.rs index e6b1e548455f..551c10d7711f 100644 --- a/crates/stages/stages/src/stages/hashing_account.rs +++ b/crates/stages/stages/src/stages/hashing_account.rs @@ -63,8 +63,10 @@ impl AccountHashingStage { opts: SeedOpts, ) -> Result, StageError> where - N::Primitives: - reth_primitives_traits::FullNodePrimitives, + N::Primitives: reth_primitives_traits::FullNodePrimitives< + BlockBody = reth_primitives::BlockBody, + BlockHeader = reth_primitives::Header, + >, { use alloy_primitives::U256; use reth_db_api::models::AccountBeforeTx; diff --git a/crates/stages/stages/src/stages/hashing_storage.rs b/crates/stages/stages/src/stages/hashing_storage.rs index dcabbe83ee64..0be84665bee1 100644 --- a/crates/stages/stages/src/stages/hashing_storage.rs +++ b/crates/stages/stages/src/stages/hashing_storage.rs @@ -359,10 +359,7 @@ mod tests { transaction.hash(), next_tx_num, )?; - tx.put::( - next_tx_num, - transaction.clone().into(), - )?; + tx.put::(next_tx_num, transaction.clone())?; let (addr, _) = accounts.get_mut(rng.gen::() % n_accounts as usize).unwrap(); diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index 100fe4e979a7..7b9b394b5615 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -1,3 +1,4 @@ +use alloy_consensus::BlockHeader; use alloy_primitives::{BlockHash, BlockNumber, Bytes, B256}; use futures_util::StreamExt; use reth_config::config::EtlConfig; @@ -10,7 +11,7 @@ use reth_db_api::{ }; use reth_etl::Collector; use reth_network_p2p::headers::{downloader::HeaderDownloader, error::HeadersDownloaderError}; -use reth_primitives::{SealedHeader, StaticFileSegment}; +use reth_primitives::{NodePrimitives, SealedHeader, StaticFileSegment}; use 
reth_primitives_traits::serde_bincode_compat; use reth_provider::{ providers::StaticFileWriter, BlockHashReader, DBProvider, HeaderProvider, HeaderSyncGap, @@ -50,7 +51,7 @@ pub struct HeaderStage { /// Consensus client implementation consensus: Arc>, /// Current sync gap. - sync_gap: Option, + sync_gap: Option>, /// ETL collector with `HeaderHash` -> `BlockNumber` hash_collector: Collector, /// ETL collector with `BlockNumber` -> `BincodeSealedHeader` @@ -63,7 +64,7 @@ pub struct HeaderStage { impl HeaderStage where - Downloader: HeaderDownloader
, + Downloader: HeaderDownloader, { /// Create a new header stage pub fn new( @@ -89,10 +90,14 @@ where /// /// Writes to static files ( `Header | HeaderTD | HeaderHash` ) and [`tables::HeaderNumbers`] /// database table. - fn write_headers + StaticFileProviderFactory>( - &mut self, - provider: &P, - ) -> Result { + fn write_headers

(&mut self, provider: &P) -> Result + where + P: DBProvider + + StaticFileProviderFactory< + Primitives: NodePrimitives, + >, + Downloader: HeaderDownloader

::BlockHeader>, + { let total_headers = self.header_collector.len(); info!(target: "sync::stages::headers", total = total_headers, "Writing headers"); @@ -121,19 +126,19 @@ where info!(target: "sync::stages::headers", progress = %format!("{:.2}%", (index as f64 / total_headers as f64) * 100.0), "Writing headers"); } - let sealed_header: SealedHeader = - bincode::deserialize::>(&header_buf) + let sealed_header: SealedHeader = + bincode::deserialize::>(&header_buf) .map_err(|err| StageError::Fatal(Box::new(err)))? .into(); let (header, header_hash) = sealed_header.split(); - if header.number == 0 { + if header.number() == 0 { continue } - last_header_number = header.number; + last_header_number = header.number(); // Increase total difficulty - td += header.difficulty; + td += header.difficulty(); // Header validation self.consensus.validate_header_with_total_difficulty(&header, td).map_err(|error| { @@ -193,9 +198,10 @@ where impl Stage for HeaderStage where - P: HeaderSyncGapProvider, - D: HeaderDownloader
, Provider: DBProvider + StaticFileProviderFactory, + Provider::Primitives: NodePrimitives, + P: HeaderSyncGapProvider
::BlockHeader>, + D: HeaderDownloader
::BlockHeader>, { /// Return the id of the stage fn id(&self) -> StageId { @@ -232,7 +238,7 @@ where } debug!(target: "sync::stages::headers", ?tip, head = ?gap.local_head.hash(), "Commencing sync"); - let local_head_number = gap.local_head.number; + let local_head_number = gap.local_head.number(); // let the downloader know what to sync self.downloader.update_sync_gap(gap.local_head, gap.target); @@ -241,9 +247,9 @@ where loop { match ready!(self.downloader.poll_next_unpin(cx)) { Some(Ok(headers)) => { - info!(target: "sync::stages::headers", total = headers.len(), from_block = headers.first().map(|h| h.number), to_block = headers.last().map(|h| h.number), "Received headers"); + info!(target: "sync::stages::headers", total = headers.len(), from_block = headers.first().map(|h| h.number()), to_block = headers.last().map(|h| h.number()), "Received headers"); for header in headers { - let header_number = header.number; + let header_number = header.number(); self.hash_collector.insert(header.hash(), header_number)?; self.header_collector.insert( diff --git a/crates/stages/stages/src/stages/merkle.rs b/crates/stages/stages/src/stages/merkle.rs index 2d2503b53919..8095dfed9048 100644 --- a/crates/stages/stages/src/stages/merkle.rs +++ b/crates/stages/stages/src/stages/merkle.rs @@ -1,3 +1,4 @@ +use alloy_consensus::BlockHeader; use alloy_primitives::{BlockNumber, B256}; use reth_codecs::Compact; use reth_consensus::ConsensusError; @@ -135,7 +136,7 @@ where Provider: DBProvider + TrieWriter + StatsReader - + HeaderProvider + + HeaderProvider
+ StageCheckpointReader + StageCheckpointWriter, { @@ -168,7 +169,7 @@ where let target_block = provider .header_by_number(to_block)? .ok_or_else(|| ProviderError::HeaderNotFound(to_block.into()))?; - let target_block_root = target_block.state_root; + let target_block_root = target_block.state_root(); let mut checkpoint = self.get_execution_checkpoint(provider)?; let (trie_root, entities_checkpoint) = if range.is_empty() { diff --git a/crates/stages/stages/src/stages/prune.rs b/crates/stages/stages/src/stages/prune.rs index 527f53766972..7e5d7af46eef 100644 --- a/crates/stages/stages/src/stages/prune.rs +++ b/crates/stages/stages/src/stages/prune.rs @@ -1,4 +1,5 @@ -use reth_db::transaction::DbTxMut; +use reth_db::{table::Value, transaction::DbTxMut}; +use reth_primitives::NodePrimitives; use reth_provider::{ BlockReader, DBProvider, PruneCheckpointReader, PruneCheckpointWriter, StaticFileProviderFactory, @@ -41,7 +42,7 @@ where + PruneCheckpointReader + PruneCheckpointWriter + BlockReader - + StaticFileProviderFactory, + + StaticFileProviderFactory>, { fn id(&self) -> StageId { StageId::Prune @@ -130,7 +131,7 @@ where + PruneCheckpointReader + PruneCheckpointWriter + BlockReader - + StaticFileProviderFactory, + + StaticFileProviderFactory>, { fn id(&self) -> StageId { StageId::PruneSenderRecovery diff --git a/crates/stages/stages/src/stages/sender_recovery.rs b/crates/stages/stages/src/stages/sender_recovery.rs index a6c2537c1855..d34a4b07921a 100644 --- a/crates/stages/stages/src/stages/sender_recovery.rs +++ b/crates/stages/stages/src/stages/sender_recovery.rs @@ -1,13 +1,14 @@ use alloy_primitives::{Address, TxNumber}; use reth_config::config::SenderRecoveryConfig; use reth_consensus::ConsensusError; -use reth_db::{static_file::TransactionMask, tables, RawValue}; +use reth_db::{static_file::TransactionMask, table::Value, tables, RawValue}; use reth_db_api::{ cursor::DbCursorRW, transaction::{DbTx, DbTxMut}, DbTxUnwindExt, }; -use reth_primitives::{GotExpected, 
StaticFileSegment, TransactionSignedNoHash}; +use reth_primitives::{GotExpected, NodePrimitives, StaticFileSegment}; +use reth_primitives_traits::SignedTransaction; use reth_provider::{ BlockReader, DBProvider, HeaderProvider, ProviderError, PruneCheckpointReader, StaticFileProviderFactory, StatsReader, @@ -58,8 +59,8 @@ impl Default for SenderRecoveryStage { impl Stage for SenderRecoveryStage where Provider: DBProvider - + BlockReader - + StaticFileProviderFactory + + BlockReader
+ + StaticFileProviderFactory> + StatsReader + PruneCheckpointReader, { @@ -145,7 +146,8 @@ fn recover_range( senders_cursor: &mut CURSOR, ) -> Result<(), StageError> where - Provider: DBProvider + HeaderProvider + StaticFileProviderFactory, + Provider: + DBProvider + HeaderProvider
+ StaticFileProviderFactory, CURSOR: DbCursorRW, { debug!(target: "sync::stages::sender_recovery", ?tx_range, "Sending batch for processing"); @@ -233,7 +235,9 @@ fn setup_range_recovery( provider: &Provider, ) -> mpsc::Sender, RecoveryResultSender)>> where - Provider: DBProvider + HeaderProvider + StaticFileProviderFactory, + Provider: DBProvider + + HeaderProvider + + StaticFileProviderFactory>, { let (tx_sender, tx_receiver) = mpsc::channel::, RecoveryResultSender)>>(); let static_file_provider = provider.static_file_provider(); @@ -254,9 +258,9 @@ where chunk_range.clone(), |cursor, number| { Ok(cursor - .get_one::>>( - number.into(), - )? + .get_one::::SignedTx>, + >>(number.into())? .map(|tx| (number, tx))) }, |_| true, @@ -300,17 +304,18 @@ where } #[inline] -fn recover_sender( - (tx_id, tx): (TxNumber, TransactionSignedNoHash), +fn recover_sender( + (tx_id, tx): (TxNumber, T), rlp_buf: &mut Vec, ) -> Result<(u64, Address), Box> { + rlp_buf.clear(); // We call [Signature::encode_and_recover_unchecked] because transactions run in the pipeline // are known to be valid - this means that we do not need to check whether or not the `s` // value is greater than `secp256k1n / 2` if past EIP-2. There are transactions // pre-homestead which have large `s` values, so using [Signature::recover_signer] here // would not be backwards-compatible. let sender = tx - .encode_and_recover_unchecked(rlp_buf) + .recover_signer_unchecked_with_buf(rlp_buf) .ok_or(SenderRecoveryStageError::FailedRecovery(FailedSenderRecoveryError { tx: tx_id }))?; Ok((tx_id, sender)) diff --git a/crates/stages/stages/src/stages/utils.rs b/crates/stages/stages/src/stages/utils.rs index 5aa1f3f880c3..34aaeee44beb 100644 --- a/crates/stages/stages/src/stages/utils.rs +++ b/crates/stages/stages/src/stages/utils.rs @@ -258,7 +258,7 @@ pub(crate) fn missing_static_data_error( segment: StaticFileSegment, ) -> Result where - Provider: BlockReader + StaticFileProviderFactory, + Provider: BlockReader
+ StaticFileProviderFactory, { let mut last_block = static_file_provider.get_highest_static_file_block(segment).unwrap_or_default(); diff --git a/crates/stages/stages/src/test_utils/test_db.rs b/crates/stages/stages/src/test_utils/test_db.rs index 2f9712f84364..5a6c12d8e00f 100644 --- a/crates/stages/stages/src/test_utils/test_db.rs +++ b/crates/stages/stages/src/test_utils/test_db.rs @@ -267,7 +267,7 @@ impl TestStageDB { if let Some(txs_writer) = &mut txs_writer { txs_writer.append_transaction(next_tx_num, body_tx)?; } else { - tx.put::(next_tx_num, body_tx.clone().into())? + tx.put::(next_tx_num, body_tx.clone())? } next_tx_num += 1; Ok::<(), ProviderError>(()) diff --git a/crates/static-file/static-file/src/segments/headers.rs b/crates/static-file/static-file/src/segments/headers.rs index e06e1f09a177..dff80a23f83b 100644 --- a/crates/static-file/static-file/src/segments/headers.rs +++ b/crates/static-file/static-file/src/segments/headers.rs @@ -1,7 +1,9 @@ use crate::segments::Segment; use alloy_primitives::BlockNumber; -use reth_db::tables; +use reth_codecs::Compact; +use reth_db::{table::Value, tables}; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; +use reth_primitives_traits::NodePrimitives; use reth_provider::{providers::StaticFileWriter, DBProvider, StaticFileProviderFactory}; use reth_static_file_types::StaticFileSegment; use reth_storage_errors::provider::ProviderResult; @@ -11,7 +13,11 @@ use std::ops::RangeInclusive; #[derive(Debug, Default)] pub struct Headers; -impl Segment for Headers { +impl Segment for Headers +where + Provider: StaticFileProviderFactory> + + DBProvider, +{ fn segment(&self) -> StaticFileSegment { StaticFileSegment::Headers } @@ -25,7 +31,10 @@ impl Segment for Hea let mut static_file_writer = static_file_provider.get_writer(*block_range.start(), StaticFileSegment::Headers)?; - let mut headers_cursor = provider.tx_ref().cursor_read::()?; + let mut headers_cursor = provider + .tx_ref() + .cursor_read::::BlockHeader>>( 
+ )?; let headers_walker = headers_cursor.walk_range(block_range.clone())?; let mut header_td_cursor = diff --git a/crates/static-file/static-file/src/static_file_producer.rs b/crates/static-file/static-file/src/static_file_producer.rs index 371a344d8727..30a72561b235 100644 --- a/crates/static-file/static-file/src/static_file_producer.rs +++ b/crates/static-file/static-file/src/static_file_producer.rs @@ -90,7 +90,7 @@ where Provider: StaticFileProviderFactory + DatabaseProviderFactory< Provider: StaticFileProviderFactory< - Primitives: NodePrimitives, + Primitives: NodePrimitives, > + StageCheckpointReader + BlockReader, >, diff --git a/crates/static-file/types/src/lib.rs b/crates/static-file/types/src/lib.rs index 4e9bf90f1c90..4fc9c545e7c1 100644 --- a/crates/static-file/types/src/lib.rs +++ b/crates/static-file/types/src/lib.rs @@ -92,7 +92,7 @@ impl StaticFileTargets { ] .iter() .all(|(target_block_range, highest_static_fileted_block)| { - target_block_range.map_or(true, |target_block_range| { + target_block_range.is_none_or(|target_block_range| { *target_block_range.start() == highest_static_fileted_block.map_or(0, |highest_static_fileted_block| { highest_static_fileted_block + 1 diff --git a/crates/storage/codecs/Cargo.toml b/crates/storage/codecs/Cargo.toml index 57fe9f726c7b..8fbf1632403a 100644 --- a/crates/storage/codecs/Cargo.toml +++ b/crates/storage/codecs/Cargo.toml @@ -67,7 +67,7 @@ alloy = [ "dep:modular-bitfield", "dep:alloy-trie", ] -optimism = ["alloy", "dep:op-alloy-consensus"] +op = ["alloy", "dep:op-alloy-consensus"] test-utils = [ "std", "alloy", diff --git a/crates/storage/codecs/src/alloy/header.rs b/crates/storage/codecs/src/alloy/header.rs index 04b7d6ab718b..418b8b9032b6 100644 --- a/crates/storage/codecs/src/alloy/header.rs +++ b/crates/storage/codecs/src/alloy/header.rs @@ -58,6 +58,7 @@ pub(crate) struct Header { #[reth_codecs(crate = "crate")] pub(crate) struct HeaderExt { requests_hash: Option, + target_blobs_per_block: Option, } 
impl HeaderExt { @@ -65,7 +66,7 @@ impl HeaderExt { /// /// Required since [`Header`] uses `Option` as a field. const fn into_option(self) -> Option { - if self.requests_hash.is_some() { + if self.requests_hash.is_some() || self.target_blobs_per_block.is_some() { Some(self) } else { None @@ -78,7 +79,7 @@ impl Compact for AlloyHeader { where B: bytes::BufMut + AsMut<[u8]>, { - let extra_fields = HeaderExt { requests_hash: self.requests_hash }; + let extra_fields = HeaderExt { requests_hash: self.requests_hash, target_blobs_per_block: self.target_blobs_per_block }; let header = Header { parent_hash: self.parent_hash, @@ -128,8 +129,9 @@ impl Compact for AlloyHeader { blob_gas_used: header.blob_gas_used, excess_blob_gas: header.excess_blob_gas, parent_beacon_block_root: header.parent_beacon_block_root, - requests_hash: header.extra_fields.and_then(|h| h.requests_hash), + requests_hash: header.extra_fields.as_ref().and_then(|h| h.requests_hash), extra_data: header.extra_data, + target_blobs_per_block: header.extra_fields.as_ref().and_then(|h| h.target_blobs_per_block), }; (alloy_header, buf) } @@ -188,7 +190,7 @@ mod tests { #[test] fn test_extra_fields() { let mut header = HOLESKY_BLOCK; - header.extra_fields = Some(HeaderExt { requests_hash: Some(B256::random()) }); + header.extra_fields = Some(HeaderExt { requests_hash: Some(B256::random()), target_blobs_per_block: Some(3) }); let mut encoded_header = vec![]; let len = header.to_compact(&mut encoded_header); diff --git a/crates/storage/codecs/src/alloy/transaction/mod.rs b/crates/storage/codecs/src/alloy/transaction/mod.rs index dc27eacfacc2..fe31293cd18f 100644 --- a/crates/storage/codecs/src/alloy/transaction/mod.rs +++ b/crates/storage/codecs/src/alloy/transaction/mod.rs @@ -9,9 +9,9 @@ cond_mod!( ); -#[cfg(all(feature = "test-utils", feature = "optimism"))] +#[cfg(all(feature = "test-utils", feature = "op"))] pub mod optimism; -#[cfg(all(not(feature = "test-utils"), feature = "optimism"))] 
+#[cfg(all(not(feature = "test-utils"), feature = "op"))] mod optimism; #[cfg(test)] @@ -41,7 +41,7 @@ mod tests { assert_eq!(TxEip7702::bitflag_encoded_bytes(), 4); } - #[cfg(feature = "optimism")] + #[cfg(feature = "op")] #[test] fn test_ensure_backwards_compatibility_optimism() { assert_eq!(crate::alloy::transaction::optimism::TxDeposit::bitflag_encoded_bytes(), 2); @@ -89,11 +89,11 @@ mod tests { )); } - #[cfg(feature = "optimism")] + #[cfg(feature = "op")] #[test] fn test_decode_deposit() { test_decode::(&hex!( "8108ac8f15983d59b6ae4911a00ff7bfcd2e53d2950926f8c82c12afad02861c46fcb293e776204052725e1c08ff2e9ff602ca916357601fa972a14094891fe3598b718758f22c46f163c18bcaa6296ce87e5267ef3fd932112842fbbf79011548cdf067d93ce6098dfc0aaf5a94531e439f30d6dfd0c6" - )); + )); } } diff --git a/crates/storage/codecs/src/alloy/transaction/optimism.rs b/crates/storage/codecs/src/alloy/transaction/optimism.rs index bb970b581775..631f5c406eeb 100644 --- a/crates/storage/codecs/src/alloy/transaction/optimism.rs +++ b/crates/storage/codecs/src/alloy/transaction/optimism.rs @@ -1,9 +1,11 @@ //! Compact implementation for [`AlloyTxDeposit`] +use alloy_consensus::constants::EIP7702_TX_TYPE_ID; use crate::Compact; use alloy_primitives::{Address, Bytes, TxKind, B256, U256}; -use op_alloy_consensus::TxDeposit as AlloyTxDeposit; +use op_alloy_consensus::{OpTxType, TxDeposit as AlloyTxDeposit}; use reth_codecs_derive::add_arbitrary_tests; +use crate::txtype::{COMPACT_EXTENDED_IDENTIFIER_FLAG, COMPACT_IDENTIFIER_EIP1559, COMPACT_IDENTIFIER_EIP2930, COMPACT_IDENTIFIER_LEGACY}; /// Deposit transactions, also known as deposits are initiated on L1, and executed on L2. 
/// @@ -65,3 +67,51 @@ impl Compact for AlloyTxDeposit { (alloy_tx, buf) } } + + +impl crate::Compact for OpTxType { + fn to_compact(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + use crate::txtype::*; + + match self { + Self::Legacy => COMPACT_IDENTIFIER_LEGACY, + Self::Eip2930 => COMPACT_IDENTIFIER_EIP2930, + Self::Eip1559 => COMPACT_IDENTIFIER_EIP1559, + Self::Eip7702 => { + buf.put_u8(EIP7702_TX_TYPE_ID); + COMPACT_EXTENDED_IDENTIFIER_FLAG + } + Self::Deposit => { + buf.put_u8(op_alloy_consensus::DEPOSIT_TX_TYPE_ID); + COMPACT_EXTENDED_IDENTIFIER_FLAG + } + } + } + + // For backwards compatibility purposes only 2 bits of the type are encoded in the identifier + // parameter. In the case of a [`COMPACT_EXTENDED_IDENTIFIER_FLAG`], the full transaction type + // is read from the buffer as a single byte. + fn from_compact(mut buf: &[u8], identifier: usize) -> (Self, &[u8]) { + use bytes::Buf; + ( + match identifier { + COMPACT_IDENTIFIER_LEGACY => Self::Legacy, + COMPACT_IDENTIFIER_EIP2930 => Self::Eip2930, + COMPACT_IDENTIFIER_EIP1559 => Self::Eip1559, + COMPACT_EXTENDED_IDENTIFIER_FLAG => { + let extended_identifier = buf.get_u8(); + match extended_identifier { + EIP7702_TX_TYPE_ID => Self::Eip7702, + op_alloy_consensus::DEPOSIT_TX_TYPE_ID => Self::Deposit, + _ => panic!("Unsupported TxType identifier: {extended_identifier}"), + } + } + _ => panic!("Unknown identifier for TxType: {identifier}"), + }, + buf, + ) + } +} \ No newline at end of file diff --git a/crates/storage/db-api/Cargo.toml b/crates/storage/db-api/Cargo.toml index 3aa908a60093..4f9c2d76b3f4 100644 --- a/crates/storage/db-api/Cargo.toml +++ b/crates/storage/db-api/Cargo.toml @@ -29,6 +29,7 @@ alloy-consensus.workspace = true # codecs modular-bitfield.workspace = true +roaring = "0.10.2" parity-scale-codec = { version = "3.2.1", features = ["bytes"] } serde = { workspace = true, default-features = false } @@ -81,4 +82,4 @@ arbitrary = [ "reth-stages-types/arbitrary", 
"alloy-consensus/arbitrary", ] -optimism = ["reth-primitives/optimism", "reth-codecs/optimism"] +optimism = ["reth-primitives/optimism", "reth-codecs/op"] diff --git a/crates/storage/db-api/src/models/blocks.rs b/crates/storage/db-api/src/models/blocks.rs index 0145ceb52b5b..7c4b37b254db 100644 --- a/crates/storage/db-api/src/models/blocks.rs +++ b/crates/storage/db-api/src/models/blocks.rs @@ -8,12 +8,30 @@ use serde::{Deserialize, Serialize}; /// The storage representation of a block's ommers. /// /// It is stored as the headers of the block's uncles. -#[derive(Debug, Default, Eq, PartialEq, Clone, Serialize, Deserialize, Compact)] +#[derive(Debug, Default, Eq, PartialEq, Clone, Serialize, Deserialize)] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact)] -pub struct StoredBlockOmmers { +pub struct StoredBlockOmmers { /// The block headers of this block's uncles. - pub ommers: Vec
, + pub ommers: Vec, +} + +impl Compact for StoredBlockOmmers { + fn to_compact(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + let mut buffer = bytes::BytesMut::new(); + self.ommers.to_compact(&mut buffer); + let total_length = buffer.len(); + buf.put(buffer); + total_length + } + + fn from_compact(buf: &[u8], _len: usize) -> (Self, &[u8]) { + let (ommers, new_buf) = Vec::from_compact(buf, buf.len()); + (Self { ommers }, new_buf) + } } /// Hash of the block header. @@ -31,4 +49,18 @@ mod tests { ommer.ommers.push(Header::default()); assert_eq!(ommer.clone(), StoredBlockOmmers::decompress(&ommer.compress()).unwrap()); } + + #[test] + fn fuzz_stored_block_ommers() { + fuzz_test_stored_block_ommers(StoredBlockOmmers::default()) + } + + #[test_fuzz::test_fuzz] + fn fuzz_test_stored_block_ommers(obj: StoredBlockOmmers) { + use reth_codecs::Compact; + let mut buf = vec![]; + let len = obj.to_compact(&mut buf); + let (same_obj, _) = StoredBlockOmmers::from_compact(buf.as_ref(), len); + assert_eq!(obj, same_obj); + } } diff --git a/crates/storage/db-api/src/models/integer_list.rs b/crates/storage/db-api/src/models/integer_list.rs index 480b52a9e2c0..5301ec303e50 100644 --- a/crates/storage/db-api/src/models/integer_list.rs +++ b/crates/storage/db-api/src/models/integer_list.rs @@ -4,7 +4,159 @@ use crate::{ table::{Compress, Decompress}, DatabaseError, }; -use reth_primitives_traits::IntegerList; +use bytes::BufMut; +use core::fmt; +use derive_more::Deref; +use roaring::RoaringTreemap; + +/// A data structure that uses Roaring Bitmaps to efficiently store a list of integers. +/// +/// This structure provides excellent compression while allowing direct access to individual +/// elements without the need for full decompression. +/// +/// Key features: +/// - Efficient compression: the underlying Roaring Bitmaps significantly reduce memory usage. +/// - Direct access: elements can be accessed or queried without needing to decode the entire list. 
+/// - [`RoaringTreemap`] backing: internally backed by [`RoaringTreemap`], which supports 64-bit +/// integers. +#[derive(Clone, PartialEq, Default, Deref)] +pub struct IntegerList(pub RoaringTreemap); + +impl fmt::Debug for IntegerList { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("IntegerList")?; + f.debug_list().entries(self.0.iter()).finish() + } +} + +impl IntegerList { + /// Creates a new empty [`IntegerList`]. + pub fn empty() -> Self { + Self(RoaringTreemap::new()) + } + + /// Creates an [`IntegerList`] from a list of integers. + /// + /// Returns an error if the list is not pre-sorted. + pub fn new(list: impl IntoIterator) -> Result { + RoaringTreemap::from_sorted_iter(list) + .map(Self) + .map_err(|_| IntegerListError::UnsortedInput) + } + + /// Creates an [`IntegerList`] from a pre-sorted list of integers. + /// + /// # Panics + /// + /// Panics if the list is not pre-sorted. + #[inline] + #[track_caller] + pub fn new_pre_sorted(list: impl IntoIterator) -> Self { + Self::new(list).expect("IntegerList must be pre-sorted and non-empty") + } + + /// Appends a list of integers to the current list. + pub fn append(&mut self, list: impl IntoIterator) -> Result { + self.0.append(list).map_err(|_| IntegerListError::UnsortedInput) + } + + /// Pushes a new integer to the list. + pub fn push(&mut self, value: u64) -> Result<(), IntegerListError> { + self.0.push(value).then_some(()).ok_or(IntegerListError::UnsortedInput) + } + + /// Clears the list. + pub fn clear(&mut self) { + self.0.clear(); + } + + /// Serializes a [`IntegerList`] into a sequence of bytes. + pub fn to_bytes(&self) -> Vec { + let mut vec = Vec::with_capacity(self.0.serialized_size()); + self.0.serialize_into(&mut vec).expect("not able to encode IntegerList"); + vec + } + + /// Serializes a [`IntegerList`] into a sequence of bytes. 
+ pub fn to_mut_bytes(&self, buf: &mut B) { + self.0.serialize_into(buf.writer()).unwrap(); + } + + /// Deserializes a sequence of bytes into a proper [`IntegerList`]. + pub fn from_bytes(data: &[u8]) -> Result { + RoaringTreemap::deserialize_from(data) + .map(Self) + .map_err(|_| IntegerListError::FailedToDeserialize) + } +} + +impl serde::Serialize for IntegerList { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + use serde::ser::SerializeSeq; + + let mut seq = serializer.serialize_seq(Some(self.len() as usize))?; + for e in &self.0 { + seq.serialize_element(&e)?; + } + seq.end() + } +} + +struct IntegerListVisitor; + +impl<'de> serde::de::Visitor<'de> for IntegerListVisitor { + type Value = IntegerList; + + fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("a usize array") + } + + fn visit_seq(self, mut seq: E) -> Result + where + E: serde::de::SeqAccess<'de>, + { + let mut list = IntegerList::empty(); + while let Some(item) = seq.next_element()? { + list.push(item).map_err(serde::de::Error::custom)?; + } + Ok(list) + } +} + +impl<'de> serde::Deserialize<'de> for IntegerList { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + deserializer.deserialize_byte_buf(IntegerListVisitor) + } +} + +#[cfg(any(test, feature = "arbitrary"))] +use arbitrary::{Arbitrary, Unstructured}; + +#[cfg(any(test, feature = "arbitrary"))] +impl<'a> Arbitrary<'a> for IntegerList { + fn arbitrary(u: &mut Unstructured<'a>) -> Result { + let mut nums: Vec = Vec::arbitrary(u)?; + nums.sort_unstable(); + Self::new(nums).map_err(|_| arbitrary::Error::IncorrectFormat) + } +} + +/// Primitives error type. +#[derive(Debug, derive_more::Display, derive_more::Error)] +pub enum IntegerListError { + /// The provided input is unsorted. + #[display("the provided input is unsorted")] + UnsortedInput, + /// Failed to deserialize data into type. 
+ #[display("failed to deserialize data into type")] + FailedToDeserialize, +} impl Compress for IntegerList { type Compressed = Vec; @@ -23,3 +175,30 @@ impl Decompress for IntegerList { Self::from_bytes(value).map_err(|_| DatabaseError::Decode) } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn empty_list() { + assert_eq!(IntegerList::empty().len(), 0); + assert_eq!(IntegerList::new_pre_sorted(std::iter::empty()).len(), 0); + } + + #[test] + fn test_integer_list() { + let original_list = [1, 2, 3]; + let ef_list = IntegerList::new(original_list).unwrap(); + assert_eq!(ef_list.iter().collect::>(), original_list); + } + + #[test] + fn test_integer_list_serialization() { + let original_list = [1, 2, 3]; + let ef_list = IntegerList::new(original_list).unwrap(); + + let blist = ef_list.to_bytes(); + assert_eq!(IntegerList::from_bytes(&blist).unwrap(), ef_list) + } +} diff --git a/crates/storage/db-api/src/models/mod.rs b/crates/storage/db-api/src/models/mod.rs index 5d18711922ed..7ded84e17208 100644 --- a/crates/storage/db-api/src/models/mod.rs +++ b/crates/storage/db-api/src/models/mod.rs @@ -8,9 +8,8 @@ use alloy_consensus::Header; use alloy_genesis::GenesisAccount; use alloy_primitives::{Address, Bytes, Log, B256, U256}; use reth_codecs::{add_arbitrary_tests, Compact}; -use reth_primitives::{ - Account, Bytecode, Receipt, StorageEntry, TransactionSigned, TransactionSignedNoHash, TxType, -}; +use reth_primitives::{Receipt, StorageEntry, TransactionSigned, TxType}; +use reth_primitives_traits::{Account, Bytecode}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::StageCheckpoint; use reth_trie_common::{StoredNibbles, StoredNibblesSubKey, *}; @@ -24,6 +23,7 @@ pub mod storage_sharded_key; pub use accounts::*; pub use blocks::*; +pub use integer_list::IntegerList; pub use reth_db_models::{ AccountBeforeTx, ClientVersion, StoredBlockBodyIndices, StoredBlockWithdrawals, }; @@ -189,9 +189,9 @@ impl Decode for ClientVersion { /// 
Implements compression for Compact type. macro_rules! impl_compression_for_compact { - ($($name:tt),+) => { + ($($name:ident$(<$($generic:ident),*>)?),+) => { $( - impl Compress for $name { + impl$(<$($generic: core::fmt::Debug + Send + Sync + Compact),*>)? Compress for $name$(<$($generic),*>)? { type Compressed = Vec; fn compress_to_buf>(self, buf: &mut B) { @@ -199,8 +199,8 @@ macro_rules! impl_compression_for_compact { } } - impl Decompress for $name { - fn decompress(value: &[u8]) -> Result<$name, $crate::DatabaseError> { + impl$(<$($generic: core::fmt::Debug + Send + Sync + Compact),*>)? Decompress for $name$(<$($generic),*>)? { + fn decompress(value: &[u8]) -> Result<$name$(<$($generic),*>)?, $crate::DatabaseError> { let (obj, _) = Compact::from_compact(value, value.len()); Ok(obj) } @@ -222,11 +222,10 @@ impl_compression_for_compact!( StoredNibblesSubKey, StorageTrieEntry, StoredBlockBodyIndices, - StoredBlockOmmers, + StoredBlockOmmers, StoredBlockWithdrawals, Bytecode, AccountBeforeTx, - TransactionSignedNoHash, TransactionSigned, CompactU256, StageCheckpoint, @@ -339,7 +338,6 @@ mod tests { assert_eq!(StageCheckpoint::bitflag_encoded_bytes(), 1); assert_eq!(StageUnitCheckpoint::bitflag_encoded_bytes(), 1); assert_eq!(StoredBlockBodyIndices::bitflag_encoded_bytes(), 1); - assert_eq!(StoredBlockOmmers::bitflag_encoded_bytes(), 0); assert_eq!(StoredBlockWithdrawals::bitflag_encoded_bytes(), 0); assert_eq!(StorageHashingCheckpoint::bitflag_encoded_bytes(), 1); @@ -360,7 +358,6 @@ mod tests { validate_bitflag_backwards_compat!(StageCheckpoint, UnusedBits::NotZero); validate_bitflag_backwards_compat!(StageUnitCheckpoint, UnusedBits::Zero); validate_bitflag_backwards_compat!(StoredBlockBodyIndices, UnusedBits::Zero); - validate_bitflag_backwards_compat!(StoredBlockOmmers, UnusedBits::Zero); validate_bitflag_backwards_compat!(StoredBlockWithdrawals, UnusedBits::Zero); validate_bitflag_backwards_compat!(StorageHashingCheckpoint, UnusedBits::NotZero); } diff --git 
a/crates/storage/db-common/Cargo.toml b/crates/storage/db-common/Cargo.toml index 9e4954357f84..28dbc33e90dc 100644 --- a/crates/storage/db-common/Cargo.toml +++ b/crates/storage/db-common/Cargo.toml @@ -24,6 +24,7 @@ reth-fs-util.workspace = true reth-node-types.workspace = true # eth +alloy-consensus.workspace = true alloy-genesis.workspace = true alloy-primitives.workspace = true diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index ec31edd06823..493b27be7808 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -1,5 +1,6 @@ //! Reth genesis initialization utility functions. +use alloy_consensus::BlockHeader; use alloy_genesis::GenesisAccount; use alloy_primitives::{Address, B256, U256}; use reth_chainspec::EthChainSpec; @@ -8,7 +9,9 @@ use reth_config::config::EtlConfig; use reth_db::tables; use reth_db_api::{transaction::DbTxMut, DatabaseError}; use reth_etl::Collector; -use reth_primitives::{Account, Bytecode, GotExpected, Receipts, StaticFileSegment, StorageEntry}; +use reth_primitives::{ + Account, Bytecode, GotExpected, NodePrimitives, Receipts, StaticFileSegment, StorageEntry, +}; use reth_provider::{ errors::provider::ProviderResult, providers::StaticFileWriter, writer::UnifiedStorageWriter, BlockHashReader, BlockNumReader, BundleStateInit, ChainSpecProvider, DBProvider, @@ -69,8 +72,11 @@ impl From for InitDatabaseError { /// Write the genesis block if it has not already been written pub fn init_genesis(factory: &PF) -> Result where - PF: DatabaseProviderFactory + StaticFileProviderFactory + ChainSpecProvider + BlockHashReader, - PF::ProviderRW: StaticFileProviderFactory + PF: DatabaseProviderFactory + + StaticFileProviderFactory> + + ChainSpecProvider + + BlockHashReader, + PF::ProviderRW: StaticFileProviderFactory + StageCheckpointWriter + HistoryWriter + HeaderProvider @@ -78,6 +84,7 @@ where + StateWriter + StateWriter + AsRef, + PF::ChainSpec: EthChainSpec
::BlockHeader>, { let chain = factory.chain_spec(); @@ -306,15 +313,16 @@ pub fn insert_genesis_header( chain: &Spec, ) -> ProviderResult<()> where - Provider: StaticFileProviderFactory + DBProvider, - Spec: EthChainSpec, + Provider: StaticFileProviderFactory> + + DBProvider, + Spec: EthChainSpec
::BlockHeader>, { let (header, block_hash) = (chain.genesis_header(), chain.genesis_hash()); let static_file_provider = provider.static_file_provider(); match static_file_provider.block_hash(0) { Ok(None) | Err(ProviderError::MissingStaticFileBlock(StaticFileSegment::Headers, 0)) => { - let (difficulty, hash) = (header.difficulty, block_hash); + let (difficulty, hash) = (header.difficulty(), block_hash); let mut writer = static_file_provider.latest_writer(StaticFileSegment::Headers)?; writer.append_header(header, difficulty, &hash)?; } @@ -358,7 +366,7 @@ where let expected_state_root = provider_rw .header_by_number(block)? .ok_or_else(|| ProviderError::HeaderNotFound(block.into()))? - .state_root; + .state_root(); // first line can be state root let dump_state_root = parse_state_root(&mut reader)?; @@ -601,12 +609,11 @@ mod tests { use reth_db::DatabaseEnv; use reth_db_api::{ cursor::DbCursorRO, - models::{storage_sharded_key::StorageShardedKey, ShardedKey}, + models::{storage_sharded_key::StorageShardedKey, IntegerList, ShardedKey}, table::{Table, TableRow}, transaction::DbTx, Database, }; - use reth_primitives_traits::IntegerList; use reth_provider::{ test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, ProviderFactory, diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index 006213e4cb91..8a6811b15391 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -507,12 +507,11 @@ mod tests { use alloy_primitives::{Address, B256, U256}; use reth_db_api::{ cursor::{DbDupCursorRO, DbDupCursorRW, ReverseWalker, Walker}, - models::{AccountBeforeTx, ShardedKey}, + models::{AccountBeforeTx, IntegerList, ShardedKey}, table::{Encode, Table}, }; use reth_libmdbx::Error; - use reth_primitives::{Account, StorageEntry}; - use reth_primitives_traits::IntegerList; + use reth_primitives_traits::{Account, StorageEntry}; use 
reth_storage_errors::db::{DatabaseWriteError, DatabaseWriteOperation}; use std::str::FromStr; use tempfile::TempDir; diff --git a/crates/storage/db/src/metrics.rs b/crates/storage/db/src/metrics.rs index 2d908c68156f..ed265d6e3aa3 100644 --- a/crates/storage/db/src/metrics.rs +++ b/crates/storage/db/src/metrics.rs @@ -104,10 +104,11 @@ impl DatabaseEnvMetrics { value_size: Option, f: impl FnOnce() -> R, ) -> R { - self.operations - .get(&(table, operation)) - .expect("operation & table metric handle not found") - .record(value_size, f) + if let Some(metrics) = self.operations.get(&(table, operation)) { + metrics.record(value_size, f) + } else { + f() + } } /// Record metrics for opening a database transaction. diff --git a/crates/storage/db/src/tables/codecs/fuzz/inputs.rs b/crates/storage/db/src/tables/codecs/fuzz/inputs.rs index bb26e8b9e217..da15c112e628 100644 --- a/crates/storage/db/src/tables/codecs/fuzz/inputs.rs +++ b/crates/storage/db/src/tables/codecs/fuzz/inputs.rs @@ -1,6 +1,6 @@ //! Curates the input coming from the fuzzer for certain types. -use reth_primitives_traits::IntegerList; +use reth_db_api::models::IntegerList; use serde::{Deserialize, Serialize}; /// Makes sure that the list provided by the fuzzer is not empty and pre-sorted diff --git a/crates/storage/db/src/tables/codecs/fuzz/mod.rs b/crates/storage/db/src/tables/codecs/fuzz/mod.rs index e64a3841df49..f6b68897e349 100644 --- a/crates/storage/db/src/tables/codecs/fuzz/mod.rs +++ b/crates/storage/db/src/tables/codecs/fuzz/mod.rs @@ -16,9 +16,6 @@ macro_rules! 
impl_fuzzer_with_input { pub mod $name { use reth_db_api::table; - #[allow(unused_imports)] - - #[allow(unused_imports)] use reth_primitives_traits::*; diff --git a/crates/storage/db/src/tables/mod.rs b/crates/storage/db/src/tables/mod.rs index a1fea62f0d8b..88cfdde44aaf 100644 --- a/crates/storage/db/src/tables/mod.rs +++ b/crates/storage/db/src/tables/mod.rs @@ -26,13 +26,13 @@ use reth_db_api::{ accounts::BlockNumberAddress, blocks::{HeaderHash, StoredBlockOmmers}, storage_sharded_key::StorageShardedKey, - AccountBeforeTx, ClientVersion, CompactU256, ShardedKey, StoredBlockBodyIndices, - StoredBlockWithdrawals, + AccountBeforeTx, ClientVersion, CompactU256, IntegerList, ShardedKey, + StoredBlockBodyIndices, StoredBlockWithdrawals, }, table::{Decode, DupSort, Encode, Table}, }; -use reth_primitives::{Account, Bytecode, Receipt, StorageEntry, TransactionSignedNoHash}; -use reth_primitives_traits::IntegerList; +use reth_primitives::{Receipt, StorageEntry, TransactionSigned}; +use reth_primitives_traits::{Account, Bytecode}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::StageCheckpoint; use reth_trie_common::{BranchNodeCompact, StorageTrieEntry, StoredNibbles, StoredNibblesSubKey}; @@ -98,6 +98,15 @@ pub trait TableViewer { } } +/// General trait for defining the set of tables +/// Used to initialize database +pub trait TableSet { + /// Returns all the table names in the database. + fn table_names(&self) -> Vec<&'static str>; + /// Returns `true` if the table at the given index is a `DUPSORT` table. + fn is_dupsort(&self, idx: usize) -> bool; +} + /// Defines all the tables in the database. #[macro_export] macro_rules! tables { @@ -139,6 +148,7 @@ macro_rules! tables { impl$(<$($generic),*>)? reth_db_api::table::Table for $name$(<$($generic),*>)? where $value: reth_db_api::table::Value + 'static + $($(,$generic: Send + Sync)*)? 
{ const NAME: &'static str = table_names::$name; const DUPSORT: bool = tables!(@bool $($subkey)?); @@ -242,6 +252,18 @@ macro_rules! tables { } } + impl TableSet for Tables { + fn table_names(&self) -> Vec<&'static str> { + //vec![$(table_names::$name,)*] + Self::ALL.iter().map(|t| t.name()).collect() + } + + fn is_dupsort(&self, idx: usize) -> bool { + let table: Self = self.table_names()[idx].parse().expect("should be valid table name"); + table.is_dupsort() + } + } + // Need constants to match on in the `FromStr` implementation. #[allow(non_upper_case_globals)] mod table_names { @@ -314,9 +336,9 @@ tables! { } /// Stores the uncles/ommers of the block. - table BlockOmmers { + table BlockOmmers { type Key = BlockNumber; - type Value = StoredBlockOmmers; + type Value = StoredBlockOmmers; } /// Stores the block withdrawals. @@ -326,7 +348,7 @@ tables! { } /// Canonical only Stores the transaction body for canonical transactions. - table Transactions { + table Transactions { type Key = TxNumber; type Value = T; } diff --git a/crates/storage/errors/Cargo.toml b/crates/storage/errors/Cargo.toml index ecefa5f6aca7..2e864e09d43a 100644 --- a/crates/storage/errors/Cargo.toml +++ b/crates/storage/errors/Cargo.toml @@ -12,8 +12,9 @@ workspace = true [dependencies] # reth -reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-fs-util.workspace = true +reth-static-file-types.workspace = true # ethereum alloy-eips.workspace = true @@ -26,7 +27,9 @@ derive_more.workspace = true [features] default = ["std"] std = [ - "reth-primitives/std", "alloy-eips/std", - "alloy-primitives/std" + "alloy-primitives/std", + "alloy-rlp/std", + "derive_more/std", + "reth-primitives-traits/std" ] diff --git a/crates/storage/errors/src/provider.rs b/crates/storage/errors/src/provider.rs index e69c0343f564..d4b69cffb08e 100644 --- a/crates/storage/errors/src/provider.rs +++ b/crates/storage/errors/src/provider.rs @@ -3,7 +3,8 @@ use alloc::{boxed::Box, string::String}; use 
alloy_eips::{BlockHashOrNumber, HashOrNumber}; use alloy_primitives::{Address, BlockHash, BlockNumber, TxNumber, B256}; use derive_more::Display; -use reth_primitives::{GotExpected, StaticFileSegment}; +use reth_primitives_traits::GotExpected; +use reth_static_file_types::StaticFileSegment; /// Provider result type. pub type ProviderResult = Result; @@ -165,7 +166,6 @@ impl core::error::Error for ProviderError { fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { match self { Self::Database(source) => core::error::Error::source(source), - Self::Rlp(source) => core::error::Error::source(source), Self::StorageLockError(source) => core::error::Error::source(source), Self::UnifiedStorageWriterError(source) => core::error::Error::source(source), _ => Option::None, diff --git a/crates/storage/errors/src/writer.rs b/crates/storage/errors/src/writer.rs index 10d4ad96ed3f..3e060d7005d4 100644 --- a/crates/storage/errors/src/writer.rs +++ b/crates/storage/errors/src/writer.rs @@ -1,5 +1,5 @@ use crate::db::DatabaseError; -use reth_primitives::StaticFileSegment; +use reth_static_file_types::StaticFileSegment; /// `UnifiedStorageWriter` related errors /// `StorageWriter` related errors diff --git a/crates/storage/libmdbx-rs/Cargo.toml b/crates/storage/libmdbx-rs/Cargo.toml index fa10a73cb330..4679f4fe9149 100644 --- a/crates/storage/libmdbx-rs/Cargo.toml +++ b/crates/storage/libmdbx-rs/Cargo.toml @@ -15,7 +15,7 @@ workspace = true reth-mdbx-sys.workspace = true bitflags.workspace = true -byteorder = "1" +byteorder.workspace = true derive_more.workspace = true indexmap = "2" parking_lot.workspace = true diff --git a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h index dfcba66063a7..2665931de527 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h +++ b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h @@ -1413,7 +1413,7 @@ enum MDBX_env_flags_t { * \ref mdbx_env_set_syncbytes() and \ref 
mdbx_env_set_syncperiod() functions * could be very useful with `MDBX_SAFE_NOSYNC` flag. * - * The number and volume of of disk IOPs with MDBX_SAFE_NOSYNC flag will + * The number and volume of disk IOPs with MDBX_SAFE_NOSYNC flag will * exactly the as without any no-sync flags. However, you should expect a * larger process's [work set](https://bit.ly/2kA2tFX) and significantly worse * a [locality of reference](https://bit.ly/2mbYq2J), due to the more @@ -2079,7 +2079,7 @@ enum MDBX_option_t { * for all processes interacting with the database. * * \details This defines the number of slots in the lock table that is used to - * track readers in the the environment. The default is about 100 for 4K + * track readers in the environment. The default is about 100 for 4K * system page size. Starting a read-only transaction normally ties a lock * table slot to the current thread until the environment closes or the thread * exits. If \ref MDBX_NOTLS is in use, \ref mdbx_txn_begin() instead ties the @@ -3343,7 +3343,7 @@ mdbx_limits_txnsize_max(intptr_t pagesize); * \ingroup c_settings * * \details This defines the number of slots in the lock table that is used to - * track readers in the the environment. The default is about 100 for 4K system + * track readers in the environment. The default is about 100 for 4K system * page size. Starting a read-only transaction normally ties a lock table slot * to the current thread until the environment closes or the thread exits. If * \ref MDBX_NOTLS is in use, \ref mdbx_txn_begin() instead ties the slot to the diff --git a/crates/storage/libmdbx-rs/src/flags.rs b/crates/storage/libmdbx-rs/src/flags.rs index d733327cefa8..1457195be78c 100644 --- a/crates/storage/libmdbx-rs/src/flags.rs +++ b/crates/storage/libmdbx-rs/src/flags.rs @@ -56,7 +56,7 @@ pub enum SyncMode { /// flag could be used with [`Environment::sync()`](crate::Environment::sync) as alternatively /// for batch committing or nested transaction (in some cases). 
/// - /// The number and volume of of disk IOPs with [`SyncMode::SafeNoSync`] flag will exactly the + /// The number and volume of disk IOPs with [`SyncMode::SafeNoSync`] flag will exactly the /// as without any no-sync flags. However, you should expect a larger process's work set /// and significantly worse a locality of reference, due to the more intensive allocation /// of previously unused pages and increase the size of the database. diff --git a/crates/storage/nippy-jar/Cargo.toml b/crates/storage/nippy-jar/Cargo.toml index 9f212bf44e82..56f140afbda4 100644 --- a/crates/storage/nippy-jar/Cargo.toml +++ b/crates/storage/nippy-jar/Cargo.toml @@ -34,7 +34,6 @@ derive_more.workspace = true rand = { workspace = true, features = ["small_rng"] } tempfile.workspace = true - [features] default = [] test-utils = [] diff --git a/crates/storage/nippy-jar/src/error.rs b/crates/storage/nippy-jar/src/error.rs index f69bb44a068b..385e39357a0f 100644 --- a/crates/storage/nippy-jar/src/error.rs +++ b/crates/storage/nippy-jar/src/error.rs @@ -90,6 +90,6 @@ pub enum NippyJarError { InconsistentState, /// A specified file is missing. 
- #[error("Missing file: {0}.")] + #[error("Missing file: {}", .0.display())] MissingFile(PathBuf), } diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 2875b91149c5..f6d577aadbec 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -92,10 +92,11 @@ optimism = [ "reth-primitives/optimism", "reth-execution-types/optimism", "reth-optimism-primitives", - "reth-codecs/optimism", + "reth-codecs/op", "reth-db/optimism", "reth-db-api/optimism", "revm/optimism", + "reth-optimism-primitives/op", ] serde = [ "dashmap/serde", diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 08f5e4680a2b..521e1d959b3c 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -4,8 +4,8 @@ use crate::{ AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, CanonChainTracker, CanonStateNotifications, CanonStateSubscriptions, ChainSpecProvider, ChainStateBlockReader, ChangeSetReader, DatabaseProvider, - DatabaseProviderFactory, EvmEnvProvider, FullProvider, HeaderProvider, ProviderError, - ProviderFactory, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, + DatabaseProviderFactory, EvmEnvProvider, FullProvider, HashedPostStateProvider, HeaderProvider, + ProviderError, ProviderFactory, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateProviderBox, StateProviderFactory, StateReader, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; @@ -25,18 +25,25 @@ use reth_db::{models::BlockNumberAddress, transaction::DbTx, Database}; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; use reth_execution_types::ExecutionOutcome; -use reth_node_types::{BlockTy, 
NodeTypesWithDB, ReceiptTy, TxTy}; +use reth_node_types::{BlockTy, HeaderTy, NodeTypesWithDB, ReceiptTy, TxTy}; use reth_primitives::{ Account, Block, BlockWithSenders, EthPrimitives, NodePrimitives, Receipt, SealedBlock, SealedBlockFor, SealedBlockWithSenders, SealedHeader, StorageEntry, TransactionMeta, - TransactionSigned, TransactionSignedNoHash, + TransactionSigned, }; use reth_primitives_traits::BlockBody as _; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{DBProvider, NodePrimitivesProvider, StorageChangeSetReader}; +use reth_storage_api::{ + DBProvider, NodePrimitivesProvider, StateCommitmentProvider, StorageChangeSetReader, +}; use reth_storage_errors::provider::ProviderResult; -use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; +use reth_trie::HashedPostState; +use reth_trie_db::StateCommitment; +use revm::{ + db::BundleState, + primitives::{BlockEnv, CfgEnvWithHandlerCfg}, +}; use std::{ ops::{Add, RangeBounds, RangeInclusive, Sub}, sync::Arc, @@ -89,7 +96,10 @@ impl BlockchainProvider2 { /// /// This returns a `ProviderResult` since it tries the retrieve the last finalized header from /// `database`. - pub fn with_latest(storage: ProviderFactory, latest: SealedHeader) -> ProviderResult { + pub fn with_latest( + storage: ProviderFactory, + latest: SealedHeader>, + ) -> ProviderResult { let provider = storage.provider()?; let finalized_header = provider .last_finalized_block_number()? 
@@ -168,6 +178,10 @@ impl DatabaseProviderFactory for BlockchainProvider2 { } } +impl StateCommitmentProvider for BlockchainProvider2 { + type StateCommitment = N::StateCommitment; +} + impl StaticFileProviderFactory for BlockchainProvider2 { fn static_file_provider(&self) -> StaticFileProvider { self.database.static_file_provider() @@ -175,11 +189,13 @@ impl StaticFileProviderFactory for BlockchainProvider2 } impl HeaderProvider for BlockchainProvider2 { - fn header(&self, block_hash: &BlockHash) -> ProviderResult> { + type Header = HeaderTy; + + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { self.consistent_provider()?.header(block_hash) } - fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { + fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { self.consistent_provider()?.header_by_number(num) } @@ -191,26 +207,32 @@ impl HeaderProvider for BlockchainProvider2 { self.consistent_provider()?.header_td_by_number(number) } - fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { + fn headers_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { self.consistent_provider()?.headers_range(range) } - fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { + fn sealed_header( + &self, + number: BlockNumber, + ) -> ProviderResult>> { self.consistent_provider()?.sealed_header(number) } fn sealed_headers_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.consistent_provider()?.sealed_headers_range(range) } fn sealed_headers_while( &self, range: impl RangeBounds, - predicate: impl FnMut(&SealedHeader) -> bool, - ) -> ProviderResult> { + predicate: impl FnMut(&SealedHeader) -> bool, + ) -> ProviderResult>> { self.consistent_provider()?.sealed_headers_while(range, predicate) } } @@ -292,7 +314,7 @@ impl BlockReader for BlockchainProvider2 { Ok(self.canonical_in_memory_state.pending_block_and_receipts()) } - fn ommers(&self, id: BlockHashOrNumber) -> 
ProviderResult>> { + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { self.consistent_provider()?.ommers(id) } @@ -470,7 +492,7 @@ impl StageCheckpointReader for BlockchainProvider2 { } } -impl EvmEnvProvider for BlockchainProvider2 { +impl EvmEnvProvider> for BlockchainProvider2 { fn fill_env_at( &self, cfg: &mut CfgEnvWithHandlerCfg, @@ -479,7 +501,7 @@ impl EvmEnvProvider for BlockchainProvider2 { evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
>, { self.consistent_provider()?.fill_env_at(cfg, block_env, at, evm_config) } @@ -488,11 +510,11 @@ impl EvmEnvProvider for BlockchainProvider2 { &self, cfg: &mut CfgEnvWithHandlerCfg, block_env: &mut BlockEnv, - header: &Header, + header: &HeaderTy, evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
>, { self.consistent_provider()?.fill_env_with_header(cfg, block_env, header, evm_config) } @@ -504,7 +526,7 @@ impl EvmEnvProvider for BlockchainProvider2 { evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
>, { self.consistent_provider()?.fill_cfg_env_at(cfg, at, evm_config) } @@ -512,11 +534,11 @@ impl EvmEnvProvider for BlockchainProvider2 { fn fill_cfg_env_with_header( &self, cfg: &mut CfgEnvWithHandlerCfg, - header: &Header, + header: &HeaderTy, evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
>, { self.consistent_provider()?.fill_cfg_env_with_header(cfg, header, evm_config) } @@ -652,10 +674,17 @@ impl StateProviderFactory for BlockchainProvider2 { } } -impl CanonChainTracker for BlockchainProvider2 -where - Self: BlockReader, -{ +impl HashedPostStateProvider for BlockchainProvider2 { + fn hashed_post_state(&self, bundle_state: &BundleState) -> HashedPostState { + HashedPostState::from_bundle_state::<::KeyHasher>( + bundle_state.state(), + ) + } +} + +impl CanonChainTracker for BlockchainProvider2 { + type Header = HeaderTy; + fn on_forkchoice_update_received(&self, _update: &ForkchoiceState) { // update timestamp self.canonical_in_memory_state.on_forkchoice_update_received(); @@ -673,15 +702,15 @@ where self.canonical_in_memory_state.last_exchanged_transition_configuration_timestamp() } - fn set_canonical_head(&self, header: SealedHeader) { + fn set_canonical_head(&self, header: SealedHeader) { self.canonical_in_memory_state.set_canonical_head(header); } - fn set_safe(&self, header: SealedHeader) { + fn set_safe(&self, header: SealedHeader) { self.canonical_in_memory_state.set_safe(header); } - fn set_finalized(&self, header: SealedHeader) { + fn set_finalized(&self, header: SealedHeader) { self.canonical_in_memory_state.set_finalized(header); } } @@ -694,26 +723,32 @@ where self.consistent_provider()?.block_by_id(id) } - fn header_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { + fn header_by_number_or_tag( + &self, + id: BlockNumberOrTag, + ) -> ProviderResult> { self.consistent_provider()?.header_by_number_or_tag(id) } fn sealed_header_by_number_or_tag( &self, id: BlockNumberOrTag, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.consistent_provider()?.sealed_header_by_number_or_tag(id) } - fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult> { + fn sealed_header_by_id( + &self, + id: BlockId, + ) -> ProviderResult>> { self.consistent_provider()?.sealed_header_by_id(id) } - fn header_by_id(&self, id: BlockId) -> 
ProviderResult> { + fn header_by_id(&self, id: BlockId) -> ProviderResult> { self.consistent_provider()?.header_by_id(id) } - fn ommers_by_id(&self, id: BlockId) -> ProviderResult>> { + fn ommers_by_id(&self, id: BlockId) -> ProviderResult>> { self.consistent_provider()?.ommers_by_id(id) } } @@ -727,12 +762,14 @@ impl> CanonStateSubscriptions } impl ForkChoiceSubscriptions for BlockchainProvider2 { - fn subscribe_safe_block(&self) -> ForkChoiceNotifications { + type Header = HeaderTy; + + fn subscribe_safe_block(&self) -> ForkChoiceNotifications { let receiver = self.canonical_in_memory_state.subscribe_safe_block(); ForkChoiceNotifications(receiver) } - fn subscribe_finalized_block(&self) -> ForkChoiceNotifications { + fn subscribe_finalized_block(&self) -> ForkChoiceNotifications { let receiver = self.canonical_in_memory_state.subscribe_finalized_block(); ForkChoiceNotifications(receiver) } @@ -813,9 +850,7 @@ mod tests { use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; use reth_errors::ProviderError; use reth_execution_types::{Chain, ExecutionOutcome}; - use reth_primitives::{ - BlockExt, Receipt, SealedBlock, StaticFileSegment, TransactionSignedNoHash, - }; + use reth_primitives::{BlockExt, Receipt, SealedBlock, StaticFileSegment}; use reth_primitives_traits::{BlockBody as _, SignedTransaction}; use reth_storage_api::{ BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, diff --git a/crates/storage/provider/src/providers/bundle_state_provider.rs b/crates/storage/provider/src/providers/bundle_state_provider.rs index 652f6fb33fd2..619296b57f38 100644 --- a/crates/storage/provider/src/providers/bundle_state_provider.rs +++ b/crates/storage/provider/src/providers/bundle_state_provider.rs @@ -6,7 +6,7 @@ use alloy_primitives::{ Address, BlockNumber, Bytes, B256, }; use reth_primitives::{Account, Bytecode}; -use reth_storage_api::{StateProofProvider, StorageRootProvider}; +use reth_storage_api::{HashedPostStateProvider, 
StateProofProvider, StorageRootProvider}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, @@ -87,7 +87,7 @@ impl StateRootProvider { fn state_root(&self, hashed_state: HashedPostState) -> ProviderResult { let bundle_state = self.block_execution_data_provider.execution_outcome().state(); - let mut state = HashedPostState::from_bundle_state(&bundle_state.state); + let mut state = self.hashed_post_state(bundle_state); state.extend(hashed_state); self.state_provider.state_root(state) } @@ -101,7 +101,7 @@ impl StateRootProvider hashed_state: HashedPostState, ) -> ProviderResult<(B256, TrieUpdates)> { let bundle_state = self.block_execution_data_provider.execution_outcome().state(); - let mut state = HashedPostState::from_bundle_state(&bundle_state.state); + let mut state = self.hashed_post_state(bundle_state); state.extend(hashed_state); self.state_provider.state_root_with_updates(state) } @@ -111,7 +111,7 @@ impl StateRootProvider mut input: TrieInput, ) -> ProviderResult<(B256, TrieUpdates)> { let bundle_state = self.block_execution_data_provider.execution_outcome().state(); - input.prepend(HashedPostState::from_bundle_state(&bundle_state.state)); + input.prepend(self.hashed_post_state(bundle_state)); self.state_provider.state_root_from_nodes_with_updates(input) } } @@ -162,7 +162,7 @@ impl StateProofProvider slots: &[B256], ) -> ProviderResult { let bundle_state = self.block_execution_data_provider.execution_outcome().state(); - input.prepend(HashedPostState::from_bundle_state(&bundle_state.state)); + input.prepend(self.hashed_post_state(bundle_state)); self.state_provider.proof(input, address, slots) } @@ -172,7 +172,7 @@ impl StateProofProvider targets: HashMap>, ) -> ProviderResult { let bundle_state = self.block_execution_data_provider.execution_outcome().state(); - input.prepend(HashedPostState::from_bundle_state(&bundle_state.state)); + 
input.prepend(self.hashed_post_state(bundle_state)); self.state_provider.multiproof(input, targets) } @@ -182,11 +182,19 @@ impl StateProofProvider target: HashedPostState, ) -> ProviderResult> { let bundle_state = self.block_execution_data_provider.execution_outcome().state(); - input.prepend(HashedPostState::from_bundle_state(&bundle_state.state)); + input.prepend(self.hashed_post_state(bundle_state)); self.state_provider.witness(input, target) } } +impl HashedPostStateProvider + for BundleStateProvider +{ + fn hashed_post_state(&self, bundle_state: &revm::db::BundleState) -> HashedPostState { + self.state_provider.hashed_post_state(bundle_state) + } +} + impl StateProvider for BundleStateProvider { fn storage( &self, diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index e70f4b4e5e1d..5aea5be27d45 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -6,7 +6,7 @@ use crate::{ StageCheckpointReader, StateReader, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; -use alloy_consensus::Header; +use alloy_consensus::BlockHeader; use alloy_eips::{ eip2718::Encodable2718, eip4895::{Withdrawal, Withdrawals}, @@ -19,7 +19,7 @@ use reth_db::models::BlockNumberAddress; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; use reth_execution_types::{BundleStateInit, ExecutionOutcome, RevertsInit}; -use reth_node_types::{BlockTy, ReceiptTy, TxTy}; +use reth_node_types::{BlockTy, HeaderTy, ReceiptTy, TxTy}; use reth_primitives::{ Account, BlockWithSenders, SealedBlockFor, SealedBlockWithSenders, SealedHeader, StorageEntry, TransactionMeta, @@ -628,7 +628,9 @@ impl StaticFileProviderFactory for ConsistentProvider { } impl HeaderProvider for ConsistentProvider { - fn header(&self, block_hash: &BlockHash) -> ProviderResult> { + type Header = HeaderTy; + 
+ fn header(&self, block_hash: &BlockHash) -> ProviderResult> { self.get_in_memory_or_storage_by_block( (*block_hash).into(), |db_provider| db_provider.header(block_hash), @@ -636,7 +638,7 @@ impl HeaderProvider for ConsistentProvider { ) } - fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { + fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { self.get_in_memory_or_storage_by_block( num.into(), |db_provider| db_provider.header_by_number(num), @@ -675,7 +677,10 @@ impl HeaderProvider for ConsistentProvider { self.storage_provider.header_td_by_number(number) } - fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { + fn headers_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.headers_range(range), @@ -684,7 +689,10 @@ impl HeaderProvider for ConsistentProvider { ) } - fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { + fn sealed_header( + &self, + number: BlockNumber, + ) -> ProviderResult>> { self.get_in_memory_or_storage_by_block( number.into(), |db_provider| db_provider.sealed_header(number), @@ -695,7 +703,7 @@ impl HeaderProvider for ConsistentProvider { fn sealed_headers_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.sealed_headers_range(range), @@ -707,8 +715,8 @@ impl HeaderProvider for ConsistentProvider { fn sealed_headers_while( &self, range: impl RangeBounds, - predicate: impl FnMut(&SealedHeader) -> bool, - ) -> ProviderResult> { + predicate: impl FnMut(&SealedHeader) -> bool, + ) -> ProviderResult>> { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, predicate| db_provider.sealed_headers_while(range, predicate), @@ -832,7 +840,7 @@ impl BlockReader for ConsistentProvider { 
Ok(self.canonical_in_memory_state.pending_block_and_receipts()) } - fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>>> { self.get_in_memory_or_storage_by_block( id, |db_provider| db_provider.ommers(id), @@ -868,7 +876,7 @@ impl BlockReader for ConsistentProvider { // Iterate from the lowest block in memory until our target block for state in block_state.chain().collect::>().into_iter().rev() { let block_tx_count = state.block_ref().block.body.transactions().len() as u64; - if state.block_ref().block().number == number { + if state.block_ref().block().number() == number { stored_indices.tx_count = block_tx_count; } else { stored_indices.first_tx_num += block_tx_count; @@ -1017,7 +1025,7 @@ impl TransactionsProvider for ConsistentProvider { self.get_in_memory_or_storage_by_tx( id.into(), |provider| provider.transaction_block(id), - |_, _, block_state| Ok(Some(block_state.block_ref().block().number)), + |_, _, block_state| Ok(Some(block_state.block_ref().block().number())), ) } @@ -1222,7 +1230,7 @@ impl StageCheckpointReader for ConsistentProvider { } } -impl EvmEnvProvider for ConsistentProvider { +impl EvmEnvProvider> for ConsistentProvider { fn fill_env_at( &self, cfg: &mut CfgEnvWithHandlerCfg, @@ -1231,7 +1239,7 @@ impl EvmEnvProvider for ConsistentProvider { evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
>, { let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?; let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?; @@ -1242,15 +1250,15 @@ impl EvmEnvProvider for ConsistentProvider { &self, cfg: &mut CfgEnvWithHandlerCfg, block_env: &mut BlockEnv, - header: &Header, + header: &HeaderTy, evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
>, { let total_difficulty = self - .header_td_by_number(header.number)? - .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?; + .header_td_by_number(header.number())? + .ok_or_else(|| ProviderError::HeaderNotFound(header.number().into()))?; evm_config.fill_cfg_and_block_env(cfg, block_env, header, total_difficulty); Ok(()) } @@ -1262,7 +1270,7 @@ impl EvmEnvProvider for ConsistentProvider { evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
>, { let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?; let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?; @@ -1272,15 +1280,15 @@ impl EvmEnvProvider for ConsistentProvider { fn fill_cfg_env_with_header( &self, cfg: &mut CfgEnvWithHandlerCfg, - header: &Header, + header: &HeaderTy, evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
>, { let total_difficulty = self - .header_td_by_number(header.number)? - .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?; + .header_td_by_number(header.number())? + .ok_or_else(|| ProviderError::HeaderNotFound(header.number().into()))?; evm_config.fill_cfg_env(cfg, header, total_difficulty); Ok(()) } @@ -1326,7 +1334,7 @@ impl BlockReaderIdExt for ConsistentProvider { } } - fn header_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { + fn header_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult>> { Ok(match id { BlockNumberOrTag::Latest => { Some(self.canonical_in_memory_state.get_canonical_head().unseal()) @@ -1347,7 +1355,7 @@ impl BlockReaderIdExt for ConsistentProvider { fn sealed_header_by_number_or_tag( &self, id: BlockNumberOrTag, - ) -> ProviderResult> { + ) -> ProviderResult>>> { match id { BlockNumberOrTag::Latest => { Ok(Some(self.canonical_in_memory_state.get_canonical_head())) @@ -1366,21 +1374,24 @@ impl BlockReaderIdExt for ConsistentProvider { } } - fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult> { + fn sealed_header_by_id( + &self, + id: BlockId, + ) -> ProviderResult>>> { Ok(match id { BlockId::Number(num) => self.sealed_header_by_number_or_tag(num)?, BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(SealedHeader::seal), }) } - fn header_by_id(&self, id: BlockId) -> ProviderResult> { + fn header_by_id(&self, id: BlockId) -> ProviderResult>> { Ok(match id { BlockId::Number(num) => self.header_by_number_or_tag(num)?, BlockId::Hash(hash) => self.header(&hash.block_hash)?, }) } - fn ommers_by_id(&self, id: BlockId) -> ProviderResult>> { + fn ommers_by_id(&self, id: BlockId) -> ProviderResult>>> { match id { BlockId::Number(num) => self.ommers_by_number_or_tag(num), BlockId::Hash(hash) => { diff --git a/crates/storage/provider/src/providers/consistent_view.rs b/crates/storage/provider/src/providers/consistent_view.rs index 4640f4603354..479537f120cc 100644 --- 
a/crates/storage/provider/src/providers/consistent_view.rs +++ b/crates/storage/provider/src/providers/consistent_view.rs @@ -2,11 +2,11 @@ use crate::{BlockNumReader, DatabaseProviderFactory, HeaderProvider}; use alloy_primitives::B256; use reth_errors::ProviderError; use reth_primitives::GotExpected; -use reth_storage_api::{BlockReader, DBProvider}; +use reth_storage_api::{BlockReader, DBProvider, StateCommitmentProvider}; use reth_storage_errors::provider::ProviderResult; use reth_trie::HashedPostState; -use reth_trie_db::DatabaseHashedPostState; +use reth_trie_db::{DatabaseHashedPostState, StateCommitment}; pub use reth_storage_errors::provider::ConsistentViewError; @@ -33,7 +33,7 @@ pub struct ConsistentDbView { impl ConsistentDbView where - Factory: DatabaseProviderFactory, + Factory: DatabaseProviderFactory + StateCommitmentProvider, { /// Creates new consistent database view. pub const fn new(factory: Factory, tip: Option) -> Self { @@ -59,7 +59,9 @@ where { Ok(HashedPostState::default()) } else { - Ok(HashedPostState::from_reverts(provider.tx_ref(), block_number + 1)?) + Ok(HashedPostState::from_reverts::< + ::KeyHasher, + >(provider.tx_ref(), block_number + 1)?) 
} } diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 3c22a1a73a23..39230e253ed6 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -3,11 +3,10 @@ use crate::{ to_range, traits::{BlockSource, ReceiptProvider}, BlockHashReader, BlockNumReader, BlockReader, ChainSpecProvider, DatabaseProviderFactory, - EvmEnvProvider, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, ProviderError, - PruneCheckpointReader, StageCheckpointReader, StateProviderBox, StaticFileProviderFactory, - TransactionVariant, TransactionsProvider, WithdrawalsProvider, + EvmEnvProvider, HashedPostStateProvider, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, + ProviderError, PruneCheckpointReader, StageCheckpointReader, StateProviderBox, + StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; -use alloy_consensus::Header; use alloy_eips::{ eip4895::{Withdrawal, Withdrawals}, BlockHashOrNumber, @@ -19,16 +18,23 @@ use reth_db::{init_db, mdbx::DatabaseArguments, DatabaseEnv}; use reth_db_api::{database::Database, models::StoredBlockBodyIndices}; use reth_errors::{RethError, RethResult}; use reth_evm::ConfigureEvmEnv; -use reth_node_types::{BlockTy, NodeTypesWithDB, ReceiptTy, TxTy}; +use reth_node_types::{BlockTy, HeaderTy, NodeTypesWithDB, ReceiptTy, TxTy}; use reth_primitives::{ BlockWithSenders, SealedBlockFor, SealedBlockWithSenders, SealedHeader, StaticFileSegment, TransactionMeta, }; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{NodePrimitivesProvider, TryIntoHistoricalStateProvider}; +use reth_storage_api::{ + NodePrimitivesProvider, StateCommitmentProvider, TryIntoHistoricalStateProvider, +}; use reth_storage_errors::provider::ProviderResult; -use revm::primitives::{BlockEnv, 
CfgEnvWithHandlerCfg}; +use reth_trie::HashedPostState; +use reth_trie_db::StateCommitment; +use revm::{ + db::BundleState, + primitives::{BlockEnv, CfgEnvWithHandlerCfg}, +}; use std::{ ops::{RangeBounds, RangeInclusive}, path::Path, @@ -220,6 +226,10 @@ impl DatabaseProviderFactory for ProviderFactory { } } +impl StateCommitmentProvider for ProviderFactory { + type StateCommitment = N::StateCommitment; +} + impl StaticFileProviderFactory for ProviderFactory { /// Returns static file provider fn static_file_provider(&self) -> StaticFileProvider { @@ -228,21 +238,24 @@ impl StaticFileProviderFactory for ProviderFactory { } impl HeaderSyncGapProvider for ProviderFactory { + type Header = HeaderTy; fn sync_gap( &self, tip: watch::Receiver, highest_uninterrupted_block: BlockNumber, - ) -> ProviderResult { + ) -> ProviderResult> { self.provider()?.sync_gap(tip, highest_uninterrupted_block) } } impl HeaderProvider for ProviderFactory { - fn header(&self, block_hash: &BlockHash) -> ProviderResult> { + type Header = HeaderTy; + + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { self.provider()?.header(block_hash) } - fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { + fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Headers, num, @@ -270,7 +283,10 @@ impl HeaderProvider for ProviderFactory { ) } - fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { + fn headers_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { self.static_file_provider.get_range_with_static_file_or_database( StaticFileSegment::Headers, to_range(range), @@ -280,7 +296,10 @@ impl HeaderProvider for ProviderFactory { ) } - fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { + fn sealed_header( + &self, + number: BlockNumber, + ) -> ProviderResult>> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Headers, 
number, @@ -292,15 +311,15 @@ impl HeaderProvider for ProviderFactory { fn sealed_headers_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.sealed_headers_while(range, |_| true) } fn sealed_headers_while( &self, range: impl RangeBounds, - predicate: impl FnMut(&SealedHeader) -> bool, - ) -> ProviderResult> { + predicate: impl FnMut(&SealedHeader) -> bool, + ) -> ProviderResult>> { self.static_file_provider.get_range_with_static_file_or_database( StaticFileSegment::Headers, to_range(range), @@ -385,7 +404,7 @@ impl BlockReader for ProviderFactory { self.provider()?.pending_block_and_receipts() } - fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { self.provider()?.ommers(id) } @@ -570,7 +589,7 @@ impl StageCheckpointReader for ProviderFactory { } } -impl EvmEnvProvider for ProviderFactory { +impl EvmEnvProvider> for ProviderFactory { fn fill_env_at( &self, cfg: &mut CfgEnvWithHandlerCfg, @@ -579,7 +598,7 @@ impl EvmEnvProvider for ProviderFactory { evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
>, { self.provider()?.fill_env_at(cfg, block_env, at, evm_config) } @@ -588,11 +607,11 @@ impl EvmEnvProvider for ProviderFactory { &self, cfg: &mut CfgEnvWithHandlerCfg, block_env: &mut BlockEnv, - header: &Header, + header: &HeaderTy, evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
>, { self.provider()?.fill_env_with_header(cfg, block_env, header, evm_config) } @@ -604,7 +623,7 @@ impl EvmEnvProvider for ProviderFactory { evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
>, { self.provider()?.fill_cfg_env_at(cfg, at, evm_config) } @@ -612,11 +631,11 @@ impl EvmEnvProvider for ProviderFactory { fn fill_cfg_env_with_header( &self, cfg: &mut CfgEnvWithHandlerCfg, - header: &Header, + header: &HeaderTy, evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
>, { self.provider()?.fill_cfg_env_with_header(cfg, header, evm_config) } @@ -643,6 +662,14 @@ impl PruneCheckpointReader for ProviderFactory { } } +impl HashedPostStateProvider for ProviderFactory { + fn hashed_post_state(&self, bundle_state: &BundleState) -> HashedPostState { + HashedPostState::from_bundle_state::<::KeyHasher>( + bundle_state.state(), + ) + } +} + impl Clone for ProviderFactory { fn clone(&self) -> Self { Self { diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index cfbe20cf4b44..9dddbb9c0a7c 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -19,7 +19,7 @@ use crate::{ StorageLocation, StorageReader, StorageTrieWriter, TransactionVariant, TransactionsProvider, TransactionsProviderExt, TrieWriter, WithdrawalsProvider, }; -use alloy_consensus::Header; +use alloy_consensus::{BlockHeader, Header}; use alloy_eips::{ eip2718::Encodable2718, eip4895::{Withdrawal, Withdrawals}, @@ -50,10 +50,11 @@ use reth_db_api::{ use reth_evm::ConfigureEvmEnv; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_network_p2p::headers::downloader::SyncTarget; -use reth_node_types::{BlockTy, BodyTy, NodeTypes, ReceiptTy, TxTy}; +use reth_node_types::{BlockTy, BodyTy, HeaderTy, NodeTypes, ReceiptTy, TxTy}; use reth_primitives::{ - Account, BlockExt, BlockWithSenders, Bytecode, GotExpected, SealedBlock, SealedBlockFor, - SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, TransactionMeta, + Account, BlockExt, BlockWithSenders, Bytecode, GotExpected, NodePrimitives, SealedBlock, + SealedBlockFor, SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, + TransactionMeta, }; use reth_primitives_traits::{Block as _, BlockBody as _, SignedTransaction}; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; @@ -326,7 +327,7 @@ impl DatabaseProvider 
StateCommitmentProvider for DatabaseProvi type StateCommitment = N::StateCommitment; } -impl DatabaseProvider { +impl< + Tx: DbTx + DbTxMut + 'static, + N: NodeTypesForProvider>, + > DatabaseProvider +{ // TODO: uncomment below, once `reth debug_cmd` has been feature gated with dev. // #[cfg(any(test, feature = "test-utils"))] /// Inserts an historical block. **Used for setting up test environments** @@ -560,8 +565,7 @@ impl DatabaseProvider { construct_block: BF, ) -> ProviderResult> where - N::ChainSpec: EthereumHardforks, - H: AsRef
, + H: AsRef>, HF: FnOnce(BlockNumber) -> ProviderResult>, BF: FnOnce(H, BodyTy, Vec
) -> ProviderResult>, { @@ -610,8 +614,7 @@ impl DatabaseProvider { mut assemble_block: F, ) -> ProviderResult> where - N::ChainSpec: EthereumHardforks, - H: AsRef
, + H: AsRef>, HF: FnOnce(RangeInclusive) -> ProviderResult>, F: FnMut(H, BodyTy, Range) -> ProviderResult, { @@ -634,7 +637,7 @@ impl DatabaseProvider { // have enough information to return the block anyways, so // we skip the block. if let Some((_, block_body_indices)) = - block_body_cursor.seek_exact(header.as_ref().number)? + block_body_cursor.seek_exact(header.as_ref().number())? { let tx_range = block_body_indices.tx_num_range(); present_headers.push((header, tx_range)); @@ -678,8 +681,7 @@ impl DatabaseProvider { assemble_block: BF, ) -> ProviderResult> where - N::ChainSpec: EthereumHardforks, - H: AsRef
, + H: AsRef>, HF: Fn(RangeInclusive) -> ProviderResult>, BF: Fn(H, BodyTy, Vec
) -> ProviderResult, { @@ -943,12 +945,16 @@ impl ChangeSetReader for DatabaseProvider { } } -impl HeaderSyncGapProvider for DatabaseProvider { +impl HeaderSyncGapProvider + for DatabaseProvider +{ + type Header = HeaderTy; + fn sync_gap( &self, tip: watch::Receiver, highest_uninterrupted_block: BlockNumber, - ) -> ProviderResult { + ) -> ProviderResult> { let static_file_provider = self.static_file_provider(); // Make sure Headers static file is at the same height. If it's further, this @@ -987,10 +993,10 @@ impl HeaderSyncGapProvider for DatabaseProvide } } -impl> HeaderProvider - for DatabaseProvider -{ - fn header(&self, block_hash: &BlockHash) -> ProviderResult> { +impl HeaderProvider for DatabaseProvider { + type Header = HeaderTy; + + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { if let Some(num) = self.block_number(*block_hash)? { Ok(self.header_by_number(num)?) } else { @@ -998,12 +1004,12 @@ impl> HeaderProvi } } - fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { + fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Headers, num, |static_file| static_file.header_by_number(num), - || Ok(self.tx.get::(num)?), + || Ok(self.tx.get::>(num)?), ) } @@ -1030,17 +1036,25 @@ impl> HeaderProvi ) } - fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { + fn headers_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { self.static_file_provider.get_range_with_static_file_or_database( StaticFileSegment::Headers, to_range(range), |static_file, range, _| static_file.headers_range(range), - |range, _| self.cursor_read_collect::(range).map_err(Into::into), + |range, _| { + self.cursor_read_collect::>(range).map_err(Into::into) + }, |_| true, ) } - fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { + fn sealed_header( + &self, + number: BlockNumber, + ) -> ProviderResult>> { 
self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Headers, number, @@ -1061,15 +1075,17 @@ impl> HeaderProvi fn sealed_headers_while( &self, range: impl RangeBounds, - predicate: impl FnMut(&SealedHeader) -> bool, - ) -> ProviderResult> { + predicate: impl FnMut(&SealedHeader) -> bool, + ) -> ProviderResult>> { self.static_file_provider.get_range_with_static_file_or_database( StaticFileSegment::Headers, to_range(range), |static_file, range, predicate| static_file.sealed_headers_while(range, predicate), |range, mut predicate| { let mut headers = vec![]; - for entry in self.tx.cursor_read::()?.walk_range(range)? { + for entry in + self.tx.cursor_read::>()?.walk_range(range)? + { let (number, header) = entry?; let hash = self .block_hash(number)? @@ -1210,7 +1226,7 @@ impl BlockReader for DatabaseProvid /// /// If the block is not found, this returns `None`. /// If the block exists, but doesn't contain ommers, this returns `None`. - fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { if let Some(number) = self.convert_hash_or_number(id)? { // If the Paris (Merge) hardfork block is known and block is after it, return empty // ommers. 
@@ -1218,7 +1234,8 @@ impl BlockReader for DatabaseProvid return Ok(Some(Vec::new())) } - let ommers = self.tx.get::(number)?.map(|o| o.ommers); + let ommers = + self.tx.get::>(number)?.map(|o| o.ommers); return Ok(ommers) } @@ -1450,9 +1467,9 @@ impl TransactionsProvider for Datab index, block_hash, block_number, - base_fee: header.base_fee_per_gas, - excess_blob_gas: header.excess_blob_gas, - timestamp: header.timestamp, + base_fee: header.base_fee_per_gas(), + excess_blob_gas: header.excess_blob_gas(), + timestamp: header.timestamp(), }; return Ok(Some((transaction, meta))) @@ -1618,7 +1635,7 @@ impl> Withdrawals } } -impl> EvmEnvProvider +impl EvmEnvProvider> for DatabaseProvider { fn fill_env_at( @@ -1629,7 +1646,7 @@ impl> EvmEnvProvi evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
>, { let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?; let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?; @@ -1640,15 +1657,15 @@ impl> EvmEnvProvi &self, cfg: &mut CfgEnvWithHandlerCfg, block_env: &mut BlockEnv, - header: &Header, + header: &HeaderTy, evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
>, { let total_difficulty = self - .header_td_by_number(header.number)? - .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?; + .header_td_by_number(header.number())? + .ok_or_else(|| ProviderError::HeaderNotFound(header.number().into()))?; evm_config.fill_cfg_and_block_env(cfg, block_env, header, total_difficulty); Ok(()) } @@ -1660,7 +1677,7 @@ impl> EvmEnvProvi evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
>, { let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?; let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?; @@ -1670,15 +1687,15 @@ impl> EvmEnvProvi fn fill_cfg_env_with_header( &self, cfg: &mut CfgEnvWithHandlerCfg, - header: &Header, + header: &HeaderTy, evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv
>, { let total_difficulty = self - .header_td_by_number(header.number)? - .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?; + .header_td_by_number(header.number())? + .ok_or_else(|| ProviderError::HeaderNotFound(header.number().into()))?; evm_config.fill_cfg_env(cfg, header, total_difficulty); Ok(()) } @@ -2813,18 +2830,18 @@ impl BlockWrite block: SealedBlockWithSenders, write_to: StorageLocation, ) -> ProviderResult { - let block_number = block.number; + let block_number = block.number(); let mut durations_recorder = metrics::DurationsRecorder::default(); // total difficulty let ttd = if block_number == 0 { - block.difficulty + block.difficulty() } else { let parent_block_number = block_number - 1; let parent_ttd = self.header_td_by_number(parent_block_number)?.unwrap_or_default(); durations_recorder.record_relative(metrics::Action::GetParentTD); - parent_ttd + block.difficulty + parent_ttd + block.difficulty() }; if write_to.database() { @@ -2832,7 +2849,8 @@ impl BlockWrite durations_recorder.record_relative(metrics::Action::InsertCanonicalHeaders); // Put header with canonical hashes. 
- self.tx.put::(block_number, block.header.as_ref().clone())?; + self.tx + .put::>>(block_number, block.header.as_ref().clone())?; durations_recorder.record_relative(metrics::Action::InsertHeaders); self.tx.put::(block_number, ttd.into())?; @@ -2979,7 +2997,7 @@ impl BlockWrite self.tx.delete::(hash, None)?; rev_headers.delete_current()?; } - self.remove::(block + 1..)?; + self.remove::>>(block + 1..)?; self.remove::(block + 1..)?; // First transaction to be removed @@ -3063,10 +3081,10 @@ impl BlockWrite return Ok(()) } - let first_number = blocks.first().unwrap().number; + let first_number = blocks.first().unwrap().number(); let last = blocks.last().unwrap(); - let last_block_number = last.number; + let last_block_number = last.number(); let mut durations_recorder = metrics::DurationsRecorder::default(); diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 6631b5b1b31a..44cd5554bee6 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -1,6 +1,6 @@ use crate::{ AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, - BlockSource, BlockchainTreePendingStateProvider, CanonChainTracker, CanonStateNotifications, + BlockSource, BlockchainTreePendingStateProvider, CanonStateNotifications, CanonStateSubscriptions, ChainSpecProvider, ChainStateBlockReader, ChangeSetReader, DatabaseProviderFactory, EvmEnvProvider, FullExecutionDataProvider, HeaderProvider, NodePrimitivesProvider, ProviderError, PruneCheckpointReader, ReceiptProvider, @@ -14,6 +14,7 @@ use alloy_eips::{ BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, }; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; +use alloy_rpc_types_engine::ForkchoiceState; use reth_blockchain_tree_api::{ error::{CanonicalError, InsertBlockError}, BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, @@ 
-24,13 +25,16 @@ use reth_chainspec::{ChainInfo, EthereumHardforks}; use reth_db::table::Value; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; -use reth_node_types::{BlockTy, FullNodePrimitives, NodeTypes, NodeTypesWithDB, ReceiptTy, TxTy}; +use reth_node_types::{ + BlockTy, FullNodePrimitives, HeaderTy, NodeTypes, NodeTypesWithDB, ReceiptTy, TxTy, +}; use reth_primitives::{ Account, BlockWithSenders, EthPrimitives, Receipt, SealedBlock, SealedBlockFor, SealedBlockWithSenders, SealedHeader, TransactionMeta, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; +use reth_storage_api::CanonChainTracker; use reth_storage_errors::provider::ProviderResult; use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; use std::{ @@ -61,7 +65,6 @@ mod bundle_state_provider; pub use bundle_state_provider::BundleStateProvider; mod consistent_view; -use alloy_rpc_types_engine::ForkchoiceState; pub use consistent_view::{ConsistentDbView, ConsistentViewError}; mod blockchain_provider; @@ -77,11 +80,7 @@ where Self: NodeTypes< ChainSpec: EthereumHardforks, Storage: ChainStorage, - Primitives: FullNodePrimitives< - SignedTx: Value, - Receipt: Value, - BlockHeader = alloy_consensus::Header, - >, + Primitives: FullNodePrimitives, >, { } @@ -90,11 +89,7 @@ impl NodeTypesForProvider for T where T: NodeTypes< ChainSpec: EthereumHardforks, Storage: ChainStorage, - Primitives: FullNodePrimitives< - SignedTx: Value, - Receipt: Value, - BlockHeader = alloy_consensus::Header, - >, + Primitives: FullNodePrimitives, > { } @@ -151,7 +146,7 @@ impl BlockchainProvider { } } -impl BlockchainProvider { +impl BlockchainProvider { /// Create new provider instance that wraps the database and the blockchain tree, using the /// provided latest header to initialize the chain info tracker, alongside the finalized header /// if it exists. 
@@ -261,7 +256,9 @@ impl StaticFileProviderFactory for BlockchainProvider { } } -impl HeaderProvider for BlockchainProvider { +impl HeaderProvider for BlockchainProvider { + type Header = Header; + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { self.database.header(block_hash) } @@ -593,7 +590,7 @@ impl StageCheckpointReader for BlockchainProvider { } } -impl EvmEnvProvider for BlockchainProvider { +impl EvmEnvProvider for BlockchainProvider { fn fill_env_at( &self, cfg: &mut CfgEnvWithHandlerCfg, @@ -666,7 +663,7 @@ impl ChainSpecProvider for BlockchainProvider { } } -impl StateProviderFactory for BlockchainProvider { +impl StateProviderFactory for BlockchainProvider { /// Storage provider for latest block fn latest(&self) -> ProviderResult { trace!(target: "providers::blockchain", "Getting latest block state provider"); @@ -840,10 +837,9 @@ impl BlockchainTreeViewer for BlockchainProvider { } } -impl CanonChainTracker for BlockchainProvider -where - Self: BlockReader, -{ +impl CanonChainTracker for BlockchainProvider { + type Header = HeaderTy; + fn on_forkchoice_update_received(&self, _update: &ForkchoiceState) { // update timestamp self.chain_info.on_forkchoice_update_received(); @@ -874,10 +870,7 @@ where } } -impl BlockReaderIdExt for BlockchainProvider -where - Self: BlockReader + ReceiptProviderIdExt, -{ +impl BlockReaderIdExt for BlockchainProvider { fn block_by_id(&self, id: BlockId) -> ProviderResult> { match id { BlockId::Number(num) => self.block_by_number_or_tag(num), @@ -896,7 +889,10 @@ where } } - fn header_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { + fn header_by_number_or_tag( + &self, + id: BlockNumberOrTag, + ) -> ProviderResult> { Ok(match id { BlockNumberOrTag::Latest => Some(self.chain_info.get_canonical_head().unseal()), BlockNumberOrTag::Finalized => { @@ -912,7 +908,7 @@ where fn sealed_header_by_number_or_tag( &self, id: BlockNumberOrTag, - ) -> ProviderResult> { + ) -> ProviderResult>> { match id { 
BlockNumberOrTag::Latest => Ok(Some(self.chain_info.get_canonical_head())), BlockNumberOrTag::Finalized => Ok(self.chain_info.get_finalized_header()), @@ -927,21 +923,24 @@ where } } - fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult> { + fn sealed_header_by_id( + &self, + id: BlockId, + ) -> ProviderResult>> { Ok(match id { BlockId::Number(num) => self.sealed_header_by_number_or_tag(num)?, BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(SealedHeader::seal), }) } - fn header_by_id(&self, id: BlockId) -> ProviderResult> { + fn header_by_id(&self, id: BlockId) -> ProviderResult> { Ok(match id { BlockId::Number(num) => self.header_by_number_or_tag(num)?, BlockId::Hash(hash) => self.header(&hash.block_hash)?, }) } - fn ommers_by_id(&self, id: BlockId) -> ProviderResult>> { + fn ommers_by_id(&self, id: BlockId) -> ProviderResult>> { match id { BlockId::Number(num) => self.ommers_by_number_or_tag(num), BlockId::Hash(hash) => { @@ -968,7 +967,9 @@ impl CanonStateSubscriptions for BlockchainProvider { } } -impl ForkChoiceSubscriptions for BlockchainProvider { +impl ForkChoiceSubscriptions for BlockchainProvider { + type Header = HeaderTy; + fn subscribe_safe_block(&self) -> ForkChoiceNotifications { let receiver = self.chain_info.subscribe_safe_block(); ForkChoiceNotifications(receiver) diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index ad36a4a5ab3e..93752c1e278d 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -1,6 +1,6 @@ use crate::{ providers::state::macros::delegate_provider_impls, AccountReader, BlockHashReader, - ProviderError, StateProvider, StateRootProvider, + HashedPostStateProvider, ProviderError, StateProvider, StateRootProvider, }; use alloy_eips::merge::EPOCH_SLOTS; use alloy_primitives::{ @@ -28,7 +28,7 @@ use reth_trie::{ }; use reth_trie_db::{ 
DatabaseHashedPostState, DatabaseHashedStorage, DatabaseProof, DatabaseStateRoot, - DatabaseStorageProof, DatabaseStorageRoot, DatabaseTrieWitness, + DatabaseStorageProof, DatabaseStorageRoot, DatabaseTrieWitness, StateCommitment, }; use std::fmt::Debug; @@ -136,7 +136,9 @@ impl<'b, Provider: DBProvider + BlockNumReader + StateCommitmentProvider> ); } - Ok(HashedPostState::from_reverts(self.tx(), self.block_number)?) + Ok(HashedPostState::from_reverts::< + ::KeyHasher, + >(self.tx(), self.block_number)?) } /// Retrieve revert hashed storage for this history provider and target address. @@ -394,6 +396,16 @@ impl StateProof } } +impl HashedPostStateProvider + for HistoricalStateProviderRef<'_, Provider> +{ + fn hashed_post_state(&self, bundle_state: &revm::db::BundleState) -> HashedPostState { + HashedPostState::from_bundle_state::< + ::KeyHasher, + >(bundle_state.state()) + } +} + impl StateProvider for HistoricalStateProviderRef<'_, Provider> { @@ -433,6 +445,12 @@ impl StateCommitmentProvider + for HistoricalStateProviderRef<'_, Provider> +{ + type StateCommitment = Provider::StateCommitment; +} + /// State provider for a given block number. /// For more detailed description, see [`HistoricalStateProviderRef`]. 
#[derive(Debug)] @@ -482,6 +500,12 @@ impl } } +impl StateCommitmentProvider + for HistoricalStateProvider +{ + type StateCommitment = Provider::StateCommitment; +} + // Delegates all provider impls to [HistoricalStateProviderRef] delegate_provider_impls!(HistoricalStateProvider where [Provider: DBProvider + BlockNumReader + BlockHashReader + StateCommitmentProvider]); diff --git a/crates/storage/provider/src/providers/state/latest.rs b/crates/storage/provider/src/providers/state/latest.rs index a2ec4972d105..bdb6de1e569e 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -1,6 +1,6 @@ use crate::{ providers::state::macros::delegate_provider_impls, AccountReader, BlockHashReader, - StateProvider, StateRootProvider, + HashedPostStateProvider, StateProvider, StateRootProvider, }; use alloy_primitives::{ map::{HashMap, HashSet}, @@ -22,7 +22,7 @@ use reth_trie::{ }; use reth_trie_db::{ DatabaseProof, DatabaseStateRoot, DatabaseStorageProof, DatabaseStorageRoot, - DatabaseTrieWitness, + DatabaseTrieWitness, StateCommitment, }; /// State provider over latest state that takes tx reference. @@ -157,6 +157,16 @@ impl StateProofProvider } } +impl HashedPostStateProvider + for LatestStateProviderRef<'_, Provider> +{ + fn hashed_post_state(&self, bundle_state: &revm::db::BundleState) -> HashedPostState { + HashedPostState::from_bundle_state::< + ::KeyHasher, + >(bundle_state.state()) + } +} + impl StateProvider for LatestStateProviderRef<'_, Provider> { @@ -181,11 +191,17 @@ impl StateProv } } +impl StateCommitmentProvider + for LatestStateProviderRef<'_, Provider> +{ + type StateCommitment = Provider::StateCommitment; +} + /// State provider for the latest state. 
#[derive(Debug)] pub struct LatestStateProvider(Provider); -impl LatestStateProvider { +impl LatestStateProvider { /// Create new state provider pub const fn new(db: Provider) -> Self { Self(db) @@ -198,6 +214,10 @@ impl LatestStateProvider { } } +impl StateCommitmentProvider for LatestStateProvider { + type StateCommitment = Provider::StateCommitment; +} + // Delegates all provider impls to [LatestStateProviderRef] delegate_provider_impls!(LatestStateProvider where [Provider: DBProvider + BlockHashReader + StateCommitmentProvider]); diff --git a/crates/storage/provider/src/providers/state/macros.rs b/crates/storage/provider/src/providers/state/macros.rs index f2648fb15e6a..1fa15214e9a9 100644 --- a/crates/storage/provider/src/providers/state/macros.rs +++ b/crates/storage/provider/src/providers/state/macros.rs @@ -57,6 +57,9 @@ macro_rules! delegate_provider_impls { fn multiproof(&self, input: reth_trie::TrieInput, targets: alloy_primitives::map::HashMap>) -> reth_storage_errors::provider::ProviderResult; fn witness(&self, input: reth_trie::TrieInput, target: reth_trie::HashedPostState) -> reth_storage_errors::provider::ProviderResult>; } + HashedPostStateProvider $(where [$($generics)*])? 
{ + fn hashed_post_state(&self, bundle_state: &revm::db::BundleState) -> reth_trie::HashedPostState; + } ); } } diff --git a/crates/storage/provider/src/providers/static_file/jar.rs b/crates/storage/provider/src/providers/static_file/jar.rs index 659b093d9d6a..8f2d002ab898 100644 --- a/crates/storage/provider/src/providers/static_file/jar.rs +++ b/crates/storage/provider/src/providers/static_file/jar.rs @@ -6,7 +6,6 @@ use crate::{ to_range, BlockHashReader, BlockNumReader, HeaderProvider, ReceiptProvider, TransactionsProvider, }; -use alloy_consensus::Header; use alloy_eips::{eip2718::Encodable2718, BlockHashOrNumber}; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use reth_chainspec::ChainInfo; @@ -15,7 +14,7 @@ use reth_db::{ BlockHashMask, HeaderMask, HeaderWithHashMask, ReceiptMask, StaticFileCursor, TDWithHashMask, TotalDifficultyMask, TransactionMask, }, - table::Decompress, + table::{Decompress, Value}, }; use reth_node_types::NodePrimitives; use reth_primitives::{transaction::recover_signers, SealedHeader, TransactionMeta}; @@ -90,17 +89,19 @@ impl<'a, N: NodePrimitives> StaticFileJarProvider<'a, N> { } } -impl HeaderProvider for StaticFileJarProvider<'_, N> { - fn header(&self, block_hash: &BlockHash) -> ProviderResult> { +impl> HeaderProvider for StaticFileJarProvider<'_, N> { + type Header = N::BlockHeader; + + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { Ok(self .cursor()? - .get_two::>(block_hash.into())? + .get_two::>(block_hash.into())? 
.filter(|(_, hash)| hash == block_hash) .map(|(header, _)| header)) } - fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { - self.cursor()?.get_one::>(num.into()) + fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { + self.cursor()?.get_one::>(num.into()) } fn header_td(&self, block_hash: &BlockHash) -> ProviderResult> { @@ -115,14 +116,17 @@ impl HeaderProvider for StaticFileJarProvider<'_, N> { Ok(self.cursor()?.get_one::(num.into())?.map(Into::into)) } - fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { + fn headers_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { let range = to_range(range); let mut cursor = self.cursor()?; let mut headers = Vec::with_capacity((range.end - range.start) as usize); for num in range { - if let Some(header) = cursor.get_one::>(num.into())? { + if let Some(header) = cursor.get_one::>(num.into())? { headers.push(header); } } @@ -130,18 +134,21 @@ impl HeaderProvider for StaticFileJarProvider<'_, N> { Ok(headers) } - fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { + fn sealed_header( + &self, + number: BlockNumber, + ) -> ProviderResult>> { Ok(self .cursor()? - .get_two::>(number.into())? + .get_two::>(number.into())? .map(|(header, hash)| SealedHeader::new(header, hash))) } fn sealed_headers_while( &self, range: impl RangeBounds, - mut predicate: impl FnMut(&SealedHeader) -> bool, - ) -> ProviderResult> { + mut predicate: impl FnMut(&SealedHeader) -> bool, + ) -> ProviderResult>> { let range = to_range(range); let mut cursor = self.cursor()?; @@ -149,7 +156,7 @@ impl HeaderProvider for StaticFileJarProvider<'_, N> { for number in range { if let Some((header, hash)) = - cursor.get_two::>(number.into())? + cursor.get_two::>(number.into())? 
{ let sealed = SealedHeader::new(header, hash); if !predicate(&sealed) { diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 3b49f8d401f9..7af071299cdf 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -39,7 +39,7 @@ use reth_primitives::{ }, transaction::recover_signers, BlockWithSenders, Receipt, SealedBlockFor, SealedBlockWithSenders, SealedHeader, - StaticFileSegment, TransactionMeta, TransactionSignedNoHash, + StaticFileSegment, TransactionMeta, TransactionSigned, }; use reth_primitives_traits::SignedTransaction; use reth_stages_types::{PipelineTarget, StageId}; @@ -1235,12 +1235,14 @@ impl StaticFileWriter for StaticFileProvider { } } -impl HeaderProvider for StaticFileProvider { - fn header(&self, block_hash: &BlockHash) -> ProviderResult> { +impl> HeaderProvider for StaticFileProvider { + type Header = N::BlockHeader; + + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { self.find_static_file(StaticFileSegment::Headers, |jar_provider| { Ok(jar_provider .cursor()? - .get_two::>(block_hash.into())? + .get_two::>(block_hash.into())? 
.and_then(|(header, hash)| { if &hash == block_hash { return Some(header) @@ -1250,7 +1252,7 @@ impl HeaderProvider for StaticFileProvider { }) } - fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { + fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { self.get_segment_provider_from_block(StaticFileSegment::Headers, num, None) .and_then(|provider| provider.header_by_number(num)) .or_else(|err| { @@ -1283,16 +1285,22 @@ impl HeaderProvider for StaticFileProvider { }) } - fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { + fn headers_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { self.fetch_range_with_predicate( StaticFileSegment::Headers, to_range(range), - |cursor, number| cursor.get_one::>(number.into()), + |cursor, number| cursor.get_one::>(number.into()), |_| true, ) } - fn sealed_header(&self, num: BlockNumber) -> ProviderResult> { + fn sealed_header( + &self, + num: BlockNumber, + ) -> ProviderResult>> { self.get_segment_provider_from_block(StaticFileSegment::Headers, num, None) .and_then(|provider| provider.sealed_header(num)) .or_else(|err| { @@ -1307,14 +1315,14 @@ impl HeaderProvider for StaticFileProvider { fn sealed_headers_while( &self, range: impl RangeBounds, - predicate: impl FnMut(&SealedHeader) -> bool, - ) -> ProviderResult> { + predicate: impl FnMut(&SealedHeader) -> bool, + ) -> ProviderResult>> { self.fetch_range_with_predicate( StaticFileSegment::Headers, to_range(range), |cursor, number| { Ok(cursor - .get_two::>(number.into())? + .get_two::>(number.into())? 
.map(|(header, hash)| SealedHeader::new(header, hash))) }, predicate, @@ -1385,8 +1393,8 @@ impl> Rec } } -impl> TransactionsProviderExt - for StaticFileProvider +impl> + TransactionsProviderExt for StaticFileProvider { fn transaction_hashes_by_range( &self, @@ -1582,7 +1590,9 @@ impl BlockNumReader for StaticFileProvider { } } -impl> BlockReader for StaticFileProvider { +impl> BlockReader + for StaticFileProvider +{ type Block = N::Block; fn find_block_by_hash( @@ -1618,7 +1628,7 @@ impl> BlockReader for Sta Err(ProviderError::UnsupportedProvider) } - fn ommers(&self, _id: BlockHashOrNumber) -> ProviderResult>> { + fn ommers(&self, _id: BlockHashOrNumber) -> ProviderResult>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } @@ -1696,7 +1706,7 @@ impl StatsReader for StaticFileProvider { .get_highest_static_file_tx(StaticFileSegment::Receipts) .map(|receipts| receipts + 1) .unwrap_or_default() as usize), - tables::Transactions::::NAME => Ok(self + tables::Transactions::::NAME => Ok(self .get_highest_static_file_tx(StaticFileSegment::Transactions) .map(|txs| txs + 1) .unwrap_or_default() diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index 6f5335ec6657..b7f60c164423 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -2,7 +2,7 @@ use super::{ manager::StaticFileProviderInner, metrics::StaticFileProviderMetrics, StaticFileProvider, }; use crate::providers::static_file::metrics::StaticFileProviderOperation; -use alloy_consensus::Header; +use alloy_consensus::BlockHeader; use alloy_primitives::{BlockHash, BlockNumber, TxNumber, U256}; use parking_lot::{lock_api::RwLockWriteGuard, RawRwLock, RwLock}; use reth_codecs::Compact; @@ -526,16 +526,19 @@ impl StaticFileProviderRW { /// Returns the current [`BlockNumber`] as seen in the static file. 
pub fn append_header( &mut self, - header: &Header, + header: &N::BlockHeader, total_difficulty: U256, hash: &BlockHash, - ) -> ProviderResult<()> { + ) -> ProviderResult<()> + where + N::BlockHeader: Compact, + { let start = Instant::now(); self.ensure_no_queued_prune()?; debug_assert!(self.writer.user_header().segment() == StaticFileSegment::Headers); - self.increment_block(header.number)?; + self.increment_block(header.number())?; self.append_column(header)?; self.append_column(CompactU256::from(total_difficulty))?; diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 12c0330ac0e0..6815bbcb1238 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -29,7 +29,8 @@ use reth_primitives::{ use reth_primitives_traits::SignedTransaction; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ - DatabaseProviderFactory, StageCheckpointReader, StateProofProvider, StorageRootProvider, + DatabaseProviderFactory, HashedPostStateProvider, StageCheckpointReader, + StateCommitmentProvider, StateProofProvider, StorageRootProvider, }; use reth_storage_errors::provider::{ConsistentViewError, ProviderError, ProviderResult}; use reth_trie::{ @@ -164,6 +165,10 @@ impl NodeTypes for MockNode { type Storage = EthStorage; } +impl StateCommitmentProvider for MockEthProvider { + type StateCommitment = ::StateCommitment; +} + impl DatabaseProviderFactory for MockEthProvider { type DB = DatabaseMock; type Provider = DatabaseProvider; @@ -179,6 +184,8 @@ impl DatabaseProviderFactory for MockEthProvider { } impl HeaderProvider for MockEthProvider { + type Header = Header; + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { let lock = self.headers.lock(); Ok(lock.get(block_hash).cloned()) @@ -680,6 +687,12 @@ impl StateProofProvider for MockEthProvider { } } +impl HashedPostStateProvider for MockEthProvider { + fn hashed_post_state(&self, 
_state: &revm::db::BundleState) -> HashedPostState { + HashedPostState::default() + } +} + impl StateProvider for MockEthProvider { fn storage( &self, diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index ff6b3fccbe10..a33e4159be22 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -27,7 +27,9 @@ use reth_primitives::{ }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{NodePrimitivesProvider, StateProofProvider, StorageRootProvider}; +use reth_storage_api::{ + HashedPostStateProvider, NodePrimitivesProvider, StateProofProvider, StorageRootProvider, +}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, @@ -284,6 +286,8 @@ impl ReceiptProvider for NoopProvider { impl ReceiptProviderIdExt for NoopProvider {} impl HeaderProvider for NoopProvider { + type Header = Header; + fn header(&self, _block_hash: &BlockHash) -> ProviderResult> { Ok(None) } @@ -411,6 +415,12 @@ impl StateProofProvider for NoopProvider { } } +impl HashedPostStateProvider for NoopProvider { + fn hashed_post_state(&self, _bundle_state: &revm::db::BundleState) -> HashedPostState { + HashedPostState::default() + } +} + impl StateProvider for NoopProvider { fn storage( &self, @@ -586,6 +596,8 @@ impl CanonStateSubscriptions for NoopProvider { } impl ForkChoiceSubscriptions for NoopProvider { + type Header = Header; + fn subscribe_safe_block(&self) -> ForkChoiceNotifications { let (_, rx) = watch::channel(None); ForkChoiceNotifications(rx) diff --git a/crates/storage/provider/src/traits/full.rs b/crates/storage/provider/src/traits/full.rs index 0d28f83739b0..be485839f00b 100644 --- a/crates/storage/provider/src/traits/full.rs +++ b/crates/storage/provider/src/traits/full.rs @@ -7,7 +7,7 @@ use 
crate::{ }; use reth_chain_state::{CanonStateSubscriptions, ForkChoiceSubscriptions}; use reth_chainspec::EthereumHardforks; -use reth_node_types::{BlockTy, NodeTypesWithDB, ReceiptTy, TxTy}; +use reth_node_types::{BlockTy, HeaderTy, NodeTypesWithDB, ReceiptTy, TxTy}; use reth_storage_api::NodePrimitivesProvider; /// Helper trait to unify all provider traits for simplicity. @@ -15,14 +15,18 @@ pub trait FullProvider: DatabaseProviderFactory + NodePrimitivesProvider + StaticFileProviderFactory - + BlockReaderIdExt, Block = BlockTy, Receipt = ReceiptTy> - + AccountReader + + BlockReaderIdExt< + Transaction = TxTy, + Block = BlockTy, + Receipt = ReceiptTy, + Header = HeaderTy, + > + AccountReader + StateProviderFactory + EvmEnvProvider + ChainSpecProvider + ChangeSetReader + CanonStateSubscriptions - + ForkChoiceSubscriptions + + ForkChoiceSubscriptions
> + StageCheckpointReader + Clone + Unpin @@ -34,14 +38,18 @@ impl FullProvider for T where T: DatabaseProviderFactory + NodePrimitivesProvider + StaticFileProviderFactory - + BlockReaderIdExt, Block = BlockTy, Receipt = ReceiptTy> - + AccountReader + + BlockReaderIdExt< + Transaction = TxTy, + Block = BlockTy, + Receipt = ReceiptTy, + Header = HeaderTy, + > + AccountReader + StateProviderFactory + EvmEnvProvider + ChainSpecProvider + ChangeSetReader + CanonStateSubscriptions - + ForkChoiceSubscriptions + + ForkChoiceSubscriptions
> + StageCheckpointReader + Clone + Unpin diff --git a/crates/storage/provider/src/traits/header_sync_gap.rs b/crates/storage/provider/src/traits/header_sync_gap.rs index 5ce7e1197300..b572750d4a23 100644 --- a/crates/storage/provider/src/traits/header_sync_gap.rs +++ b/crates/storage/provider/src/traits/header_sync_gap.rs @@ -1,3 +1,4 @@ +use alloy_consensus::{BlockHeader, Header}; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockNumber, B256}; use reth_network_p2p::headers::downloader::SyncTarget; @@ -7,21 +8,21 @@ use tokio::sync::watch; /// Represents a gap to sync: from `local_head` to `target` #[derive(Clone, Debug)] -pub struct HeaderSyncGap { +pub struct HeaderSyncGap { /// The local head block. Represents lower bound of sync range. - pub local_head: SealedHeader, + pub local_head: SealedHeader, /// The sync target. Represents upper bound of sync range. pub target: SyncTarget, } -impl HeaderSyncGap { +impl HeaderSyncGap { /// Returns `true` if the gap from the head to the target was closed #[inline] pub fn is_closed(&self) -> bool { match self.target.tip() { BlockHashOrNumber::Hash(hash) => self.local_head.hash() == hash, - BlockHashOrNumber::Number(num) => self.local_head.number == num, + BlockHashOrNumber::Number(num) => self.local_head.number() == num, } } } @@ -29,6 +30,9 @@ impl HeaderSyncGap { /// Client trait for determining the current headers sync gap. #[auto_impl::auto_impl(&, Arc)] pub trait HeaderSyncGapProvider: Send + Sync { + /// The header type. + type Header: Send + Sync; + /// Find a current sync gap for the headers depending on the last /// uninterrupted block number. Last uninterrupted block represents the block number before /// which there are no gaps. 
It's up to the caller to ensure that last uninterrupted block is @@ -37,5 +41,5 @@ pub trait HeaderSyncGapProvider: Send + Sync { &self, tip: watch::Receiver, highest_uninterrupted_block: BlockNumber, - ) -> ProviderResult; + ) -> ProviderResult>; } diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index 02e912050d5e..dc5af491efc2 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -234,7 +234,7 @@ mod tests { }; use reth_execution_types::ExecutionOutcome; use reth_primitives::{Account, Receipt, Receipts, StorageEntry}; - use reth_storage_api::DatabaseProviderFactory; + use reth_storage_api::{DatabaseProviderFactory, HashedPostStateProvider}; use reth_trie::{ test_utils::{state_root, storage_root_prehashed}, HashedPostState, HashedStorage, StateRoot, StorageRoot, @@ -1118,13 +1118,7 @@ mod tests { assert_eq!( StateRoot::overlay_root( tx, - ExecutionOutcome::::new( - state.bundle_state.clone(), - Receipts::default(), - 0, - Vec::new() - ) - .hash_state_slow(), + provider_factory.hashed_post_state(&state.bundle_state) ) .unwrap(), state_root(expected.clone().into_iter().map(|(address, (account, storage))| ( diff --git a/crates/storage/storage-api/Cargo.toml b/crates/storage/storage-api/Cargo.toml index ba2ccf1b1573..7ebff976d135 100644 --- a/crates/storage/storage-api/Cargo.toml +++ b/crates/storage/storage-api/Cargo.toml @@ -25,6 +25,7 @@ reth-storage-errors.workspace = true reth-trie.workspace = true reth-trie-db.workspace = true reth-db.workspace = true +revm.workspace = true # ethereum alloy-eips.workspace = true diff --git a/crates/storage/storage-api/src/block.rs b/crates/storage/storage-api/src/block.rs index 204e9027da28..917796038e9d 100644 --- a/crates/storage/storage-api/src/block.rs +++ b/crates/storage/storage-api/src/block.rs @@ -2,7 +2,6 @@ use crate::{ BlockNumReader, HeaderProvider, ReceiptProvider, ReceiptProviderIdExt, TransactionVariant, 
TransactionsProvider, WithdrawalsProvider, }; -use alloy_consensus::Header; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; use alloy_primitives::{BlockNumber, B256}; use reth_db_models::StoredBlockBodyIndices; @@ -57,6 +56,7 @@ pub trait BlockReader: /// The block type this provider reads. type Block: reth_primitives_traits::Block< Body: reth_primitives_traits::BlockBody, + Header = Self::Header, >; /// Tries to find in the given block source. @@ -98,7 +98,7 @@ pub trait BlockReader: /// Returns the ommers/uncle headers of the given block from the database. /// /// Returns `None` if block is not found. - fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>>; + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>>; /// Returns the block with matching hash from the database. /// @@ -187,7 +187,7 @@ impl BlockReader for std::sync::Arc { ) -> ProviderResult, Vec)>> { T::pending_block_and_receipts(self) } - fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { T::ommers(self, id) } fn block_by_hash(&self, hash: B256) -> ProviderResult> { @@ -256,7 +256,7 @@ impl BlockReader for &T { ) -> ProviderResult, Vec)>> { T::pending_block_and_receipts(self) } - fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { T::ommers(self, id) } fn block_by_hash(&self, hash: B256) -> ProviderResult> { @@ -321,7 +321,7 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { /// /// Note: This returns a [`SealedHeader`] because it's expected that this is sealed by the /// provider and the caller does not know the hash. 
- fn pending_header(&self) -> ProviderResult> { + fn pending_header(&self) -> ProviderResult>> { self.sealed_header_by_id(BlockNumberOrTag::Pending.into()) } @@ -329,7 +329,7 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { /// /// Note: This returns a [`SealedHeader`] because it's expected that this is sealed by the /// provider and the caller does not know the hash. - fn latest_header(&self) -> ProviderResult> { + fn latest_header(&self) -> ProviderResult>> { self.sealed_header_by_id(BlockNumberOrTag::Latest.into()) } @@ -337,7 +337,7 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { /// /// Note: This returns a [`SealedHeader`] because it's expected that this is sealed by the /// provider and the caller does not know the hash. - fn safe_header(&self) -> ProviderResult> { + fn safe_header(&self) -> ProviderResult>> { self.sealed_header_by_id(BlockNumberOrTag::Safe.into()) } @@ -345,7 +345,7 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { /// /// Note: This returns a [`SealedHeader`] because it's expected that this is sealed by the /// provider and the caller does not know the hash. - fn finalized_header(&self) -> ProviderResult> { + fn finalized_header(&self) -> ProviderResult>> { self.sealed_header_by_id(BlockNumberOrTag::Finalized.into()) } @@ -378,7 +378,10 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { /// Returns the header with matching tag from the database /// /// Returns `None` if header is not found. - fn header_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { + fn header_by_number_or_tag( + &self, + id: BlockNumberOrTag, + ) -> ProviderResult> { self.convert_block_number(id)? 
.map_or_else(|| Ok(None), |num| self.header_by_hash_or_number(num.into())) } @@ -389,7 +392,7 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { fn sealed_header_by_number_or_tag( &self, id: BlockNumberOrTag, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.convert_block_number(id)? .map_or_else(|| Ok(None), |num| self.header_by_hash_or_number(num.into()))? .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))) @@ -398,22 +401,28 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { /// Returns the sealed header with the matching `BlockId` from the database. /// /// Returns `None` if header is not found. - fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult>; + fn sealed_header_by_id( + &self, + id: BlockId, + ) -> ProviderResult>>; /// Returns the header with the matching `BlockId` from the database. /// /// Returns `None` if header is not found. - fn header_by_id(&self, id: BlockId) -> ProviderResult>; + fn header_by_id(&self, id: BlockId) -> ProviderResult>; /// Returns the ommers with the matching tag from the database. - fn ommers_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult>> { + fn ommers_by_number_or_tag( + &self, + id: BlockNumberOrTag, + ) -> ProviderResult>> { self.convert_block_number(id)?.map_or_else(|| Ok(None), |num| self.ommers(num.into())) } /// Returns the ommers with the matching `BlockId` from the database. /// /// Returns `None` if block is not found. - fn ommers_by_id(&self, id: BlockId) -> ProviderResult>>; + fn ommers_by_id(&self, id: BlockId) -> ProviderResult>>; } /// Functionality to read the last known chain blocks from the database. 
diff --git a/crates/storage/storage-api/src/chain_info.rs b/crates/storage/storage-api/src/chain_info.rs index 39f8639dd274..b6f58b7e73f0 100644 --- a/crates/storage/storage-api/src/chain_info.rs +++ b/crates/storage/storage-api/src/chain_info.rs @@ -4,6 +4,9 @@ use std::time::Instant; /// A type that can track updates related to fork choice updates. pub trait CanonChainTracker: Send + Sync { + /// The header type. + type Header: Send + Sync; + /// Notify the tracker about a received fork choice update. fn on_forkchoice_update_received(&self, update: &ForkchoiceState); @@ -19,11 +22,11 @@ pub trait CanonChainTracker: Send + Sync { fn last_exchanged_transition_configuration_timestamp(&self) -> Option; /// Sets the canonical head of the chain. - fn set_canonical_head(&self, header: SealedHeader); + fn set_canonical_head(&self, header: SealedHeader); /// Sets the safe block of the chain. - fn set_safe(&self, header: SealedHeader); + fn set_safe(&self, header: SealedHeader); /// Sets the finalized block of the chain. - fn set_finalized(&self, header: SealedHeader); + fn set_finalized(&self, header: SealedHeader); } diff --git a/crates/storage/storage-api/src/header.rs b/crates/storage/storage-api/src/header.rs index c068f7c1d295..2f1c9750edb1 100644 --- a/crates/storage/storage-api/src/header.rs +++ b/crates/storage/storage-api/src/header.rs @@ -1,34 +1,40 @@ -use alloy_consensus::Header; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockHash, BlockNumber, U256}; use reth_primitives::SealedHeader; +use reth_primitives_traits::BlockHeader; use reth_storage_errors::provider::ProviderResult; use std::ops::RangeBounds; /// Client trait for fetching `Header` related data. #[auto_impl::auto_impl(&, Arc)] pub trait HeaderProvider: Send + Sync { + /// The header type this provider supports. 
+ type Header: BlockHeader; + /// Check if block is known fn is_known(&self, block_hash: &BlockHash) -> ProviderResult { self.header(block_hash).map(|header| header.is_some()) } /// Get header by block hash - fn header(&self, block_hash: &BlockHash) -> ProviderResult>; + fn header(&self, block_hash: &BlockHash) -> ProviderResult>; /// Retrieves the header sealed by the given block hash. - fn sealed_header_by_hash(&self, block_hash: BlockHash) -> ProviderResult> { + fn sealed_header_by_hash( + &self, + block_hash: BlockHash, + ) -> ProviderResult>> { Ok(self.header(&block_hash)?.map(|header| SealedHeader::new(header, block_hash))) } /// Get header by block number - fn header_by_number(&self, num: u64) -> ProviderResult>; + fn header_by_number(&self, num: u64) -> ProviderResult>; /// Get header by block number or hash fn header_by_hash_or_number( &self, hash_or_num: BlockHashOrNumber, - ) -> ProviderResult> { + ) -> ProviderResult> { match hash_or_num { BlockHashOrNumber::Hash(hash) => self.header(&hash), BlockHashOrNumber::Number(num) => self.header_by_number(num), @@ -42,16 +48,22 @@ pub trait HeaderProvider: Send + Sync { fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult>; /// Get headers in range of block numbers - fn headers_range(&self, range: impl RangeBounds) -> ProviderResult>; + fn headers_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult>; /// Get a single sealed header by block number. - fn sealed_header(&self, number: BlockNumber) -> ProviderResult>; + fn sealed_header( + &self, + number: BlockNumber, + ) -> ProviderResult>>; /// Get headers in range of block numbers. 
fn sealed_headers_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.sealed_headers_while(range, |_| true) } @@ -59,6 +71,6 @@ pub trait HeaderProvider: Send + Sync { fn sealed_headers_while( &self, range: impl RangeBounds, - predicate: impl FnMut(&SealedHeader) -> bool, - ) -> ProviderResult>; + predicate: impl FnMut(&SealedHeader) -> bool, + ) -> ProviderResult>>; } diff --git a/crates/storage/storage-api/src/noop.rs b/crates/storage/storage-api/src/noop.rs index 7325e2b74360..0a6341cc4b42 100644 --- a/crates/storage/storage-api/src/noop.rs +++ b/crates/storage/storage-api/src/noop.rs @@ -1,28 +1,81 @@ //! Various noop implementations for traits. -use std::sync::Arc; - -use crate::{BlockHashReader, BlockNumReader}; -use alloy_primitives::{BlockNumber, B256}; -use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec}; +use crate::{ + AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, + BlockSource, ChangeSetReader, HashedPostStateProvider, HeaderProvider, NodePrimitivesProvider, + PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, + StateProofProvider, StateProvider, StateRootProvider, StorageRootProvider, TransactionVariant, + TransactionsProvider, WithdrawalsProvider, +}; +use alloy_eips::{ + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, BlockId, +}; +use alloy_primitives::{ + map::{HashMap, HashSet}, + Address, BlockHash, BlockNumber, Bytes, StorageKey, StorageValue, TxHash, TxNumber, B256, U256, +}; +use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec, MAINNET}; +use reth_db_models::{AccountBeforeTx, StoredBlockBodyIndices}; +use reth_primitives::{ + BlockWithSenders, EthPrimitives, SealedBlockFor, SealedBlockWithSenders, TransactionMeta, +}; +use reth_primitives_traits::{Account, Bytecode, NodePrimitives, SealedHeader}; +use reth_prune_types::{PruneCheckpoint, PruneSegment}; +use 
reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_errors::provider::ProviderResult; +use reth_trie::{ + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, +}; +use std::{ + marker::PhantomData, + ops::{RangeBounds, RangeInclusive}, + sync::Arc, +}; /// Supports various api interfaces for testing purposes. -#[derive(Debug, Clone)] +#[derive(Debug)] #[non_exhaustive] -pub struct NoopBlockReader { +pub struct NoopProvider { chain_spec: Arc, + _phantom: PhantomData, +} + +impl NoopProvider { + /// Create a new instance for specific primitive types. + pub fn new(chain_spec: Arc) -> Self { + Self { chain_spec, _phantom: Default::default() } + } } -impl NoopBlockReader { +impl NoopProvider { /// Create a new instance of the `NoopBlockReader`. - pub const fn new(chain_spec: Arc) -> Self { - Self { chain_spec } + pub fn eth(chain_spec: Arc) -> Self { + Self { chain_spec, _phantom: Default::default() } + } +} + +impl NoopProvider { + /// Create a new instance of the [`NoopProvider`] with the mainnet chain spec. 
+ pub fn mainnet() -> Self { + Self::eth(MAINNET.clone()) + } +} + +impl Default for NoopProvider { + fn default() -> Self { + Self::mainnet() + } +} + +impl Clone for NoopProvider { + fn clone(&self) -> Self { + Self { chain_spec: Arc::clone(&self.chain_spec), _phantom: Default::default() } } } /// Noop implementation for testing purposes -impl BlockHashReader for NoopBlockReader { +impl BlockHashReader for NoopProvider { fn block_hash(&self, _number: u64) -> ProviderResult> { Ok(None) } @@ -36,7 +89,7 @@ impl BlockHashReader for NoopBlockReader { } } -impl BlockNumReader for NoopBlockReader { +impl BlockNumReader for NoopProvider { fn chain_info(&self) -> ProviderResult { Ok(ChainInfo::default()) } @@ -54,10 +107,467 @@ impl BlockNumReader for NoopBlockReader { } } -impl ChainSpecProvider for NoopBlockReader { +impl ChainSpecProvider + for NoopProvider +{ type ChainSpec = ChainSpec; fn chain_spec(&self) -> Arc { self.chain_spec.clone() } } + +impl BlockIdReader for NoopProvider { + fn pending_block_num_hash(&self) -> ProviderResult> { + Ok(None) + } + + fn safe_block_num_hash(&self) -> ProviderResult> { + Ok(None) + } + + fn finalized_block_num_hash(&self) -> ProviderResult> { + Ok(None) + } +} + +impl BlockReaderIdExt for NoopProvider { + fn block_by_id(&self, _id: BlockId) -> ProviderResult> { + Ok(None) + } + + fn sealed_header_by_id( + &self, + _id: BlockId, + ) -> ProviderResult>> { + Ok(None) + } + + fn header_by_id(&self, _id: BlockId) -> ProviderResult> { + Ok(None) + } + + fn ommers_by_id(&self, _id: BlockId) -> ProviderResult>> { + Ok(None) + } +} + +impl BlockReader for NoopProvider { + type Block = N::Block; + + fn find_block_by_hash( + &self, + _hash: B256, + _source: BlockSource, + ) -> ProviderResult> { + Ok(None) + } + + fn block(&self, _id: BlockHashOrNumber) -> ProviderResult> { + Ok(None) + } + + fn pending_block(&self) -> ProviderResult>> { + Ok(None) + } + + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { + Ok(None) + } + + 
fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { + Ok(None) + } + + fn ommers(&self, _id: BlockHashOrNumber) -> ProviderResult>> { + Ok(None) + } + + fn block_body_indices(&self, _num: u64) -> ProviderResult> { + Ok(None) + } + + fn block_with_senders( + &self, + _id: BlockHashOrNumber, + _transaction_kind: TransactionVariant, + ) -> ProviderResult>> { + Ok(None) + } + + fn sealed_block_with_senders( + &self, + _id: BlockHashOrNumber, + _transaction_kind: TransactionVariant, + ) -> ProviderResult>> { + Ok(None) + } + + fn block_range(&self, _range: RangeInclusive) -> ProviderResult> { + Ok(vec![]) + } + + fn block_with_senders_range( + &self, + _range: RangeInclusive, + ) -> ProviderResult>> { + Ok(vec![]) + } + + fn sealed_block_with_senders_range( + &self, + _range: RangeInclusive, + ) -> ProviderResult>> { + Ok(vec![]) + } +} + +impl TransactionsProvider for NoopProvider { + type Transaction = N::SignedTx; + + fn transaction_id(&self, _tx_hash: TxHash) -> ProviderResult> { + Ok(None) + } + + fn transaction_by_id(&self, _id: TxNumber) -> ProviderResult> { + Ok(None) + } + + fn transaction_by_id_unhashed( + &self, + _id: TxNumber, + ) -> ProviderResult> { + Ok(None) + } + + fn transaction_by_hash(&self, _hash: TxHash) -> ProviderResult> { + Ok(None) + } + + fn transaction_by_hash_with_meta( + &self, + _hash: TxHash, + ) -> ProviderResult> { + Ok(None) + } + + fn transaction_block(&self, _id: TxNumber) -> ProviderResult> { + todo!() + } + + fn transactions_by_block( + &self, + _block_id: BlockHashOrNumber, + ) -> ProviderResult>> { + Ok(None) + } + + fn transactions_by_block_range( + &self, + _range: impl RangeBounds, + ) -> ProviderResult>> { + Ok(Vec::default()) + } + + fn transactions_by_tx_range( + &self, + _range: impl RangeBounds, + ) -> ProviderResult> { + Ok(Vec::default()) + } + + fn senders_by_tx_range( + &self, + _range: impl RangeBounds, + ) -> ProviderResult> { + Ok(Vec::default()) + } + + fn transaction_sender(&self, _id: 
TxNumber) -> ProviderResult> { + Ok(None) + } +} + +impl ReceiptProvider for NoopProvider { + type Receipt = N::Receipt; + + fn receipt(&self, _id: TxNumber) -> ProviderResult> { + Ok(None) + } + + fn receipt_by_hash(&self, _hash: TxHash) -> ProviderResult> { + Ok(None) + } + + fn receipts_by_block( + &self, + _block: BlockHashOrNumber, + ) -> ProviderResult>> { + Ok(None) + } + + fn receipts_by_tx_range( + &self, + _range: impl RangeBounds, + ) -> ProviderResult> { + Ok(vec![]) + } +} + +impl ReceiptProviderIdExt for NoopProvider {} + +impl HeaderProvider for NoopProvider { + type Header = N::BlockHeader; + + fn header(&self, _block_hash: &BlockHash) -> ProviderResult> { + Ok(None) + } + + fn header_by_number(&self, _num: u64) -> ProviderResult> { + Ok(None) + } + + fn header_td(&self, _hash: &BlockHash) -> ProviderResult> { + Ok(None) + } + + fn header_td_by_number(&self, _number: BlockNumber) -> ProviderResult> { + Ok(None) + } + + fn headers_range( + &self, + _range: impl RangeBounds, + ) -> ProviderResult> { + Ok(vec![]) + } + + fn sealed_header( + &self, + _number: BlockNumber, + ) -> ProviderResult>> { + Ok(None) + } + + fn sealed_headers_while( + &self, + _range: impl RangeBounds, + _predicate: impl FnMut(&SealedHeader) -> bool, + ) -> ProviderResult>> { + Ok(vec![]) + } +} + +impl AccountReader for NoopProvider { + fn basic_account(&self, _address: Address) -> ProviderResult> { + Ok(None) + } +} + +impl ChangeSetReader for NoopProvider { + fn account_block_changeset( + &self, + _block_number: BlockNumber, + ) -> ProviderResult> { + Ok(Vec::default()) + } +} + +impl StateRootProvider for NoopProvider { + fn state_root(&self, _state: HashedPostState) -> ProviderResult { + Ok(B256::default()) + } + + fn state_root_from_nodes(&self, _input: TrieInput) -> ProviderResult { + Ok(B256::default()) + } + + fn state_root_with_updates( + &self, + _state: HashedPostState, + ) -> ProviderResult<(B256, TrieUpdates)> { + Ok((B256::default(), TrieUpdates::default())) + } + 
+ fn state_root_from_nodes_with_updates( + &self, + _input: TrieInput, + ) -> ProviderResult<(B256, TrieUpdates)> { + Ok((B256::default(), TrieUpdates::default())) + } +} + +impl StorageRootProvider for NoopProvider { + fn storage_root( + &self, + _address: Address, + _hashed_storage: HashedStorage, + ) -> ProviderResult { + Ok(B256::default()) + } + + fn storage_proof( + &self, + _address: Address, + slot: B256, + _hashed_storage: HashedStorage, + ) -> ProviderResult { + Ok(reth_trie::StorageProof::new(slot)) + } + + fn storage_multiproof( + &self, + _address: Address, + _slots: &[B256], + _hashed_storage: HashedStorage, + ) -> ProviderResult { + Ok(reth_trie::StorageMultiProof::empty()) + } +} + +impl StateProofProvider for NoopProvider { + fn proof( + &self, + _input: TrieInput, + address: Address, + _slots: &[B256], + ) -> ProviderResult { + Ok(AccountProof::new(address)) + } + + fn multiproof( + &self, + _input: TrieInput, + _targets: HashMap>, + ) -> ProviderResult { + Ok(MultiProof::default()) + } + + fn witness( + &self, + _input: TrieInput, + _target: HashedPostState, + ) -> ProviderResult> { + Ok(HashMap::default()) + } +} + +impl HashedPostStateProvider for NoopProvider { + fn hashed_post_state(&self, _bundle_state: &revm::db::BundleState) -> HashedPostState { + HashedPostState::default() + } +} + +impl StateProvider for NoopProvider { + fn storage( + &self, + _account: Address, + _storage_key: StorageKey, + ) -> ProviderResult> { + Ok(None) + } + + fn bytecode_by_hash(&self, _code_hash: B256) -> ProviderResult> { + Ok(None) + } +} + +// impl EvmEnvProvider for NoopProvider { +// fn fill_env_at( +// &self, +// _cfg: &mut CfgEnvWithHandlerCfg, +// _block_env: &mut BlockEnv, +// _at: BlockHashOrNumber, +// _evm_config: EvmConfig, +// ) -> ProviderResult<()> +// where +// EvmConfig: ConfigureEvmEnv
, +// { +// Ok(()) +// } +// +// fn fill_env_with_header( +// &self, +// _cfg: &mut CfgEnvWithHandlerCfg, +// _block_env: &mut BlockEnv, +// _header: &Header, +// _evm_config: EvmConfig, +// ) -> ProviderResult<()> +// where +// EvmConfig: ConfigureEvmEnv
, +// { +// Ok(()) +// } +// +// fn fill_cfg_env_at( +// &self, +// _cfg: &mut CfgEnvWithHandlerCfg, +// _at: BlockHashOrNumber, +// _evm_config: EvmConfig, +// ) -> ProviderResult<()> +// where +// EvmConfig: ConfigureEvmEnv
, +// { +// Ok(()) +// } +// +// fn fill_cfg_env_with_header( +// &self, +// _cfg: &mut CfgEnvWithHandlerCfg, +// _header: &Header, +// _evm_config: EvmConfig, +// ) -> ProviderResult<()> +// where +// EvmConfig: ConfigureEvmEnv
, +// { +// Ok(()) +// } +// } + +impl StageCheckpointReader for NoopProvider { + fn get_stage_checkpoint(&self, _id: StageId) -> ProviderResult> { + Ok(None) + } + + fn get_stage_checkpoint_progress(&self, _id: StageId) -> ProviderResult>> { + Ok(None) + } + + fn get_all_checkpoints(&self) -> ProviderResult> { + Ok(Vec::new()) + } +} + +impl WithdrawalsProvider for NoopProvider { + fn withdrawals_by_block( + &self, + _id: BlockHashOrNumber, + _timestamp: u64, + ) -> ProviderResult> { + Ok(None) + } + fn latest_withdrawal(&self) -> ProviderResult> { + Ok(None) + } +} + +impl PruneCheckpointReader for NoopProvider { + fn get_prune_checkpoint( + &self, + _segment: PruneSegment, + ) -> ProviderResult> { + Ok(None) + } + + fn get_prune_checkpoints(&self) -> ProviderResult> { + Ok(Vec::new()) + } +} + +impl NodePrimitivesProvider for NoopProvider { + type Primitives = N; +} diff --git a/crates/storage/storage-api/src/state.rs b/crates/storage/storage-api/src/state.rs index 0cb26d307434..dc53319f4c5f 100644 --- a/crates/storage/storage-api/src/state.rs +++ b/crates/storage/storage-api/src/state.rs @@ -8,7 +8,9 @@ use alloy_primitives::{Address, BlockHash, BlockNumber, StorageKey, StorageValue use auto_impl::auto_impl; use reth_primitives::Bytecode; use reth_storage_errors::provider::ProviderResult; +use reth_trie::HashedPostState; use reth_trie_db::StateCommitment; +use revm::db::states::BundleState; /// Type alias of boxed [`StateProvider`]. pub type StateProviderBox = Box; @@ -21,6 +23,7 @@ pub trait StateProvider: + StateRootProvider + StorageRootProvider + StateProofProvider + + HashedPostStateProvider + Send + Sync { @@ -83,11 +86,18 @@ pub trait StateProvider: } /// Trait implemented for database providers that can provide the [`StateCommitment`] type. -pub trait StateCommitmentProvider { +pub trait StateCommitmentProvider: Send + Sync { /// The [`StateCommitment`] type that can be used to perform state commitment operations. 
type StateCommitment: StateCommitment; } +/// Trait that provides the hashed state from various sources. +#[auto_impl(&, Arc, Box)] +pub trait HashedPostStateProvider: Send + Sync { + /// Returns the `HashedPostState` of the provided [`BundleState`]. + fn hashed_post_state(&self, bundle_state: &BundleState) -> HashedPostState; +} + /// Trait implemented for database providers that can be converted into a historical state provider. pub trait TryIntoHistoricalStateProvider { /// Returns a historical [`StateProvider`] indexed by the given historic block number. diff --git a/crates/tracing/Cargo.toml b/crates/tracing/Cargo.toml index 59631365d603..d944b5eeeb61 100644 --- a/crates/tracing/Cargo.toml +++ b/crates/tracing/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] tracing.workspace = true -tracing-subscriber = { version = "0.3", default-features = false, features = ["env-filter", "fmt", "json"] } +tracing-subscriber = { version = "0.3", default-features = false, features = ["env-filter", "fmt", "ansi", "json"] } tracing-appender.workspace = true tracing-journald = "0.3" tracing-logfmt = "0.3.3" diff --git a/crates/tracing/src/formatter.rs b/crates/tracing/src/formatter.rs index 1322377f1c9f..202a92136d26 100644 --- a/crates/tracing/src/formatter.rs +++ b/crates/tracing/src/formatter.rs @@ -54,7 +54,7 @@ impl LogFormat { .unwrap_or_else(|_| // If `RUST_LOG_TARGET` is not set, show target in logs only if the max enabled // level is higher than INFO (DEBUG, TRACE) - filter.max_level_hint().map_or(true, |max_level| max_level > tracing::Level::INFO)); + filter.max_level_hint().is_none_or(|max_level| max_level > tracing::Level::INFO)); match self { Self::Json => { diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index 7c0f34765591..214633188167 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -52,7 +52,6 @@ bitflags.workspace = true auto_impl.workspace = true smallvec.workspace = 
true - # testing rand = { workspace = true, optional = true } paste = { workspace = true, optional = true } diff --git a/crates/transaction-pool/src/config.rs b/crates/transaction-pool/src/config.rs index 212df34bd371..a9603215c834 100644 --- a/crates/transaction-pool/src/config.rs +++ b/crates/transaction-pool/src/config.rs @@ -196,15 +196,15 @@ impl LocalTransactionConfig { /// Returns whether the local addresses vector contains the given address. #[inline] - pub fn contains_local_address(&self, address: Address) -> bool { - self.local_addresses.contains(&address) + pub fn contains_local_address(&self, address: &Address) -> bool { + self.local_addresses.contains(address) } /// Returns whether the particular transaction should be considered local. /// /// This always returns false if the local exemptions are disabled. #[inline] - pub fn is_local(&self, origin: TransactionOrigin, sender: Address) -> bool { + pub fn is_local(&self, origin: TransactionOrigin, sender: &Address) -> bool { if self.no_local_exemptions() { return false } @@ -286,10 +286,10 @@ mod tests { let config = LocalTransactionConfig { local_addresses, ..Default::default() }; // Should contain the inserted address - assert!(config.contains_local_address(address)); + assert!(config.contains_local_address(&address)); // Should not contain another random address - assert!(!config.contains_local_address(Address::new([2; 20]))); + assert!(!config.contains_local_address(&Address::new([2; 20]))); } #[test] @@ -302,7 +302,7 @@ mod tests { }; // Should return false as no exemptions is set to true - assert!(!config.is_local(TransactionOrigin::Local, address)); + assert!(!config.is_local(TransactionOrigin::Local, &address)); } #[test] @@ -315,13 +315,13 @@ mod tests { LocalTransactionConfig { no_exemptions: false, local_addresses, ..Default::default() }; // Should return true as the transaction origin is local - assert!(config.is_local(TransactionOrigin::Local, Address::new([2; 20]))); - 
assert!(config.is_local(TransactionOrigin::Local, address)); + assert!(config.is_local(TransactionOrigin::Local, &Address::new([2; 20]))); + assert!(config.is_local(TransactionOrigin::Local, &address)); // Should return true as the address is in the local_addresses set - assert!(config.is_local(TransactionOrigin::External, address)); + assert!(config.is_local(TransactionOrigin::External, &address)); // Should return false as the address is not in the local_addresses set - assert!(!config.is_local(TransactionOrigin::External, Address::new([2; 20]))); + assert!(!config.is_local(TransactionOrigin::External, &Address::new([2; 20]))); } #[test] diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 02f218d4b098..fa7b75e34ad1 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -7,8 +7,10 @@ use crate::{ traits::{CanonicalStateUpdate, TransactionPool, TransactionPoolExt}, BlockInfo, PoolTransaction, PoolUpdateKind, }; +use alloy_consensus::BlockHeader; use alloy_eips::BlockNumberOrTag; use alloy_primitives::{Address, BlockHash, BlockNumber}; +use alloy_rlp::Encodable; use futures_util::{ future::{BoxFuture, Fuse, FusedFuture}, FutureExt, Stream, StreamExt, @@ -18,8 +20,8 @@ use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_execution_types::ChangedAccount; use reth_fs_util::FsPathError; use reth_primitives::{ - PooledTransactionsElementEcRecovered, SealedHeader, TransactionSigned, - TransactionSignedEcRecovered, + transaction::SignedTransactionIntoRecoveredExt, PooledTransactionsElementEcRecovered, + SealedHeader, TransactionSigned, }; use reth_primitives_traits::SignedTransaction; use reth_storage_api::{errors::provider::ProviderError, BlockReaderIdExt, StateProviderFactory}; @@ -78,7 +80,7 @@ pub fn maintain_transaction_pool_future( ) -> BoxFuture<'static, ()> where Client: StateProviderFactory + BlockReaderIdExt + ChainSpecProvider + Clone + Send + 'static, - P: 
TransactionPoolExt + 'static, + P: TransactionPoolExt> + 'static, St: Stream + Send + Unpin + 'static, Tasks: TaskSpawner + 'static, { @@ -99,7 +101,7 @@ pub async fn maintain_transaction_pool( config: MaintainPoolConfig, ) where Client: StateProviderFactory + BlockReaderIdExt + ChainSpecProvider + Clone + Send + 'static, - P: TransactionPoolExt + 'static, + P: TransactionPoolExt> + 'static, St: Stream + Send + Unpin + 'static, Tasks: TaskSpawner + 'static, { @@ -110,11 +112,13 @@ pub async fn maintain_transaction_pool( let latest = SealedHeader::seal(latest); let chain_spec = client.chain_spec(); let info = BlockInfo { - block_gas_limit: latest.gas_limit, + block_gas_limit: latest.gas_limit(), last_seen_block_hash: latest.hash(), - last_seen_block_number: latest.number, + last_seen_block_number: latest.number(), pending_basefee: latest - .next_block_base_fee(chain_spec.base_fee_params_at_timestamp(latest.timestamp + 12)) + .next_block_base_fee( + chain_spec.base_fee_params_at_timestamp(latest.timestamp() + 12), + ) .unwrap_or_default(), pending_blob_fee: latest.next_block_blob_fee(), }; @@ -340,7 +344,7 @@ pub async fn maintain_transaction_pool(

::Transaction::from_pooled(tx.into()) }) } else { -

::Transaction::try_from_consensus(tx.into()).ok() +

::Transaction::try_from_consensus(tx).ok() } }) .collect::>(); @@ -461,7 +465,7 @@ impl FinalizedBlockTracker { let finalized = finalized_block?; self.last_finalized_block .replace(finalized) - .map_or(true, |last| last < finalized) + .is_none_or(|last| last < finalized) .then_some(finalized) } } @@ -557,7 +561,7 @@ async fn load_and_reinsert_transactions

( file_path: &Path, ) -> Result<(), TransactionsBackupError> where - P: TransactionPool, + P: TransactionPool>, { if !file_path.exists() { return Ok(()) @@ -570,14 +574,15 @@ where return Ok(()) } - let txs_signed: Vec = alloy_rlp::Decodable::decode(&mut data.as_slice())?; + let txs_signed: Vec<::Consensus> = + alloy_rlp::Decodable::decode(&mut data.as_slice())?; let pool_transactions = txs_signed .into_iter() .filter_map(|tx| tx.try_ecrecovered()) .filter_map(|tx| { // Filter out errors - ::try_from_consensus(tx.into()).ok() + ::try_from_consensus(tx).ok() }) .collect(); @@ -590,7 +595,7 @@ where fn save_local_txs_backup

(pool: P, file_path: &Path) where - P: TransactionPool, + P: TransactionPool>, { let local_transactions = pool.get_local_transactions(); if local_transactions.is_empty() { @@ -600,11 +605,7 @@ where let local_transactions = local_transactions .into_iter() - .map(|tx| { - let recovered: TransactionSignedEcRecovered = - tx.transaction.clone().into_consensus().into(); - recovered.into_signed() - }) + .map(|tx| tx.transaction.clone_into_consensus().into_signed()) .collect::>(); let num_txs = local_transactions.len(); @@ -644,7 +645,7 @@ pub async fn backup_local_transactions_task

( pool: P, config: LocalTransactionBackupConfig, ) where - P: TransactionPool + Clone, + P: TransactionPool> + Clone, { let Some(transactions_path) = config.transactions_path else { // nothing to do diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index 171faccf7c2a..b770e3da4b0e 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -1,4 +1,5 @@ use crate::{ + error::{Eip4844PoolTransactionError, InvalidPoolTransactionError}, identifier::{SenderId, TransactionId}, pool::pending::PendingTransaction, PoolTransaction, TransactionOrdering, ValidPoolTransaction, @@ -6,7 +7,7 @@ use crate::{ use alloy_primitives::Address; use core::fmt; use reth_payload_util::PayloadTransactions; -use reth_primitives::TransactionSignedEcRecovered; +use reth_primitives::{InvalidTransactionError, RecoveredTx}; use std::{ collections::{BTreeMap, BTreeSet, HashSet, VecDeque}, sync::Arc, @@ -27,8 +28,8 @@ pub(crate) struct BestTransactionsWithFees { } impl crate::traits::BestTransactions for BestTransactionsWithFees { - fn mark_invalid(&mut self, tx: &Self::Item) { - BestTransactions::mark_invalid(&mut self.best, tx) + fn mark_invalid(&mut self, tx: &Self::Item, kind: InvalidPoolTransactionError) { + BestTransactions::mark_invalid(&mut self.best, tx, kind) } fn no_updates(&mut self) { @@ -56,11 +57,15 @@ impl Iterator for BestTransactionsWithFees { if best.transaction.max_fee_per_gas() >= self.base_fee as u128 && best.transaction .max_fee_per_blob_gas() - .map_or(true, |fee| fee >= self.base_fee_per_blob_gas as u128) + .is_none_or(|fee| fee >= self.base_fee_per_blob_gas as u128) { return Some(best); } - crate::traits::BestTransactions::mark_invalid(self, &best); + crate::traits::BestTransactions::mark_invalid( + self, + &best, + InvalidPoolTransactionError::Underpriced, + ); } } } @@ -95,7 +100,11 @@ pub(crate) struct BestTransactions { impl BestTransactions { /// Mark the transaction and it's 
descendants as invalid. - pub(crate) fn mark_invalid(&mut self, tx: &Arc>) { + pub(crate) fn mark_invalid( + &mut self, + tx: &Arc>, + _kind: InvalidPoolTransactionError, + ) { self.invalid.insert(tx.sender_id()); } @@ -154,8 +163,8 @@ impl BestTransactions { } impl crate::traits::BestTransactions for BestTransactions { - fn mark_invalid(&mut self, tx: &Self::Item) { - Self::mark_invalid(self, tx) + fn mark_invalid(&mut self, tx: &Self::Item, kind: InvalidPoolTransactionError) { + Self::mark_invalid(self, tx, kind) } fn no_updates(&mut self) { @@ -199,7 +208,12 @@ impl Iterator for BestTransactions { if self.skip_blobs && best.transaction.transaction.is_eip4844() { // blobs should be skipped, marking them as invalid will ensure that no dependent // transactions are returned - self.mark_invalid(&best.transaction) + self.mark_invalid( + &best.transaction, + InvalidPoolTransactionError::Eip4844( + Eip4844PoolTransactionError::NoEip4844Blobs, + ), + ) } else { return Some(best.transaction) } @@ -212,7 +226,7 @@ impl Iterator for BestTransactions { #[derive(Debug)] pub struct BestPayloadTransactions where - T: PoolTransaction>, + T: PoolTransaction, I: Iterator>>, { invalid: HashSet

, @@ -221,7 +235,7 @@ where impl BestPayloadTransactions where - T: PoolTransaction>, + T: PoolTransaction, I: Iterator>>, { /// Create a new `BestPayloadTransactions` with the given iterator. @@ -232,16 +246,18 @@ where impl PayloadTransactions for BestPayloadTransactions where - T: PoolTransaction>, + T: PoolTransaction, I: Iterator>>, { - fn next(&mut self, _ctx: ()) -> Option { + type Transaction = T::Consensus; + + fn next(&mut self, _ctx: ()) -> Option> { loop { let tx = self.best.next()?; if self.invalid.contains(&tx.sender()) { continue } - return Some(tx.to_recovered_transaction()) + return Some(tx.to_consensus()) } } @@ -280,7 +296,10 @@ where if (self.predicate)(&best) { return Some(best) } - self.best.mark_invalid(&best); + self.best.mark_invalid( + &best, + InvalidPoolTransactionError::Consensus(InvalidTransactionError::TxTypeNotSupported), + ); } } } @@ -290,8 +309,8 @@ where I: crate::traits::BestTransactions, P: FnMut(&::Item) -> bool + Send, { - fn mark_invalid(&mut self, tx: &Self::Item) { - crate::traits::BestTransactions::mark_invalid(&mut self.best, tx) + fn mark_invalid(&mut self, tx: &Self::Item, kind: InvalidPoolTransactionError) { + crate::traits::BestTransactions::mark_invalid(&mut self.best, tx, kind) } fn no_updates(&mut self) { @@ -379,8 +398,8 @@ where I: crate::traits::BestTransactions>>, T: PoolTransaction, { - fn mark_invalid(&mut self, tx: &Self::Item) { - self.inner.mark_invalid(tx) + fn mark_invalid(&mut self, tx: &Self::Item, kind: InvalidPoolTransactionError) { + self.inner.mark_invalid(tx, kind) } fn no_updates(&mut self) { @@ -450,7 +469,10 @@ mod tests { // mark the first tx as invalid let invalid = best.independent.iter().next().unwrap(); - best.mark_invalid(&invalid.transaction.clone()); + best.mark_invalid( + &invalid.transaction.clone(), + InvalidPoolTransactionError::Consensus(InvalidTransactionError::TxTypeNotSupported), + ); // iterator is empty assert!(best.next().is_none()); @@ -475,7 +497,11 @@ mod tests { > = 
Box::new(pool.best()); let tx = Iterator::next(&mut best).unwrap(); - crate::traits::BestTransactions::mark_invalid(&mut *best, &tx); + crate::traits::BestTransactions::mark_invalid( + &mut *best, + &tx, + InvalidPoolTransactionError::Consensus(InvalidTransactionError::TxTypeNotSupported), + ); assert!(Iterator::next(&mut best).is_none()); } diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 86bf5f741c3d..c9d4e0a488e3 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -1462,7 +1462,7 @@ impl AllTransactions { transaction: ValidPoolTransaction, on_chain_nonce: u64, ) -> Result, InsertErr> { - if !self.local_transactions_config.is_local(transaction.origin, transaction.sender()) { + if !self.local_transactions_config.is_local(transaction.origin, transaction.sender_ref()) { let current_txs = self.tx_counter.get(&transaction.sender_id()).copied().unwrap_or_default(); diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index afa1638c8516..0e8b26faf83b 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -26,7 +26,7 @@ use rand::{ }; use reth_primitives::{ transaction::TryFromRecoveredTransactionError, PooledTransactionsElementEcRecovered, - Transaction, TransactionSigned, TransactionSignedEcRecovered, TxType, + RecoveredTx, Transaction, TransactionSigned, TxType, }; use reth_primitives_traits::InMemorySize; use std::{ops::Range, sync::Arc, time::Instant, vec::IntoIter}; @@ -592,15 +592,17 @@ impl MockTransaction { impl PoolTransaction for MockTransaction { type TryFromConsensusError = TryFromRecoveredTransactionError; - type Consensus = TransactionSignedEcRecovered; + type Consensus = TransactionSigned; type Pooled = PooledTransactionsElementEcRecovered; - fn try_from_consensus(tx: Self::Consensus) -> Result { + fn try_from_consensus( + tx: 
RecoveredTx, + ) -> Result { tx.try_into() } - fn into_consensus(self) -> Self::Consensus { + fn into_consensus(self) -> RecoveredTx { self.into() } @@ -609,7 +611,7 @@ impl PoolTransaction for MockTransaction { } fn try_consensus_into_pooled( - tx: Self::Consensus, + tx: RecoveredTx, ) -> Result { Self::Pooled::try_from(tx).map_err(|_| TryFromRecoveredTransactionError::BlobSidecarMissing) } @@ -622,6 +624,10 @@ impl PoolTransaction for MockTransaction { *self.get_sender() } + fn sender_ref(&self) -> &Address { + self.get_sender() + } + fn nonce(&self) -> u64 { *self.get_nonce() } @@ -719,6 +725,16 @@ impl PoolTransaction for MockTransaction { } } + /// Returns true if the transaction is a contract creation. + fn is_create(&self) -> bool { + match self { + Self::Legacy { to, .. } | Self::Eip1559 { to, .. } | Self::Eip2930 { to, .. } => { + to.is_create() + } + Self::Eip4844 { .. } => false, + } + } + /// Returns the input data associated with the transaction. fn input(&self) -> &[u8] { self.get_input() @@ -794,10 +810,10 @@ impl EthPoolTransaction for MockTransaction { } } -impl TryFrom for MockTransaction { +impl TryFrom for MockTransaction { type Error = TryFromRecoveredTransactionError; - fn try_from(tx: TransactionSignedEcRecovered) -> Result { + fn try_from(tx: RecoveredTx) -> Result { let sender = tx.signer(); let transaction = tx.into_signed(); let hash = transaction.hash(); @@ -916,7 +932,7 @@ impl From for MockTransaction { } } -impl From for TransactionSignedEcRecovered { +impl From for RecoveredTx { fn from(tx: MockTransaction) -> Self { let signed_tx = TransactionSigned::new(tx.clone().into(), Signature::test_signature(), *tx.hash()); @@ -1019,11 +1035,9 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { arb::<(TransactionSigned, Address)>() .prop_map(|(signed_transaction, signer)| { - TransactionSignedEcRecovered::from_signed_transaction(signed_transaction, signer) + RecoveredTx::from_signed_transaction(signed_transaction, signer) .try_into() 
- .expect( - "Failed to create an Arbitrary MockTransaction via TransactionSignedEcRecovered", - ) + .expect("Failed to create an Arbitrary MockTransaction via RecoveredTx") }) .boxed() } diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 8945d7139769..15f824e7d436 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -20,8 +20,7 @@ use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; use reth_primitives::{ kzg::KzgSettings, transaction::TryFromRecoveredTransactionError, PooledTransactionsElement, - PooledTransactionsElementEcRecovered, SealedBlock, Transaction, TransactionSigned, - TransactionSignedEcRecovered, + PooledTransactionsElementEcRecovered, RecoveredTx, SealedBlock, Transaction, TransactionSigned, }; use reth_primitives_traits::SignedTransaction; #[cfg(feature = "serde")] @@ -39,6 +38,9 @@ use tokio::sync::mpsc::Receiver; /// The `PeerId` type. pub type PeerId = alloy_primitives::B512; +/// Helper type alias to access [`PoolTransaction::Consensus`] for a given [`TransactionPool`]. +pub type PoolConsensusTx

= <

::Transaction as PoolTransaction>::Consensus; + /// General purpose abstraction of a transaction-pool. /// /// This is intended to be used by API-consumers such as RPC that need inject new incoming, @@ -577,18 +579,18 @@ pub struct AllPoolTransactions { // === impl AllPoolTransactions === impl AllPoolTransactions { - /// Returns an iterator over all pending [`TransactionSignedEcRecovered`] transactions. - pub fn pending_recovered(&self) -> impl Iterator + '_ { + /// Returns an iterator over all pending [`RecoveredTx`] transactions. + pub fn pending_recovered(&self) -> impl Iterator> + '_ { self.pending.iter().map(|tx| tx.transaction.clone().into()) } - /// Returns an iterator over all queued [`TransactionSignedEcRecovered`] transactions. - pub fn queued_recovered(&self) -> impl Iterator + '_ { + /// Returns an iterator over all queued [`RecoveredTx`] transactions. + pub fn queued_recovered(&self) -> impl Iterator> + '_ { self.queued.iter().map(|tx| tx.transaction.clone().into()) } /// Returns an iterator over all transactions, both pending and queued. - pub fn all(&self) -> impl Iterator + '_ { + pub fn all(&self) -> impl Iterator> + '_ { self.pending.iter().chain(self.queued.iter()).map(|tx| tx.transaction.clone().into()) } } @@ -806,7 +808,7 @@ pub trait BestTransactions: Iterator + Send { /// Implementers must ensure all subsequent transaction _don't_ depend on this transaction. /// In other words, this must remove the given transaction _and_ drain all transaction that /// depend on it. - fn mark_invalid(&mut self, transaction: &Self::Item); + fn mark_invalid(&mut self, transaction: &Self::Item, kind: InvalidPoolTransactionError); /// An iterator may be able to receive additional pending transactions that weren't present it /// the pool when it was created. 
@@ -868,8 +870,8 @@ impl BestTransactions for Box where T: BestTransactions + ?Sized, { - fn mark_invalid(&mut self, transaction: &Self::Item) { - (**self).mark_invalid(transaction); + fn mark_invalid(&mut self, transaction: &Self::Item, kind: InvalidPoolTransactionError) { + (**self).mark_invalid(transaction, kind) } fn no_updates(&mut self) { @@ -887,7 +889,7 @@ where /// A no-op implementation that yields no transactions. impl BestTransactions for std::iter::Empty { - fn mark_invalid(&mut self, _tx: &T) {} + fn mark_invalid(&mut self, _tx: &T, _kind: InvalidPoolTransactionError) {} fn no_updates(&mut self) {} @@ -964,23 +966,39 @@ impl BestTransactionsAttributes { /// This distinction is necessary for the EIP-4844 blob transactions, which require an additional /// sidecar when they are gossiped around the network. It is expected that the `Consensus` format is /// a subset of the `Pooled` format. -pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { +pub trait PoolTransaction: + fmt::Debug + + Send + + Sync + + Clone + + TryFrom, Error = Self::TryFromConsensusError> + + Into> +{ /// Associated error type for the `try_from_consensus` method. type TryFromConsensusError: fmt::Display; /// Associated type representing the raw consensus variant of the transaction. - type Consensus: From + TryInto; + type Consensus; /// Associated type representing the recovered pooled variant of the transaction. type Pooled: Encodable2718 + Into; /// Define a method to convert from the `Consensus` type to `Self` - fn try_from_consensus(tx: Self::Consensus) -> Result { + fn try_from_consensus( + tx: RecoveredTx, + ) -> Result { tx.try_into() } + /// Clone the transaction into a consensus variant. + /// + /// This method is preferred when the [`PoolTransaction`] already wraps the consensus variant. 
+ fn clone_into_consensus(&self) -> RecoveredTx { + self.clone().into_consensus() + } + /// Define a method to convert from the `Self` type to `Consensus` - fn into_consensus(self) -> Self::Consensus { + fn into_consensus(self) -> RecoveredTx { self.into() } @@ -996,7 +1014,7 @@ pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { /// Tries to convert the `Consensus` type into the `Pooled` type. fn try_consensus_into_pooled( - tx: Self::Consensus, + tx: RecoveredTx, ) -> Result; /// Hash of the transaction. @@ -1005,6 +1023,9 @@ pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { /// The Sender of the transaction. fn sender(&self) -> Address; + /// Reference to the Sender of the transaction. + fn sender_ref(&self) -> &Address; + /// Returns the nonce for this transaction. fn nonce(&self) -> u64; @@ -1054,6 +1075,11 @@ pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { /// [`TxKind::Create`] if the transaction is a contract creation. fn kind(&self) -> TxKind; + /// Returns true if the transaction is a contract creation. + /// We don't provide a default implementation via `kind` as it copies the 21-byte + /// [`TxKind`] for this simple check. A proper implementation shouldn't allocate. + fn is_create(&self) -> bool; + /// Returns the recipient of the transaction if it is not a [`TxKind::Create`] /// transaction. fn to(&self) -> Option

{ @@ -1102,7 +1128,7 @@ pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { &self, max_init_code_size: usize, ) -> Result<(), InvalidPoolTransactionError> { - if self.kind().is_create() && self.input().len() > max_init_code_size { + if self.is_create() && self.input().len() > max_init_code_size { Err(InvalidPoolTransactionError::ExceedsMaxInitCodeSize( self.size(), max_init_code_size, @@ -1120,9 +1146,6 @@ pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { /// Ethereum pool. pub trait EthPoolTransaction: PoolTransaction< - Consensus: From - + Into - + Into, Pooled: From + Into + Into, @@ -1154,10 +1177,10 @@ pub trait EthPoolTransaction: /// The default [`PoolTransaction`] for the [Pool](crate::Pool) for Ethereum. /// -/// This type is essentially a wrapper around [`TransactionSignedEcRecovered`] with additional +/// This type is essentially a wrapper around [`RecoveredTx`] with additional /// fields derived from the transaction that are frequently used by the pools for ordering. #[derive(Debug, Clone, PartialEq, Eq)] -pub struct EthPooledTransaction { +pub struct EthPooledTransaction { /// `EcRecovered` transaction, the consensus format. pub(crate) transaction: T, @@ -1180,7 +1203,7 @@ impl EthPooledTransaction { /// /// Caution: In case of blob transactions, this does marks the blob sidecar as /// [`EthBlobTransactionSidecar::Missing`] - pub fn new(transaction: TransactionSignedEcRecovered, encoded_length: usize) -> Self { + pub fn new(transaction: RecoveredTx, encoded_length: usize) -> Self { let mut blob_sidecar = EthBlobTransactionSidecar::None; let gas_cost = U256::from(transaction.transaction.max_fee_per_gas()) @@ -1203,7 +1226,7 @@ impl EthPooledTransaction { } /// Return the reference to the underlying transaction. 
- pub const fn transaction(&self) -> &TransactionSignedEcRecovered { + pub const fn transaction(&self) -> &RecoveredTx { &self.transaction } } @@ -1212,12 +1235,12 @@ impl EthPooledTransaction { impl From for EthPooledTransaction { fn from(tx: PooledTransactionsElementEcRecovered) -> Self { let encoded_length = tx.encode_2718_len(); - let (tx, signer) = tx.into_components(); + let (tx, signer) = tx.to_components(); match tx { PooledTransactionsElement::BlobTransaction(tx) => { // include the blob sidecar let (tx, blob) = tx.into_parts(); - let tx = TransactionSignedEcRecovered::from_signed_transaction(tx, signer); + let tx = RecoveredTx::from_signed_transaction(tx, signer); let mut pooled = Self::new(tx, encoded_length); pooled.blob_sidecar = EthBlobTransactionSidecar::Present(blob); pooled @@ -1233,12 +1256,16 @@ impl From for EthPooledTransaction { impl PoolTransaction for EthPooledTransaction { type TryFromConsensusError = TryFromRecoveredTransactionError; - type Consensus = TransactionSignedEcRecovered; + type Consensus = TransactionSigned; type Pooled = PooledTransactionsElementEcRecovered; + fn clone_into_consensus(&self) -> RecoveredTx { + self.transaction().clone() + } + fn try_consensus_into_pooled( - tx: Self::Consensus, + tx: RecoveredTx, ) -> Result { Self::Pooled::try_from(tx).map_err(|_| TryFromRecoveredTransactionError::BlobSidecarMissing) } @@ -1253,6 +1280,11 @@ impl PoolTransaction for EthPooledTransaction { self.transaction.signer() } + /// Returns a reference to the Sender of the transaction. + fn sender_ref(&self) -> &Address { + self.transaction.signer_ref() + } + /// Returns the nonce for this transaction. fn nonce(&self) -> u64 { self.transaction.nonce() @@ -1317,6 +1349,11 @@ impl PoolTransaction for EthPooledTransaction { self.transaction.kind() } + /// Returns true if the transaction is a contract creation. 
+ fn is_create(&self) -> bool { + self.transaction.is_create() + } + fn input(&self) -> &[u8] { self.transaction.input() } @@ -1385,10 +1422,10 @@ impl EthPoolTransaction for EthPooledTransaction { } } -impl TryFrom for EthPooledTransaction { +impl TryFrom for EthPooledTransaction { type Error = TryFromRecoveredTransactionError; - fn try_from(tx: TransactionSignedEcRecovered) -> Result { + fn try_from(tx: RecoveredTx) -> Result { // ensure we can handle the transaction type and its format match tx.tx_type() as u8 { 0..=EIP1559_TX_TYPE_ID | EIP7702_TX_TYPE_ID => { @@ -1412,7 +1449,7 @@ impl TryFrom for EthPooledTransaction { } } -impl From for TransactionSignedEcRecovered { +impl From for RecoveredTx { fn from(tx: EthPooledTransaction) -> Self { tx.transaction } @@ -1624,8 +1661,7 @@ mod tests { }); let signature = Signature::test_signature(); let signed_tx = TransactionSigned::new_unhashed(tx, signature); - let transaction = - TransactionSignedEcRecovered::from_signed_transaction(signed_tx, Default::default()); + let transaction = RecoveredTx::from_signed_transaction(signed_tx, Default::default()); let pooled_tx = EthPooledTransaction::new(transaction.clone(), 200); // Check that the pooled transaction is created correctly @@ -1646,8 +1682,7 @@ mod tests { }); let signature = Signature::test_signature(); let signed_tx = TransactionSigned::new_unhashed(tx, signature); - let transaction = - TransactionSignedEcRecovered::from_signed_transaction(signed_tx, Default::default()); + let transaction = RecoveredTx::from_signed_transaction(signed_tx, Default::default()); let pooled_tx = EthPooledTransaction::new(transaction.clone(), 200); // Check that the pooled transaction is created correctly @@ -1668,8 +1703,7 @@ mod tests { }); let signature = Signature::test_signature(); let signed_tx = TransactionSigned::new_unhashed(tx, signature); - let transaction = - TransactionSignedEcRecovered::from_signed_transaction(signed_tx, Default::default()); + let transaction = 
RecoveredTx::from_signed_transaction(signed_tx, Default::default()); let pooled_tx = EthPooledTransaction::new(transaction.clone(), 200); // Check that the pooled transaction is created correctly @@ -1692,8 +1726,7 @@ mod tests { }); let signature = Signature::test_signature(); let signed_tx = TransactionSigned::new_unhashed(tx, signature); - let transaction = - TransactionSignedEcRecovered::from_signed_transaction(signed_tx, Default::default()); + let transaction = RecoveredTx::from_signed_transaction(signed_tx, Default::default()); let pooled_tx = EthPooledTransaction::new(transaction.clone(), 300); // Check that the pooled transaction is created correctly @@ -1716,8 +1749,7 @@ mod tests { }); let signature = Signature::test_signature(); let signed_tx = TransactionSigned::new_unhashed(tx, signature); - let transaction = - TransactionSignedEcRecovered::from_signed_transaction(signed_tx, Default::default()); + let transaction = RecoveredTx::from_signed_transaction(signed_tx, Default::default()); let pooled_tx = EthPooledTransaction::new(transaction.clone(), 200); // Check that the pooled transaction is created correctly diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index ca7452225755..e3b7af736cd8 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -263,7 +263,7 @@ where // Drop non-local transactions with a fee lower than the configured fee for acceptance into // the pool. 
- if !self.local_transactions_config.is_local(origin, transaction.sender()) && + if !self.local_transactions_config.is_local(origin, transaction.sender_ref()) && transaction.is_eip1559() && transaction.max_priority_fee_per_gas() < self.minimum_priority_fee { @@ -815,7 +815,7 @@ pub fn ensure_intrinsic_gas( let gas_after_merge = validate_initial_tx_gas( spec_id, transaction.input(), - transaction.kind().is_create(), + transaction.is_create(), transaction.access_list().map(|list| list.0.as_slice()).unwrap_or(&[]), transaction.authorization_count() as u64, ); diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 35e3a85537ee..84caae6e7caa 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -9,7 +9,7 @@ use crate::{ use alloy_eips::eip4844::BlobTransactionSidecar; use alloy_primitives::{Address, TxHash, B256, U256}; use futures_util::future::Either; -use reth_primitives::{SealedBlock, TransactionSignedEcRecovered}; +use reth_primitives::{RecoveredTx, SealedBlock}; use std::{fmt, future::Future, time::Instant}; mod constants; @@ -282,6 +282,11 @@ impl ValidPoolTransaction { self.transaction.sender() } + /// Returns a reference to the address of the sender + pub fn sender_ref(&self) -> &Address { + self.transaction.sender_ref() + } + /// Returns the recipient of the transaction if it is not a CREATE transaction. pub fn to(&self) -> Option
{ self.transaction.to() @@ -375,6 +380,13 @@ impl ValidPoolTransaction { self.is_eip4844() != other.is_eip4844() } + /// Converts to this type into the consensus transaction of the pooled transaction. + /// + /// Note: this takes `&self` since indented usage is via `Arc`. + pub fn to_consensus(&self) -> RecoveredTx { + self.transaction.clone_into_consensus() + } + /// Determines whether a candidate transaction (`maybe_replacement`) is underpriced compared to /// an existing transaction in the pool. /// @@ -428,15 +440,6 @@ impl ValidPoolTransaction { } } -impl>> ValidPoolTransaction { - /// Converts to this type into a [`TransactionSignedEcRecovered`]. - /// - /// Note: this takes `&self` since indented usage is via `Arc`. - pub fn to_recovered_transaction(&self) -> TransactionSignedEcRecovered { - self.transaction.clone().into_consensus().into() - } -} - #[cfg(test)] impl Clone for ValidPoolTransaction { fn clone(&self) -> Self { diff --git a/crates/trie/common/Cargo.toml b/crates/trie/common/Cargo.toml index 8b0d930b0c2c..73fce5f8e7bd 100644 --- a/crates/trie/common/Cargo.toml +++ b/crates/trie/common/Cargo.toml @@ -17,12 +17,14 @@ alloy-primitives.workspace = true alloy-rlp = { workspace = true, features = ["arrayvec"] } alloy-trie.workspace = true alloy-consensus.workspace = true -alloy-genesis.workspace = true - reth-primitives-traits.workspace = true reth-codecs.workspace = true revm-primitives.workspace = true +alloy-genesis.workspace = true +alloy-rpc-types-eth = { workspace = true, optional = true } +alloy-serde = { workspace = true, optional = true } + bytes.workspace = true derive_more.workspace = true itertools.workspace = true @@ -54,6 +56,10 @@ serde_json.workspace = true serde_with.workspace = true [features] +eip1186 = [ + "dep:alloy-rpc-types-eth", + "dep:alloy-serde", +] serde = [ "dep:serde", "bytes/serde", @@ -61,6 +67,7 @@ serde = [ "alloy-primitives/serde", "alloy-consensus/serde", "alloy-trie/serde", + "alloy-rpc-types-eth?/serde", 
"revm-primitives/serde", "reth-primitives-traits/serde", "reth-codecs/serde" @@ -79,14 +86,16 @@ test-utils = [ "reth-codecs/test-utils", ] arbitrary = [ - "alloy-trie/arbitrary", - "dep:arbitrary", - "reth-primitives-traits/arbitrary", - "alloy-consensus/arbitrary", - "alloy-primitives/arbitrary", - "nybbles/arbitrary", - "revm-primitives/arbitrary", - "reth-codecs/arbitrary", + "alloy-trie/arbitrary", + "dep:arbitrary", + "alloy-serde/arbitrary", + "reth-primitives-traits/arbitrary", + "alloy-consensus/arbitrary", + "alloy-primitives/arbitrary", + "nybbles/arbitrary", + "revm-primitives/arbitrary", + "reth-codecs/arbitrary", + "alloy-rpc-types-eth?/arbitrary" ] [[bench]] diff --git a/crates/trie/common/src/proofs.rs b/crates/trie/common/src/proofs.rs index 78659116c3e2..517f9fb7ca8d 100644 --- a/crates/trie/common/src/proofs.rs +++ b/crates/trie/common/src/proofs.rs @@ -190,6 +190,33 @@ pub struct AccountProof { pub storage_proofs: Vec, } +#[cfg(feature = "eip1186")] +impl AccountProof { + /// Convert into an EIP-1186 account proof response + pub fn into_eip1186_response( + self, + slots: Vec, + ) -> alloy_rpc_types_eth::EIP1186AccountProofResponse { + let info = self.info.unwrap_or_default(); + alloy_rpc_types_eth::EIP1186AccountProofResponse { + address: self.address, + balance: info.balance, + code_hash: info.get_bytecode_hash(), + nonce: info.nonce, + storage_hash: self.storage_root, + account_proof: self.proof, + storage_proof: self + .storage_proofs + .into_iter() + .filter_map(|proof| { + let input_slot = slots.iter().find(|s| s.as_b256() == proof.key)?; + Some(proof.into_eip1186_proof(*input_slot)) + }) + .collect(), + } + } +} + impl Default for AccountProof { fn default() -> Self { Self::new(Address::default()) @@ -244,6 +271,17 @@ pub struct StorageProof { pub proof: Vec, } +impl StorageProof { + /// Convert into an EIP-1186 storage proof + #[cfg(feature = "eip1186")] + pub fn into_eip1186_proof( + self, + slot: alloy_serde::JsonStorageKey, + ) -> 
alloy_rpc_types_eth::EIP1186StorageProof { + alloy_rpc_types_eth::EIP1186StorageProof { key: slot, value: self.value, proof: self.proof } + } +} + impl StorageProof { /// Create new storage proof from the storage slot. pub fn new(key: B256) -> Self { diff --git a/crates/trie/db/src/prefix_set.rs b/crates/trie/db/src/prefix_set.rs index ac8c3b05304c..95ff6d91f374 100644 --- a/crates/trie/db/src/prefix_set.rs +++ b/crates/trie/db/src/prefix_set.rs @@ -1,4 +1,4 @@ -use alloy_primitives::{keccak256, BlockNumber, B256}; +use alloy_primitives::{BlockNumber, B256}; use derive_more::Deref; use reth_db::tables; use reth_db_api::{ @@ -10,25 +10,34 @@ use reth_db_api::{ use reth_primitives::StorageEntry; use reth_trie::{ prefix_set::{PrefixSetMut, TriePrefixSets}, - Nibbles, + KeyHasher, Nibbles, }; use std::{ collections::{HashMap, HashSet}, + marker::PhantomData, ops::RangeInclusive, }; /// A wrapper around a database transaction that loads prefix sets within a given block range. -#[derive(Deref, Debug)] -pub struct PrefixSetLoader<'a, TX>(&'a TX); +#[derive(Debug)] +pub struct PrefixSetLoader<'a, TX, KH>(&'a TX, PhantomData); -impl<'a, TX> PrefixSetLoader<'a, TX> { +impl<'a, TX, KH> PrefixSetLoader<'a, TX, KH> { /// Create a new loader. pub const fn new(tx: &'a TX) -> Self { - Self(tx) + Self(tx, PhantomData) } } -impl PrefixSetLoader<'_, TX> { +impl Deref for PrefixSetLoader<'_, TX, KH> { + type Target = TX; + + fn deref(&self) -> &Self::Target { + self.0 + } +} + +impl PrefixSetLoader<'_, TX, KH> { /// Load all account and storage changes for the given block range. pub fn load(self, range: RangeInclusive) -> Result { // Initialize prefix sets. @@ -41,7 +50,7 @@ impl PrefixSetLoader<'_, TX> { let mut account_hashed_state_cursor = self.cursor_read::()?; for account_entry in account_changeset_cursor.walk_range(range.clone())? { let (_, AccountBeforeTx { address, .. 
}) = account_entry?; - let hashed_address = keccak256(address); + let hashed_address = KH::hash_key(address); account_prefix_set.insert(Nibbles::unpack(hashed_address)); if account_hashed_state_cursor.seek_exact(hashed_address)?.is_none() { @@ -55,12 +64,12 @@ impl PrefixSetLoader<'_, TX> { let storage_range = BlockNumberAddress::range(range); for storage_entry in storage_cursor.walk_range(storage_range)? { let (BlockNumberAddress((_, address)), StorageEntry { key, .. }) = storage_entry?; - let hashed_address = keccak256(address); + let hashed_address = KH::hash_key(address); account_prefix_set.insert(Nibbles::unpack(hashed_address)); storage_prefix_sets .entry(hashed_address) .or_default() - .insert(Nibbles::unpack(keccak256(key))); + .insert(Nibbles::unpack(KH::hash_key(key))); } Ok(TriePrefixSets { diff --git a/crates/trie/db/src/state.rs b/crates/trie/db/src/state.rs index 6e2cea5051d0..5aaf3ebe5b0f 100644 --- a/crates/trie/db/src/state.rs +++ b/crates/trie/db/src/state.rs @@ -1,5 +1,5 @@ use crate::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory, PrefixSetLoader}; -use alloy_primitives::{keccak256, Address, BlockNumber, B256, U256}; +use alloy_primitives::{Address, BlockNumber, B256, U256}; use reth_db::tables; use reth_db_api::{ cursor::DbCursorRO, @@ -10,7 +10,8 @@ use reth_execution_errors::StateRootError; use reth_storage_errors::db::DatabaseError; use reth_trie::{ hashed_cursor::HashedPostStateCursorFactory, trie_cursor::InMemoryTrieCursorFactory, - updates::TrieUpdates, HashedPostState, HashedStorage, StateRoot, StateRootProgress, TrieInput, + updates::TrieUpdates, HashedPostState, HashedStorage, KeccakKeyHasher, KeyHasher, StateRoot, + StateRootProgress, TrieInput, }; use std::{collections::HashMap, ops::RangeInclusive}; use tracing::debug; @@ -122,7 +123,7 @@ pub trait DatabaseStateRoot<'a, TX>: Sized { pub trait DatabaseHashedPostState: Sized { /// Initializes [`HashedPostState`] from reverts. 
Iterates over state reverts from the specified /// block up to the current tip and aggregates them into hashed state in reverse. - fn from_reverts(tx: &TX, from: BlockNumber) -> Result; + fn from_reverts(tx: &TX, from: BlockNumber) -> Result; } impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX> @@ -136,7 +137,7 @@ impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX> tx: &'a TX, range: RangeInclusive, ) -> Result { - let loaded_prefix_sets = PrefixSetLoader::new(tx).load(range)?; + let loaded_prefix_sets = PrefixSetLoader::<_, KeccakKeyHasher>::new(tx).load(range)?; Ok(Self::from_tx(tx).with_prefix_sets(loaded_prefix_sets)) } @@ -216,7 +217,7 @@ impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX> } impl DatabaseHashedPostState for HashedPostState { - fn from_reverts(tx: &TX, from: BlockNumber) -> Result { + fn from_reverts(tx: &TX, from: BlockNumber) -> Result { // Iterate over account changesets and record value before first occurring account change. let mut accounts = HashMap::new(); let mut account_changesets_cursor = tx.cursor_read::()?; @@ -237,19 +238,19 @@ impl DatabaseHashedPostState for HashedPostState { } let hashed_accounts = - accounts.into_iter().map(|(address, info)| (keccak256(address), info)).collect(); + accounts.into_iter().map(|(address, info)| (KH::hash_key(address), info)).collect(); let hashed_storages = storages .into_iter() .map(|(address, storage)| { ( - keccak256(address), + KH::hash_key(address), HashedStorage::from_iter( // The `wiped` flag indicates only whether previous storage entries // should be looked up in db or not. For reverts it's a noop since all // wiped changes had been written as storage reverts. 
false, - storage.into_iter().map(|(slot, value)| (keccak256(slot), value)), + storage.into_iter().map(|(slot, value)| (KH::hash_key(slot), value)), ), ) }) @@ -265,6 +266,7 @@ mod tests { use alloy_primitives::{hex, map::HashMap, Address, U256}; use reth_db::test_utils::create_test_rw_db; use reth_db_api::database::Database; + use reth_trie::KeccakKeyHasher; use revm::{db::BundleState, primitives::AccountInfo}; #[test] @@ -285,7 +287,7 @@ mod tests { .build(); assert_eq!(bundle_state.reverts.len(), 1); - let post_state = HashedPostState::from_bundle_state(&bundle_state.state); + let post_state = HashedPostState::from_bundle_state::(&bundle_state.state); assert_eq!(post_state.accounts.len(), 2); assert_eq!(post_state.storages.len(), 2); diff --git a/crates/trie/db/tests/walker.rs b/crates/trie/db/tests/walker.rs index 06355ff6d489..0e0b094920b1 100644 --- a/crates/trie/db/tests/walker.rs +++ b/crates/trie/db/tests/walker.rs @@ -63,13 +63,14 @@ where // We're traversing the path in lexicographical order. for expected in expected { - let got = walker.advance().unwrap(); + walker.advance().unwrap(); + let got = walker.key().cloned(); assert_eq!(got.unwrap(), Nibbles::from_nibbles_unchecked(expected.clone())); } // There should be 8 paths traversed in total from 3 branches. 
- let got = walker.advance().unwrap(); - assert!(got.is_none()); + walker.advance().unwrap(); + assert!(walker.key().is_none()); } #[test] diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index f285079f2526..f90a53fa99a8 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -9,6 +9,7 @@ use reth_db::DatabaseError; use reth_execution_errors::StorageRootError; use reth_provider::{ providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, ProviderError, + StateCommitmentProvider, }; use reth_trie::{ hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, @@ -53,7 +54,12 @@ impl ParallelProof { impl ParallelProof where - Factory: DatabaseProviderFactory + Clone + Send + Sync + 'static, + Factory: DatabaseProviderFactory + + StateCommitmentProvider + + Clone + + Send + + Sync + + 'static, { /// Generate a state multiproof according to specified targets. pub fn multiproof( @@ -203,7 +209,11 @@ where account.encode(&mut account_rlp as &mut dyn BufMut); hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp); - storages.insert(hashed_address, storage_multiproof); + + // We might be adding leaves that are not necessarily our proof targets. 
+ if targets.contains_key(&hashed_address) { + storages.insert(hashed_address, storage_multiproof); + } } } } diff --git a/crates/trie/parallel/src/root.rs b/crates/trie/parallel/src/root.rs index 8d2b18f5e111..e66d1f782132 100644 --- a/crates/trie/parallel/src/root.rs +++ b/crates/trie/parallel/src/root.rs @@ -8,6 +8,7 @@ use reth_db::DatabaseError; use reth_execution_errors::StorageRootError; use reth_provider::{ providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, ProviderError, + StateCommitmentProvider, }; use reth_trie::{ hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, @@ -58,7 +59,12 @@ impl ParallelStateRoot { impl ParallelStateRoot where - Factory: DatabaseProviderFactory + Clone + Send + Sync + 'static, + Factory: DatabaseProviderFactory + + StateCommitmentProvider + + Clone + + Send + + Sync + + 'static, { /// Calculate incremental state root in parallel. pub fn incremental_root(self) -> Result { diff --git a/crates/trie/sparse/Cargo.toml b/crates/trie/sparse/Cargo.toml index efd68020ccd7..09826e410847 100644 --- a/crates/trie/sparse/Cargo.toml +++ b/crates/trie/sparse/Cargo.toml @@ -15,6 +15,7 @@ workspace = true [dependencies] # reth reth-primitives-traits.workspace = true +reth-execution-errors.workspace = true reth-trie-common.workspace = true reth-tracing.workspace = true @@ -28,9 +29,9 @@ thiserror.workspace = true [dev-dependencies] reth-primitives-traits = { workspace = true, features = ["arbitrary"] } -reth-testing-utils.workspace = true reth-trie = { workspace = true, features = ["test-utils"] } reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] } +reth-testing-utils.workspace = true arbitrary.workspace = true assert_matches.workspace = true diff --git a/crates/trie/sparse/src/blinded.rs b/crates/trie/sparse/src/blinded.rs new file mode 100644 index 000000000000..22471cf99ffd --- /dev/null +++ b/crates/trie/sparse/src/blinded.rs @@ -0,0 +1,65 @@ +//! 
Traits and default implementations related to retrieval of blinded trie nodes. + +use alloy_primitives::{Bytes, B256}; +use reth_execution_errors::SparseTrieError; +use reth_trie_common::Nibbles; + +/// Factory for instantiating blinded node providers. +pub trait BlindedProviderFactory { + /// Type capable of fetching blinded account nodes. + type AccountNodeProvider: BlindedProvider; + /// Type capable of fetching blinded storage nodes. + type StorageNodeProvider: BlindedProvider; + + /// Returns blinded account node provider. + fn account_node_provider(&self) -> Self::AccountNodeProvider; + + /// Returns blinded storage node provider. + fn storage_node_provider(&self, account: B256) -> Self::StorageNodeProvider; +} + +/// Trie node provider for retrieving blinded nodes. +pub trait BlindedProvider { + /// The error type for the provider. + type Error: Into; + + /// Retrieve blinded node by path. + fn blinded_node(&mut self, path: Nibbles) -> Result, Self::Error>; +} + +/// Default blinded node provider factory that creates [`DefaultBlindedProvider`]. +#[derive(PartialEq, Eq, Clone, Default, Debug)] +pub struct DefaultBlindedProviderFactory; + +impl BlindedProviderFactory for DefaultBlindedProviderFactory { + type AccountNodeProvider = DefaultBlindedProvider; + type StorageNodeProvider = DefaultBlindedProvider; + + fn account_node_provider(&self) -> Self::AccountNodeProvider { + DefaultBlindedProvider + } + + fn storage_node_provider(&self, _account: B256) -> Self::StorageNodeProvider { + DefaultBlindedProvider + } +} + +/// Default blinded node provider that always returns `Ok(None)`. +#[derive(PartialEq, Eq, Clone, Default, Debug)] +pub struct DefaultBlindedProvider; + +impl BlindedProvider for DefaultBlindedProvider { + type Error = SparseTrieError; + + fn blinded_node(&mut self, _path: Nibbles) -> Result, Self::Error> { + Ok(None) + } +} + +/// Right pad the path with 0s and return as [`B256`]. 
+#[inline] +pub fn pad_path_to_key(path: &Nibbles) -> B256 { + let mut padded = path.pack(); + padded.resize(32, 0); + B256::from_slice(&padded) +} diff --git a/crates/trie/sparse/src/errors.rs b/crates/trie/sparse/src/errors.rs deleted file mode 100644 index a38a92395d9b..000000000000 --- a/crates/trie/sparse/src/errors.rs +++ /dev/null @@ -1,59 +0,0 @@ -//! Errors for sparse trie. - -use alloy_primitives::{Bytes, B256}; -use reth_trie_common::Nibbles; -use thiserror::Error; - -use crate::SparseNode; - -/// Result type with [`SparseStateTrieError`] as error. -pub type SparseStateTrieResult = Result; - -/// Error encountered in [`crate::SparseStateTrie`]. -#[derive(Error, Debug)] -pub enum SparseStateTrieError { - /// Encountered invalid root node. - #[error("invalid root node at {path:?}: {node:?}")] - InvalidRootNode { - /// Path to first proof node. - path: Nibbles, - /// Encoded first proof node. - node: Bytes, - }, - /// Sparse trie error. - #[error(transparent)] - Sparse(#[from] SparseTrieError), - /// RLP error. - #[error(transparent)] - Rlp(#[from] alloy_rlp::Error), -} - -/// Result type with [`SparseTrieError`] as error. -pub type SparseTrieResult = Result; - -/// Error encountered in [`crate::SparseTrie`]. -#[derive(Error, Debug)] -pub enum SparseTrieError { - /// Sparse trie is still blind. Thrown on attempt to update it. - #[error("sparse trie is blind")] - Blind, - /// Encountered blinded node on update. - #[error("attempted to update blind node at {path:?}: {hash}")] - BlindedNode { - /// Blind node path. - path: Nibbles, - /// Node hash - hash: B256, - }, - /// Encountered unexpected node at path when revealing. - #[error("encountered an invalid node at path {path:?} when revealing: {node:?}")] - Reveal { - /// Path to the node. - path: Nibbles, - /// Node that was at the path when revealing. - node: Box, - }, - /// RLP error. 
- #[error(transparent)] - Rlp(#[from] alloy_rlp::Error), -} diff --git a/crates/trie/sparse/src/lib.rs b/crates/trie/sparse/src/lib.rs index b3cb2c5fdffa..1a0f3f73648e 100644 --- a/crates/trie/sparse/src/lib.rs +++ b/crates/trie/sparse/src/lib.rs @@ -6,5 +6,11 @@ pub use state::*; mod trie; pub use trie::*; -mod errors; -pub use errors::*; +pub mod blinded; + +/// Re-export sparse trie error types. +pub mod errors { + pub use reth_execution_errors::{ + SparseStateTrieError, SparseStateTrieResult, SparseTrieError, SparseTrieResult, + }; +} diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 1cd13273e649..9b4b38002511 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -1,25 +1,30 @@ use crate::{ - RevealedSparseTrie, SparseStateTrieError, SparseStateTrieResult, SparseTrie, SparseTrieError, + blinded::{BlindedProvider, BlindedProviderFactory, DefaultBlindedProviderFactory}, + RevealedSparseTrie, SparseTrie, }; use alloy_primitives::{ + hex, map::{HashMap, HashSet}, Bytes, B256, }; use alloy_rlp::{Decodable, Encodable}; +use reth_execution_errors::{SparseStateTrieError, SparseStateTrieResult, SparseTrieError}; use reth_primitives_traits::Account; +use reth_tracing::tracing::trace; use reth_trie_common::{ updates::{StorageTrieUpdates, TrieUpdates}, MultiProof, Nibbles, TrieAccount, TrieNode, EMPTY_ROOT_HASH, TRIE_ACCOUNT_RLP_MAX_SIZE, }; -use std::iter::Peekable; +use std::{fmt, iter::Peekable}; /// Sparse state trie representing lazy-loaded Ethereum state trie. -#[derive(Debug)] -pub struct SparseStateTrie { +pub struct SparseStateTrie { + /// Blinded node provider factory. + provider_factory: F, /// Sparse account trie. - state: SparseTrie, + state: SparseTrie, /// Sparse storage tries. - storages: HashMap, + storages: HashMap>, /// Collection of revealed account and storage keys. revealed: HashMap>, /// Flag indicating whether trie updates should be retained. 
@@ -34,17 +39,44 @@ impl Default for SparseStateTrie { state: Default::default(), storages: Default::default(), revealed: Default::default(), + provider_factory: Default::default(), retain_updates: false, account_rlp_buf: Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE), } } } +impl fmt::Debug for SparseStateTrie

{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SparseStateTrie") + .field("state", &self.state) + .field("storages", &self.storages) + .field("revealed", &self.revealed) + .field("retain_updates", &self.retain_updates) + .field("account_rlp_buf", &hex::encode(&self.account_rlp_buf)) + .finish_non_exhaustive() + } +} + impl SparseStateTrie { /// Create state trie from state trie. pub fn from_state(state: SparseTrie) -> Self { Self { state, ..Default::default() } } +} + +impl SparseStateTrie { + /// Create new [`SparseStateTrie`] with blinded node provider factory. + pub fn new(provider_factory: F) -> Self { + Self { + provider_factory, + state: Default::default(), + storages: Default::default(), + revealed: Default::default(), + retain_updates: false, + account_rlp_buf: Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE), + } + } /// Set the retention of branch node updates and deletions. pub const fn with_updates(mut self, retain_updates: bool) -> Self { @@ -62,8 +94,11 @@ impl SparseStateTrie { self.revealed.get(account).is_some_and(|slots| slots.contains(slot)) } - /// Returned mutable reference to storage sparse trie if it was revealed. - pub fn storage_trie_mut(&mut self, account: &B256) -> Option<&mut RevealedSparseTrie> { + /// Returns mutable reference to storage sparse trie if it was revealed. + pub fn storage_trie_mut( + &mut self, + account: &B256, + ) -> Option<&mut RevealedSparseTrie> { self.storages.get_mut(account).and_then(|e| e.as_revealed_mut()) } @@ -83,7 +118,11 @@ impl SparseStateTrie { let Some(root_node) = self.validate_root_node(&mut proof)? else { return Ok(()) }; // Reveal root node if it wasn't already. - let trie = self.state.reveal_root(root_node, self.retain_updates)?; + let trie = self.state.reveal_root_with_provider( + self.provider_factory.account_node_provider(), + root_node, + self.retain_updates, + )?; // Reveal the remaining proof nodes. 
for (path, bytes) in proof { @@ -114,11 +153,11 @@ impl SparseStateTrie { let Some(root_node) = self.validate_root_node(&mut proof)? else { return Ok(()) }; // Reveal root node if it wasn't already. - let trie = self - .storages - .entry(account) - .or_default() - .reveal_root(root_node, self.retain_updates)?; + let trie = self.storages.entry(account).or_default().reveal_root_with_provider( + self.provider_factory.storage_node_provider(account), + root_node, + self.retain_updates, + )?; // Reveal the remaining proof nodes. for (path, bytes) in proof { @@ -144,11 +183,16 @@ impl SparseStateTrie { if let Some(root_node) = self.validate_root_node(&mut account_nodes)? { // Reveal root node if it wasn't already. - let trie = self.state.reveal_root(root_node, self.retain_updates)?; + let trie = self.state.reveal_root_with_provider( + self.provider_factory.account_node_provider(), + root_node, + self.retain_updates, + )?; // Reveal the remaining proof nodes. for (path, bytes) in account_nodes { let node = TrieNode::decode(&mut &bytes[..])?; + trace!(target: "trie::sparse", ?path, ?node, "Revealing account node"); trie.reveal_node(path, node)?; } } @@ -159,15 +203,16 @@ impl SparseStateTrie { if let Some(root_node) = self.validate_root_node(&mut storage_nodes)? { // Reveal root node if it wasn't already. - let trie = self - .storages - .entry(account) - .or_default() - .reveal_root(root_node, self.retain_updates)?; + let trie = self.storages.entry(account).or_default().reveal_root_with_provider( + self.provider_factory.storage_node_provider(account), + root_node, + self.retain_updates, + )?; // Reveal the remaining proof nodes. for (path, bytes) in storage_nodes { let node = TrieNode::decode(&mut &bytes[..])?; + trace!(target: "trie::sparse", ?account, ?path, ?node, "Revealing storage node"); trie.reveal_node(path, node)?; } } @@ -202,37 +247,6 @@ impl SparseStateTrie { Ok(Some(root_node)) } - /// Update or remove trie account based on new account info. 
This method will either recompute - /// the storage root based on update storage trie or look it up from existing leaf value. - /// - /// If the new account info and storage trie are empty, the account leaf will be removed. - pub fn update_account(&mut self, address: B256, account: Account) -> SparseStateTrieResult<()> { - let nibbles = Nibbles::unpack(address); - let storage_root = if let Some(storage_trie) = self.storages.get_mut(&address) { - storage_trie.root().ok_or(SparseTrieError::Blind)? - } else if self.revealed.contains_key(&address) { - let state = self.state.as_revealed_mut().ok_or(SparseTrieError::Blind)?; - // The account was revealed, either... - if let Some(value) = state.get_leaf_value(&nibbles) { - // ..it exists and we should take it's current storage root or... - TrieAccount::decode(&mut &value[..])?.storage_root - } else { - // ...the account is newly created and the storage trie is empty. - EMPTY_ROOT_HASH - } - } else { - return Err(SparseTrieError::Blind.into()) - }; - - if account.is_empty() && storage_root == EMPTY_ROOT_HASH { - self.remove_account_leaf(&nibbles) - } else { - self.account_rlp_buf.clear(); - TrieAccount::from((account, storage_root)).encode(&mut self.account_rlp_buf); - self.update_account_leaf(nibbles, self.account_rlp_buf.clone()) - } - } - /// Update the account leaf node. pub fn update_account_leaf( &mut self, @@ -243,12 +257,6 @@ impl SparseStateTrie { Ok(()) } - /// Remove the account leaf node. - pub fn remove_account_leaf(&mut self, path: &Nibbles) -> SparseStateTrieResult<()> { - self.state.remove_leaf(path)?; - Ok(()) - } - /// Update the leaf node of a storage trie at the provided address. pub fn update_storage_leaf( &mut self, @@ -256,18 +264,11 @@ impl SparseStateTrie { slot: Nibbles, value: Vec, ) -> SparseStateTrieResult<()> { - self.storages.entry(address).or_default().update_leaf(slot, value)?; - Ok(()) - } - - /// Update the leaf node of a storage trie at the provided address. 
- pub fn remove_storage_leaf( - &mut self, - address: B256, - slot: &Nibbles, - ) -> SparseStateTrieResult<()> { - self.storages.entry(address).or_default().remove_leaf(slot)?; - Ok(()) + if let Some(storage_trie) = self.storages.get_mut(&address) { + Ok(storage_trie.update_leaf(slot, value)?) + } else { + Err(SparseStateTrieError::Sparse(SparseTrieError::Blind)) + } } /// Wipe the storage trie at the provided address. @@ -322,6 +323,67 @@ impl SparseStateTrie { } } +impl SparseStateTrie +where + F: BlindedProviderFactory, + SparseTrieError: From<::Error> + + From<::Error>, +{ + /// Update or remove trie account based on new account info. This method will either recompute + /// the storage root based on update storage trie or look it up from existing leaf value. + /// + /// If the new account info and storage trie are empty, the account leaf will be removed. + pub fn update_account(&mut self, address: B256, account: Account) -> SparseStateTrieResult<()> { + let nibbles = Nibbles::unpack(address); + let storage_root = if let Some(storage_trie) = self.storages.get_mut(&address) { + trace!(target: "trie::sparse", ?address, "Calculating storage root to update account"); + storage_trie.root().ok_or(SparseTrieError::Blind)? + } else if self.revealed.contains_key(&address) { + trace!(target: "trie::sparse", ?address, "Retrieving storage root from account leaf to update account"); + let state = self.state.as_revealed_mut().ok_or(SparseTrieError::Blind)?; + // The account was revealed, either... + if let Some(value) = state.get_leaf_value(&nibbles) { + // ..it exists and we should take it's current storage root or... + TrieAccount::decode(&mut &value[..])?.storage_root + } else { + // ...the account is newly created and the storage trie is empty. 
+ EMPTY_ROOT_HASH + } + } else { + return Err(SparseTrieError::Blind.into()) + }; + + if account.is_empty() && storage_root == EMPTY_ROOT_HASH { + trace!(target: "trie::sparse", ?address, "Removing account"); + self.remove_account_leaf(&nibbles) + } else { + trace!(target: "trie::sparse", ?address, "Updating account"); + self.account_rlp_buf.clear(); + TrieAccount::from((account, storage_root)).encode(&mut self.account_rlp_buf); + self.update_account_leaf(nibbles, self.account_rlp_buf.clone()) + } + } + + /// Remove the account leaf node. + pub fn remove_account_leaf(&mut self, path: &Nibbles) -> SparseStateTrieResult<()> { + self.state.remove_leaf(path)?; + Ok(()) + } + + /// Update the leaf node of a storage trie at the provided address. + pub fn remove_storage_leaf( + &mut self, + address: B256, + slot: &Nibbles, + ) -> SparseStateTrieResult<()> { + if let Some(storage_trie) = self.storages.get_mut(&address) { + Ok(storage_trie.remove_leaf(slot)?) + } else { + Err(SparseStateTrieError::Sparse(SparseTrieError::Blind)) + } + } +} + #[cfg(test)] mod tests { use super::*; @@ -331,10 +393,7 @@ mod tests { use assert_matches::assert_matches; use rand::{rngs::StdRng, Rng, SeedableRng}; use reth_primitives_traits::Account; - use reth_trie::{ - updates::StorageTrieUpdates, BranchNodeCompact, HashBuilder, TrieAccount, TrieMask, - EMPTY_ROOT_HASH, - }; + use reth_trie::{updates::StorageTrieUpdates, HashBuilder, TrieAccount, EMPTY_ROOT_HASH}; use reth_trie_common::proof::ProofRetainer; #[test] @@ -480,49 +539,15 @@ mod tests { pretty_assertions::assert_eq!( sparse_updates, TrieUpdates { - account_nodes: HashMap::from_iter([ - ( - Nibbles::default(), - BranchNodeCompact { - state_mask: TrieMask::new(0b110), - tree_mask: TrieMask::new(0b000), - hash_mask: TrieMask::new(0b010), - hashes: vec![b256!( - "4c4ffbda3569fcf2c24ea2000b4cec86ef8b92cbf9ff415db43184c0f75a212e" - )], - root_hash: Some(b256!( - "60944bd29458529c3065d19f63c6e3d5269596fd3b04ca2e7b318912dc89ca4c" - )) - }, - 
), - ]), - storage_tries: HashMap::from_iter([ - ( - b256!("1000000000000000000000000000000000000000000000000000000000000000"), - StorageTrieUpdates { - is_deleted: false, - storage_nodes: HashMap::from_iter([( - Nibbles::default(), - BranchNodeCompact { - state_mask: TrieMask::new(0b110), - tree_mask: TrieMask::new(0b000), - hash_mask: TrieMask::new(0b010), - hashes: vec![b256!("5bc8b4fdf51839c1e18b8d6a4bd3e2e52c9f641860f0e4d197b68c2679b0e436")], - root_hash: Some(b256!("c44abf1a9e1a92736ac479b20328e8d7998aa8838b6ef52620324c9ce85e3201")) - } - )]), - removed_nodes: HashSet::default() - } - ), - ( - b256!("1100000000000000000000000000000000000000000000000000000000000000"), - StorageTrieUpdates { - is_deleted: true, - storage_nodes: HashMap::default(), - removed_nodes: HashSet::default() - } - ) - ]), + account_nodes: HashMap::default(), + storage_tries: HashMap::from_iter([( + b256!("1100000000000000000000000000000000000000000000000000000000000000"), + StorageTrieUpdates { + is_deleted: true, + storage_nodes: HashMap::default(), + removed_nodes: HashSet::default() + } + )]), removed_nodes: HashSet::default() } ); diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 97446680df44..df5dd25486c1 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -1,11 +1,12 @@ -use crate::{SparseTrieError, SparseTrieResult}; +use crate::blinded::{BlindedProvider, DefaultBlindedProvider}; use alloy_primitives::{ hex, keccak256, map::{HashMap, HashSet}, B256, }; use alloy_rlp::Decodable; -use reth_tracing::tracing::debug; +use reth_execution_errors::{SparseTrieError, SparseTrieResult}; +use reth_tracing::tracing::trace; use reth_trie_common::{ prefix_set::{PrefixSet, PrefixSetMut}, BranchNodeCompact, BranchNodeRef, ExtensionNodeRef, LeafNodeRef, Nibbles, RlpNode, TrieMask, @@ -16,28 +17,62 @@ use std::{borrow::Cow, fmt}; /// Inner representation of the sparse trie. /// Sparse trie is blind by default until nodes are revealed. 
-#[derive(PartialEq, Eq, Default, Debug)] -pub enum SparseTrie { +#[derive(PartialEq, Eq)] +pub enum SparseTrie

{ /// None of the trie nodes are known. - #[default] Blind, /// The trie nodes have been revealed. - Revealed(Box), + Revealed(Box>), +} + +impl

fmt::Debug for SparseTrie

{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Blind => write!(f, "Blind"), + Self::Revealed(revealed) => write!(f, "Revealed({revealed:?})"), + } + } +} + +impl

Default for SparseTrie

{ + fn default() -> Self { + Self::Blind + } } impl SparseTrie { + /// Creates new blind trie. + pub const fn blind() -> Self { + Self::Blind + } + /// Creates new revealed empty trie. pub fn revealed_empty() -> Self { Self::Revealed(Box::default()) } + /// Reveals the root node if the trie is blinded. + /// + /// # Returns + /// + /// Mutable reference to [`RevealedSparseTrie`]. + pub fn reveal_root( + &mut self, + root: TrieNode, + retain_updates: bool, + ) -> SparseTrieResult<&mut RevealedSparseTrie> { + self.reveal_root_with_provider(Default::default(), root, retain_updates) + } +} + +impl

SparseTrie

{ /// Returns `true` if the sparse trie has no revealed nodes. pub const fn is_blind(&self) -> bool { matches!(self, Self::Blind) } /// Returns mutable reference to revealed sparse trie if the trie is not blind. - pub fn as_revealed_mut(&mut self) -> Option<&mut RevealedSparseTrie> { + pub fn as_revealed_mut(&mut self) -> Option<&mut RevealedSparseTrie

> { if let Self::Revealed(revealed) = self { Some(revealed) } else { @@ -50,13 +85,18 @@ impl SparseTrie { /// # Returns /// /// Mutable reference to [`RevealedSparseTrie`]. - pub fn reveal_root( + pub fn reveal_root_with_provider( &mut self, + provider: P, root: TrieNode, retain_updates: bool, - ) -> SparseTrieResult<&mut RevealedSparseTrie> { + ) -> SparseTrieResult<&mut RevealedSparseTrie

> { if self.is_blind() { - *self = Self::Revealed(Box::new(RevealedSparseTrie::from_root(root, retain_updates)?)) + *self = Self::Revealed(Box::new(RevealedSparseTrie::from_provider_and_root( + provider, + root, + retain_updates, + )?)) } Ok(self.as_revealed_mut().unwrap()) } @@ -68,13 +108,6 @@ impl SparseTrie { Ok(()) } - /// Remove the leaf node. - pub fn remove_leaf(&mut self, path: &Nibbles) -> SparseTrieResult<()> { - let revealed = self.as_revealed_mut().ok_or(SparseTrieError::Blind)?; - revealed.remove_leaf(path)?; - Ok(()) - } - /// Wipe the trie, removing all values and nodes, and replacing the root with an empty node. pub fn wipe(&mut self) -> SparseTrieResult<()> { let revealed = self.as_revealed_mut().ok_or(SparseTrieError::Blind)?; @@ -93,6 +126,19 @@ impl SparseTrie { } } +impl

SparseTrie

+where + P: BlindedProvider, + SparseTrieError: From, +{ + /// Remove the leaf node. + pub fn remove_leaf(&mut self, path: &Nibbles) -> SparseTrieResult<()> { + let revealed = self.as_revealed_mut().ok_or(SparseTrieError::Blind)?; + revealed.remove_leaf(path)?; + Ok(()) + } +} + /// The representation of revealed sparse trie. /// /// ## Invariants @@ -102,39 +148,42 @@ impl SparseTrie { /// The opposite is also true. /// - All keys in `values` collection are full leaf paths. #[derive(Clone, PartialEq, Eq)] -pub struct RevealedSparseTrie { +pub struct RevealedSparseTrie

{ + /// Blinded node provider. + provider: P, /// All trie nodes. nodes: HashMap, /// All leaf values. values: HashMap>, /// Prefix set. prefix_set: PrefixSetMut, - /// Reusable buffer for RLP encoding of nodes. - rlp_buf: Vec, /// Retained trie updates. updates: Option, + /// Reusable buffer for RLP encoding of nodes. + rlp_buf: Vec, } -impl fmt::Debug for RevealedSparseTrie { +impl

fmt::Debug for RevealedSparseTrie

{ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RevealedSparseTrie") .field("nodes", &self.nodes) .field("values", &self.values) .field("prefix_set", &self.prefix_set) - .field("rlp_buf", &hex::encode(&self.rlp_buf)) .field("updates", &self.updates) - .finish() + .field("rlp_buf", &hex::encode(&self.rlp_buf)) + .finish_non_exhaustive() } } impl Default for RevealedSparseTrie { fn default() -> Self { Self { + provider: Default::default(), nodes: HashMap::from_iter([(Nibbles::default(), SparseNode::Empty)]), values: HashMap::default(), prefix_set: PrefixSetMut::default(), - rlp_buf: Vec::new(), updates: None, + rlp_buf: Vec::new(), } } } @@ -143,6 +192,28 @@ impl RevealedSparseTrie { /// Create new revealed sparse trie from the given root node. pub fn from_root(node: TrieNode, retain_updates: bool) -> SparseTrieResult { let mut this = Self { + provider: Default::default(), + nodes: HashMap::default(), + values: HashMap::default(), + prefix_set: PrefixSetMut::default(), + rlp_buf: Vec::new(), + updates: None, + } + .with_updates(retain_updates); + this.reveal_node(Nibbles::default(), node)?; + Ok(this) + } +} + +impl

RevealedSparseTrie

{ + /// Create new revealed sparse trie from the given root node. + pub fn from_provider_and_root( + provider: P, + node: TrieNode, + retain_updates: bool, + ) -> SparseTrieResult { + let mut this = Self { + provider, nodes: HashMap::default(), values: HashMap::default(), prefix_set: PrefixSetMut::default(), @@ -154,6 +225,18 @@ impl RevealedSparseTrie { Ok(this) } + /// Set new blinded node provider on sparse trie. + pub fn with_provider(self, provider: BP) -> RevealedSparseTrie { + RevealedSparseTrie { + provider, + nodes: self.nodes, + values: self.values, + prefix_set: self.prefix_set, + updates: self.updates, + rlp_buf: self.rlp_buf, + } + } + /// Set the retention of branch node updates and deletions. pub fn with_updates(mut self, retain_updates: bool) -> Self { if retain_updates { @@ -357,176 +440,6 @@ impl RevealedSparseTrie { Ok(()) } - /// Remove leaf node from the trie. - pub fn remove_leaf(&mut self, path: &Nibbles) -> SparseTrieResult<()> { - self.prefix_set.insert(path.clone()); - self.values.remove(path); - - // If the path wasn't present in `values`, we still need to walk the trie and ensure that - // there is no node at the path. When a leaf node is a blinded `Hash`, it will have an entry - // in `nodes`, but not in the `values`. - - // If the path wasn't present in `values`, we still need to walk the trie and ensure that - // there is no node at the path. When a leaf node is a blinded `Hash`, it will have an entry - // in `nodes`, but not in the `values`. - - let mut removed_nodes = self.take_nodes_for_path(path)?; - debug!(target: "trie::sparse", ?path, ?removed_nodes, "Removed nodes for path"); - // Pop the first node from the stack which is the leaf node we want to remove. - let mut child = removed_nodes.pop().expect("leaf exists"); - #[cfg(debug_assertions)] - { - let mut child_path = child.path.clone(); - let SparseNode::Leaf { key, .. 
} = &child.node else { panic!("expected leaf node") }; - child_path.extend_from_slice_unchecked(key); - assert_eq!(&child_path, path); - } - - // If we don't have any other removed nodes, insert an empty node at the root. - if removed_nodes.is_empty() { - debug_assert!(self.nodes.is_empty()); - self.nodes.insert(Nibbles::default(), SparseNode::Empty); - - return Ok(()) - } - - // Walk the stack of removed nodes from the back and re-insert them back into the trie, - // adjusting the node type as needed. - while let Some(removed_node) = removed_nodes.pop() { - let removed_path = removed_node.path; - - let new_node = match &removed_node.node { - SparseNode::Empty => return Err(SparseTrieError::Blind), - SparseNode::Hash(hash) => { - return Err(SparseTrieError::BlindedNode { path: removed_path, hash: *hash }) - } - SparseNode::Leaf { .. } => { - unreachable!("we already popped the leaf node") - } - SparseNode::Extension { key, .. } => { - // If the node is an extension node, we need to look at its child to see if we - // need to merge them. - match &child.node { - SparseNode::Empty => return Err(SparseTrieError::Blind), - SparseNode::Hash(hash) => { - return Err(SparseTrieError::BlindedNode { - path: child.path, - hash: *hash, - }) - } - // For a leaf node, we collapse the extension node into a leaf node, - // extending the key. While it's impossible to encounter an extension node - // followed by a leaf node in a complete trie, it's possible here because we - // could have downgraded the extension node's child into a leaf node from - // another node type. - SparseNode::Leaf { key: leaf_key, .. } => { - self.nodes.remove(&child.path); - - let mut new_key = key.clone(); - new_key.extend_from_slice_unchecked(leaf_key); - SparseNode::new_leaf(new_key) - } - // For an extension node, we collapse them into one extension node, - // extending the key - SparseNode::Extension { key: extension_key, .. 
} => { - self.nodes.remove(&child.path); - - let mut new_key = key.clone(); - new_key.extend_from_slice_unchecked(extension_key); - SparseNode::new_ext(new_key) - } - // For a branch node, we just leave the extension node as-is. - SparseNode::Branch { .. } => removed_node.node, - } - } - SparseNode::Branch { mut state_mask, hash: _, store_in_db_trie: _ } => { - // If the node is a branch node, we need to check the number of children left - // after deleting the child at the given nibble. - - if let Some(removed_nibble) = removed_node.unset_branch_nibble { - state_mask.unset_bit(removed_nibble); - } - - // If only one child is left set in the branch node, we need to collapse it. - if state_mask.count_bits() == 1 { - let child_nibble = - state_mask.first_set_bit_index().expect("state mask is not empty"); - - // Get full path of the only child node left. - let mut child_path = removed_path.clone(); - child_path.push_unchecked(child_nibble); - - // Remove the only child node. - let child = self.nodes.get(&child_path).unwrap(); - - debug!(target: "trie::sparse", ?removed_path, ?child_path, ?child, "Branch node has only one child"); - - let mut delete_child = false; - let new_node = match child { - SparseNode::Empty => return Err(SparseTrieError::Blind), - SparseNode::Hash(hash) => { - return Err(SparseTrieError::BlindedNode { - path: child_path, - hash: *hash, - }) - } - // If the only child is a leaf node, we downgrade the branch node into a - // leaf node, prepending the nibble to the key, and delete the old - // child. - SparseNode::Leaf { key, .. } => { - delete_child = true; - - let mut new_key = Nibbles::from_nibbles_unchecked([child_nibble]); - new_key.extend_from_slice_unchecked(key); - SparseNode::new_leaf(new_key) - } - // If the only child node is an extension node, we downgrade the branch - // node into an even longer extension node, prepending the nibble to the - // key, and delete the old child. - SparseNode::Extension { key, .. 
} => { - delete_child = true; - - let mut new_key = Nibbles::from_nibbles_unchecked([child_nibble]); - new_key.extend_from_slice_unchecked(key); - SparseNode::new_ext(new_key) - } - // If the only child is a branch node, we downgrade the current branch - // node into a one-nibble extension node. - SparseNode::Branch { .. } => { - SparseNode::new_ext(Nibbles::from_nibbles_unchecked([child_nibble])) - } - }; - - if delete_child { - self.nodes.remove(&child_path); - } - - if let Some(updates) = self.updates.as_mut() { - updates.removed_nodes.insert(removed_path.clone()); - } - - new_node - } - // If more than one child is left set in the branch, we just re-insert it - // as-is. - else { - SparseNode::new_branch(state_mask) - } - } - }; - - child = RemovedSparseNode { - path: removed_path.clone(), - node: new_node.clone(), - unset_branch_nibble: None, - }; - debug!(target: "trie::sparse", ?removed_path, ?new_node, "Re-inserting the node"); - self.nodes.insert(removed_path, new_node); - } - - Ok(()) - } - /// Traverse trie nodes down to the leaf node and collect all nodes along the path. fn take_nodes_for_path(&mut self, path: &Nibbles) -> SparseTrieResult> { let mut current = Nibbles::default(); // Start traversal from the root @@ -561,7 +474,13 @@ impl RevealedSparseTrie { { let mut current = current.clone(); current.extend_from_slice_unchecked(key); - assert!(path.starts_with(¤t)); + assert!( + path.starts_with(¤t), + "path: {:?}, current: {:?}, key: {:?}", + path, + current, + key + ); } let path = current.clone(); @@ -570,7 +489,14 @@ impl RevealedSparseTrie { } SparseNode::Branch { state_mask, .. } => { let nibble = path[current.len()]; - debug_assert!(state_mask.is_bit_set(nibble)); + debug_assert!( + state_mask.is_bit_set(nibble), + "current: {:?}, path: {:?}, nibble: {:?}, state_mask: {:?}", + current, + path, + nibble, + state_mask + ); // If the branch node has a child that is a leaf node that we're removing, // we need to unset this nibble. 
@@ -608,10 +534,10 @@ impl RevealedSparseTrie { /// Wipe the trie, removing all values and nodes, and replacing the root with an empty node. pub fn wipe(&mut self) { - let updates_retained = self.updates.is_some(); - *self = Self::default(); + self.nodes = HashMap::from_iter([(Nibbles::default(), SparseNode::Empty)]); + self.values = HashMap::default(); self.prefix_set = PrefixSetMut::all(); - self.updates = updates_retained.then(SparseTrieUpdates::wiped); + self.updates = self.updates.is_some().then(SparseTrieUpdates::wiped); } /// Return the root of the sparse trie. @@ -836,7 +762,11 @@ impl RevealedSparseTrie { let rlp_node = branch_node_ref.rlp(&mut self.rlp_buf); *hash = rlp_node.as_hash(); - let store_in_db_trie_value = if let Some(updates) = self.updates.as_mut() { + // Save a branch node update only if it's not a root node, and we need to + // persist updates. + let store_in_db_trie_value = if let Some(updates) = + self.updates.as_mut().filter(|_| !path.is_empty()) + { let mut tree_mask_values = tree_mask_values.into_iter().rev(); let mut hash_mask_values = hash_mask_values.into_iter().rev(); let mut tree_mask = TrieMask::default(); @@ -888,6 +818,195 @@ impl RevealedSparseTrie { } } +impl

RevealedSparseTrie

+where + P: BlindedProvider, + SparseTrieError: From, +{ + /// Remove leaf node from the trie. + pub fn remove_leaf(&mut self, path: &Nibbles) -> SparseTrieResult<()> { + if self.values.remove(path).is_none() { + if let Some(SparseNode::Hash(hash)) = self.nodes.get(path) { + // Leaf is present in the trie, but it's blinded. + return Err(SparseTrieError::BlindedNode { path: path.clone(), hash: *hash }) + } + + // Leaf is not present in the trie. + return Ok(()) + } + self.prefix_set.insert(path.clone()); + + // If the path wasn't present in `values`, we still need to walk the trie and ensure that + // there is no node at the path. When a leaf node is a blinded `Hash`, it will have an entry + // in `nodes`, but not in the `values`. + + let mut removed_nodes = self.take_nodes_for_path(path)?; + trace!(target: "trie::sparse", ?path, ?removed_nodes, "Removed nodes for path"); + // Pop the first node from the stack which is the leaf node we want to remove. + let mut child = removed_nodes.pop().expect("leaf exists"); + #[cfg(debug_assertions)] + { + let mut child_path = child.path.clone(); + let SparseNode::Leaf { key, .. } = &child.node else { panic!("expected leaf node") }; + child_path.extend_from_slice_unchecked(key); + assert_eq!(&child_path, path); + } + + // If we don't have any other removed nodes, insert an empty node at the root. + if removed_nodes.is_empty() { + debug_assert!(self.nodes.is_empty()); + self.nodes.insert(Nibbles::default(), SparseNode::Empty); + + return Ok(()) + } + + // Walk the stack of removed nodes from the back and re-insert them back into the trie, + // adjusting the node type as needed. + while let Some(removed_node) = removed_nodes.pop() { + let removed_path = removed_node.path; + + let new_node = match &removed_node.node { + SparseNode::Empty => return Err(SparseTrieError::Blind), + SparseNode::Hash(hash) => { + return Err(SparseTrieError::BlindedNode { path: removed_path, hash: *hash }) + } + SparseNode::Leaf { .. 
} => { + unreachable!("we already popped the leaf node") + } + SparseNode::Extension { key, .. } => { + // If the node is an extension node, we need to look at its child to see if we + // need to merge them. + match &child.node { + SparseNode::Empty => return Err(SparseTrieError::Blind), + SparseNode::Hash(hash) => { + return Err(SparseTrieError::BlindedNode { + path: child.path, + hash: *hash, + }) + } + // For a leaf node, we collapse the extension node into a leaf node, + // extending the key. While it's impossible to encounter an extension node + // followed by a leaf node in a complete trie, it's possible here because we + // could have downgraded the extension node's child into a leaf node from + // another node type. + SparseNode::Leaf { key: leaf_key, .. } => { + self.nodes.remove(&child.path); + + let mut new_key = key.clone(); + new_key.extend_from_slice_unchecked(leaf_key); + SparseNode::new_leaf(new_key) + } + // For an extension node, we collapse them into one extension node, + // extending the key + SparseNode::Extension { key: extension_key, .. } => { + self.nodes.remove(&child.path); + + let mut new_key = key.clone(); + new_key.extend_from_slice_unchecked(extension_key); + SparseNode::new_ext(new_key) + } + // For a branch node, we just leave the extension node as-is. + SparseNode::Branch { .. } => removed_node.node, + } + } + SparseNode::Branch { mut state_mask, hash: _, store_in_db_trie: _ } => { + // If the node is a branch node, we need to check the number of children left + // after deleting the child at the given nibble. + + if let Some(removed_nibble) = removed_node.unset_branch_nibble { + state_mask.unset_bit(removed_nibble); + } + + // If only one child is left set in the branch node, we need to collapse it. + if state_mask.count_bits() == 1 { + let child_nibble = + state_mask.first_set_bit_index().expect("state mask is not empty"); + + // Get full path of the only child node left. 
+ let mut child_path = removed_path.clone(); + child_path.push_unchecked(child_nibble); + + trace!(target: "trie::sparse", ?removed_path, ?child_path, ?child, "Branch node has only one child"); + + if self.nodes.get(&child_path).unwrap().is_hash() { + trace!(target: "trie::sparse", ?child_path, "Retrieving remaining blinded branch child"); + if let Some(node) = self.provider.blinded_node(child_path.clone())? { + let decoded = TrieNode::decode(&mut &node[..])?; + trace!(target: "trie::sparse", ?child_path, ?decoded, "Revealing remaining blinded branch child"); + self.reveal_node(child_path.clone(), decoded)?; + } + } + + // Get the only child node. + let child = self.nodes.get(&child_path).unwrap(); + + let mut delete_child = false; + let new_node = match child { + SparseNode::Empty => return Err(SparseTrieError::Blind), + SparseNode::Hash(hash) => { + return Err(SparseTrieError::BlindedNode { + path: child_path, + hash: *hash, + }) + } + // If the only child is a leaf node, we downgrade the branch node into a + // leaf node, prepending the nibble to the key, and delete the old + // child. + SparseNode::Leaf { key, .. } => { + delete_child = true; + + let mut new_key = Nibbles::from_nibbles_unchecked([child_nibble]); + new_key.extend_from_slice_unchecked(key); + SparseNode::new_leaf(new_key) + } + // If the only child node is an extension node, we downgrade the branch + // node into an even longer extension node, prepending the nibble to the + // key, and delete the old child. + SparseNode::Extension { key, .. } => { + delete_child = true; + + let mut new_key = Nibbles::from_nibbles_unchecked([child_nibble]); + new_key.extend_from_slice_unchecked(key); + SparseNode::new_ext(new_key) + } + // If the only child is a branch node, we downgrade the current branch + // node into a one-nibble extension node. + SparseNode::Branch { .. 
} => { + SparseNode::new_ext(Nibbles::from_nibbles_unchecked([child_nibble])) + } + }; + + if delete_child { + self.nodes.remove(&child_path); + } + + if let Some(updates) = self.updates.as_mut() { + updates.removed_nodes.insert(removed_path.clone()); + } + + new_node + } + // If more than one child is left set in the branch, we just re-insert it + // as-is. + else { + SparseNode::new_branch(state_mask) + } + } + }; + + child = RemovedSparseNode { + path: removed_path.clone(), + node: new_node.clone(), + unset_branch_nibble: None, + }; + trace!(target: "trie::sparse", ?removed_path, ?new_node, "Re-inserting the node"); + self.nodes.insert(removed_path, new_node); + } + + Ok(()) + } +} + /// Enum representing sparse trie node type. #[derive(Debug, Clone, Copy, PartialEq, Eq)] enum SparseNodeType { @@ -994,6 +1113,11 @@ impl SparseNode { pub const fn new_leaf(key: Nibbles) -> Self { Self::Leaf { key, hash: None } } + + /// Returns `true` if the node is a hash node. + pub const fn is_hash(&self) -> bool { + matches!(self, Self::Hash(_)) + } } #[derive(Debug)] @@ -1059,6 +1183,7 @@ mod tests { hashed_cursor::{noop::NoopHashedAccountCursor, HashedPostStateAccountCursor}, node_iter::{TrieElement, TrieNodeIter}, trie_cursor::noop::NoopAccountTrieCursor, + updates::TrieUpdates, walker::TrieWalker, BranchNode, ExtensionNode, HashedPostState, LeafNode, TrieAccount, }; @@ -1088,8 +1213,9 @@ mod tests { /// Returns the state root and the retained proof nodes. 
fn run_hash_builder( state: impl IntoIterator + Clone, + destroyed_accounts: HashSet, proof_targets: impl IntoIterator, - ) -> HashBuilder { + ) -> (B256, TrieUpdates, ProofNodes) { let mut account_rlp = Vec::new(); let mut hash_builder = HashBuilder::default() @@ -1127,9 +1253,14 @@ mod tests { } } } - hash_builder.root(); + let root = hash_builder.root(); + let proof_nodes = hash_builder.take_proof_nodes(); - hash_builder + let mut trie_updates = TrieUpdates::default(); + let removed_keys = node_iter.walker.take_removed_keys(); + trie_updates.finalize(hash_builder, removed_keys, destroyed_accounts); + + (root, trie_updates, proof_nodes) } /// Assert that the sparse trie nodes and the proof nodes from the hash builder are equal. @@ -1177,7 +1308,7 @@ mod tests { #[test] fn sparse_trie_is_blind() { - assert!(SparseTrie::default().is_blind()); + assert!(SparseTrie::blind().is_blind()); assert!(!SparseTrie::revealed_empty().is_blind()); } @@ -1191,16 +1322,17 @@ mod tests { account_rlp }; - let mut hash_builder = run_hash_builder([(key.clone(), value())], [key.clone()]); + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes) = + run_hash_builder([(key.clone(), value())], Default::default(), [key.clone()]); let mut sparse = RevealedSparseTrie::default().with_updates(true); sparse.update_leaf(key, value_encoded()).unwrap(); let sparse_root = sparse.root(); let sparse_updates = sparse.take_updates(); - assert_eq!(sparse_root, hash_builder.root()); - assert_eq!(sparse_updates.updated_nodes, hash_builder.updated_branch_nodes.take().unwrap()); - assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder.take_proof_nodes()); + assert_eq!(sparse_root, hash_builder_root); + assert_eq!(sparse_updates.updated_nodes, hash_builder_updates.account_nodes); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); } #[test] @@ -1215,8 +1347,9 @@ mod tests { account_rlp }; - let mut hash_builder = run_hash_builder( + let (hash_builder_root, 
hash_builder_updates, hash_builder_proof_nodes) = run_hash_builder( paths.iter().cloned().zip(std::iter::repeat_with(value)), + Default::default(), paths.clone(), ); @@ -1227,9 +1360,9 @@ mod tests { let sparse_root = sparse.root(); let sparse_updates = sparse.take_updates(); - assert_eq!(sparse_root, hash_builder.root()); - assert_eq!(sparse_updates.updated_nodes, hash_builder.updated_branch_nodes.take().unwrap()); - assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder.take_proof_nodes()); + assert_eq!(sparse_root, hash_builder_root); + assert_eq!(sparse_updates.updated_nodes, hash_builder_updates.account_nodes); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); } #[test] @@ -1242,8 +1375,9 @@ mod tests { account_rlp }; - let mut hash_builder = run_hash_builder( + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes) = run_hash_builder( paths.iter().cloned().zip(std::iter::repeat_with(value)), + Default::default(), paths.clone(), ); @@ -1254,9 +1388,9 @@ mod tests { let sparse_root = sparse.root(); let sparse_updates = sparse.take_updates(); - assert_eq!(sparse_root, hash_builder.root()); - assert_eq!(sparse_updates.updated_nodes, hash_builder.updated_branch_nodes.take().unwrap()); - assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder.take_proof_nodes()); + assert_eq!(sparse_root, hash_builder_root); + assert_eq!(sparse_updates.updated_nodes, hash_builder_updates.account_nodes); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); } #[test] @@ -1277,8 +1411,9 @@ mod tests { account_rlp }; - let mut hash_builder = run_hash_builder( + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes) = run_hash_builder( paths.iter().sorted_unstable().cloned().zip(std::iter::repeat_with(value)), + Default::default(), paths.clone(), ); @@ -1289,12 +1424,12 @@ mod tests { let sparse_root = sparse.root(); let sparse_updates = sparse.take_updates(); - assert_eq!(sparse_root, hash_builder.root()); + 
assert_eq!(sparse_root, hash_builder_root); pretty_assertions::assert_eq!( BTreeMap::from_iter(sparse_updates.updated_nodes), - BTreeMap::from_iter(hash_builder.updated_branch_nodes.take().unwrap()) + BTreeMap::from_iter(hash_builder_updates.account_nodes) ); - assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder.take_proof_nodes()); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); } #[test] @@ -1313,8 +1448,9 @@ mod tests { account_rlp }; - let mut hash_builder = run_hash_builder( + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes) = run_hash_builder( paths.iter().cloned().zip(std::iter::repeat_with(|| old_value)), + Default::default(), paths.clone(), ); @@ -1325,12 +1461,13 @@ mod tests { let sparse_root = sparse.root(); let sparse_updates = sparse.updates_ref(); - assert_eq!(sparse_root, hash_builder.root()); - assert_eq!(sparse_updates.updated_nodes, hash_builder.updated_branch_nodes.take().unwrap()); - assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder.take_proof_nodes()); + assert_eq!(sparse_root, hash_builder_root); + assert_eq!(sparse_updates.updated_nodes, hash_builder_updates.account_nodes); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); - let mut hash_builder = run_hash_builder( + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes) = run_hash_builder( paths.iter().cloned().zip(std::iter::repeat_with(|| new_value)), + Default::default(), paths.clone(), ); @@ -1340,9 +1477,9 @@ mod tests { let sparse_root = sparse.root(); let sparse_updates = sparse.take_updates(); - assert_eq!(sparse_root, hash_builder.root()); - assert_eq!(sparse_updates.updated_nodes, hash_builder.updated_branch_nodes.take().unwrap()); - assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder.take_proof_nodes()); + assert_eq!(sparse_root, hash_builder_root); + assert_eq!(sparse_updates.updated_nodes, hash_builder_updates.account_nodes); + assert_eq_sparse_trie_proof_nodes(&sparse, 
hash_builder_proof_nodes); } #[test] @@ -1617,6 +1754,36 @@ mod tests { ); } + #[test] + fn sparse_trie_remove_leaf_non_existent() { + let leaf = LeafNode::new( + Nibbles::default(), + alloy_rlp::encode_fixed_size(&U256::from(1)).to_vec(), + ); + let branch = TrieNode::Branch(BranchNode::new( + vec![ + RlpNode::word_rlp(&B256::repeat_byte(1)), + RlpNode::from_raw_rlp(&alloy_rlp::encode(leaf.clone())).unwrap(), + ], + TrieMask::new(0b11), + )); + + let mut sparse = RevealedSparseTrie::from_root(branch.clone(), false).unwrap(); + + // Reveal a branch node and one of its children + // + // Branch (Mask = 11) + // ├── 0 -> Hash (Path = 0) + // └── 1 -> Leaf (Path = 1) + sparse.reveal_node(Nibbles::default(), branch).unwrap(); + sparse.reveal_node(Nibbles::from_nibbles([0x1]), TrieNode::Leaf(leaf)).unwrap(); + + // Removing a non-existent leaf should be a noop + let sparse_old = sparse.clone(); + assert_matches!(sparse.remove_leaf(&Nibbles::from_nibbles([0x2])), Ok(())); + assert_eq!(sparse, sparse_old); + } + #[allow(clippy::type_complexity)] #[test] fn sparse_trie_fuzz() { @@ -1647,21 +1814,22 @@ mod tests { // Insert state updates into the hash builder and calculate the root state.extend(update); - let mut hash_builder = - run_hash_builder(state.clone(), state.keys().cloned().collect::>()); + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes) = + run_hash_builder( + state.clone(), + Default::default(), + state.keys().cloned().collect::>(), + ); // Assert that the sparse trie root matches the hash builder root - assert_eq!(sparse_root, hash_builder.root()); + assert_eq!(sparse_root, hash_builder_root); // Assert that the sparse trie updates match the hash builder updates pretty_assertions::assert_eq!( sparse_updates.updated_nodes, - hash_builder.updated_branch_nodes.take().unwrap() + hash_builder_updates.account_nodes ); // Assert that the sparse trie nodes match the hash builder proof nodes - assert_eq_sparse_trie_proof_nodes( - &updated_sparse, 
- hash_builder.take_proof_nodes(), - ); + assert_eq_sparse_trie_proof_nodes(&updated_sparse, hash_builder_proof_nodes); // Delete some keys from both the hash builder and the sparse trie and check // that the sparse trie root still matches the hash builder root @@ -1677,21 +1845,22 @@ mod tests { let sparse_root = updated_sparse.root(); let sparse_updates = updated_sparse.take_updates(); - let mut hash_builder = - run_hash_builder(state.clone(), state.keys().cloned().collect::>()); + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes) = + run_hash_builder( + state.clone(), + Default::default(), + state.keys().cloned().collect::>(), + ); // Assert that the sparse trie root matches the hash builder root - assert_eq!(sparse_root, hash_builder.root()); + assert_eq!(sparse_root, hash_builder_root); // Assert that the sparse trie updates match the hash builder updates pretty_assertions::assert_eq!( sparse_updates.updated_nodes, - hash_builder.updated_branch_nodes.take().unwrap() + hash_builder_updates.account_nodes ); // Assert that the sparse trie nodes match the hash builder proof nodes - assert_eq_sparse_trie_proof_nodes( - &updated_sparse, - hash_builder.take_proof_nodes(), - ); + assert_eq_sparse_trie_proof_nodes(&updated_sparse, hash_builder_proof_nodes); } } } @@ -1757,19 +1926,21 @@ mod tests { }; // Generate the proof for the root node and initialize the sparse trie with it - let proof_nodes = - run_hash_builder([(key1(), value()), (key3(), value())], [Nibbles::default()]) - .take_proof_nodes(); + let (_, _, hash_builder_proof_nodes) = run_hash_builder( + [(key1(), value()), (key3(), value())], + Default::default(), + [Nibbles::default()], + ); let mut sparse = RevealedSparseTrie::from_root( - TrieNode::decode(&mut &proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), false, ) .unwrap(); // Generate the proof for the first key and reveal it in the sparse trie - let 
proof_nodes = - run_hash_builder([(key1(), value()), (key3(), value())], [key1()]).take_proof_nodes(); - for (path, node) in proof_nodes.nodes_sorted() { + let (_, _, hash_builder_proof_nodes) = + run_hash_builder([(key1(), value()), (key3(), value())], Default::default(), [key1()]); + for (path, node) in hash_builder_proof_nodes.nodes_sorted() { sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); } @@ -1789,9 +1960,9 @@ mod tests { ); // Generate the proof for the third key and reveal it in the sparse trie - let proof_nodes_3 = - run_hash_builder([(key1(), value()), (key3(), value())], [key3()]).take_proof_nodes(); - for (path, node) in proof_nodes_3.nodes_sorted() { + let (_, _, hash_builder_proof_nodes) = + run_hash_builder([(key1(), value()), (key3(), value())], Default::default(), [key3()]); + for (path, node) in hash_builder_proof_nodes.nodes_sorted() { sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); } @@ -1803,13 +1974,13 @@ mod tests { // Generate the nodes for the full trie with all three key using the hash builder, and // compare them to the sparse trie - let proof_nodes = run_hash_builder( + let (_, _, hash_builder_proof_nodes) = run_hash_builder( [(key1(), value()), (key2(), value()), (key3(), value())], + Default::default(), [key1(), key2(), key3()], - ) - .take_proof_nodes(); + ); - assert_eq_sparse_trie_proof_nodes(&sparse, proof_nodes); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); } /// We have three leaves: 0x0000, 0x0101, and 0x0102. 
Hash builder trie has all nodes, and we @@ -1830,25 +2001,25 @@ mod tests { let value = || Account::default(); // Generate the proof for the root node and initialize the sparse trie with it - let proof_nodes = run_hash_builder( + let (_, _, hash_builder_proof_nodes) = run_hash_builder( [(key1(), value()), (key2(), value()), (key3(), value())], + Default::default(), [Nibbles::default()], - ) - .take_proof_nodes(); + ); let mut sparse = RevealedSparseTrie::from_root( - TrieNode::decode(&mut &proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), false, ) .unwrap(); // Generate the proof for the children of the root branch node and reveal it in the sparse // trie - let proof_nodes = run_hash_builder( + let (_, _, hash_builder_proof_nodes) = run_hash_builder( [(key1(), value()), (key2(), value()), (key3(), value())], + Default::default(), [key1(), Nibbles::from_nibbles_unchecked([0x01])], - ) - .take_proof_nodes(); - for (path, node) in proof_nodes.nodes_sorted() { + ); + for (path, node) in hash_builder_proof_nodes.nodes_sorted() { sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); } @@ -1868,10 +2039,12 @@ mod tests { ); // Generate the proof for the third key and reveal it in the sparse trie - let proof_nodes = - run_hash_builder([(key1(), value()), (key2(), value()), (key3(), value())], [key2()]) - .take_proof_nodes(); - for (path, node) in proof_nodes.nodes_sorted() { + let (_, _, hash_builder_proof_nodes) = run_hash_builder( + [(key1(), value()), (key2(), value()), (key3(), value())], + Default::default(), + [key2()], + ); + for (path, node) in hash_builder_proof_nodes.nodes_sorted() { sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); } @@ -1903,11 +2076,13 @@ mod tests { }; // Generate the proof for the root node and initialize the sparse trie with it - let proof_nodes = - run_hash_builder([(key1(), value()), (key2(), value())], 
[Nibbles::default()]) - .take_proof_nodes(); + let (_, _, hash_builder_proof_nodes) = run_hash_builder( + [(key1(), value()), (key2(), value())], + Default::default(), + [Nibbles::default()], + ); let mut sparse = RevealedSparseTrie::from_root( - TrieNode::decode(&mut &proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), false, ) .unwrap(); @@ -1928,9 +2103,9 @@ mod tests { ); // Generate the proof for the first key and reveal it in the sparse trie - let proof_nodes = - run_hash_builder([(key1(), value()), (key2(), value())], [key1()]).take_proof_nodes(); - for (path, node) in proof_nodes.nodes_sorted() { + let (_, _, hash_builder_proof_nodes) = + run_hash_builder([(key1(), value()), (key2(), value())], Default::default(), [key1()]); + for (path, node) in hash_builder_proof_nodes.nodes_sorted() { sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); } @@ -2025,16 +2200,19 @@ mod tests { account_rlp }; - let mut hash_builder = - run_hash_builder([(key1(), value()), (key2(), value())], [Nibbles::default()]); + let (hash_builder_root, hash_builder_updates, _) = run_hash_builder( + [(key1(), value()), (key2(), value())], + Default::default(), + [Nibbles::default()], + ); let mut sparse = RevealedSparseTrie::default(); sparse.update_leaf(key1(), value_encoded()).unwrap(); sparse.update_leaf(key2(), value_encoded()).unwrap(); let sparse_root = sparse.root(); let sparse_updates = sparse.take_updates(); - assert_eq!(sparse_root, hash_builder.root()); - assert_eq!(sparse_updates.updated_nodes, hash_builder.updated_branch_nodes.take().unwrap()); + assert_eq!(sparse_root, hash_builder_root); + assert_eq!(sparse_updates.updated_nodes, hash_builder_updates.account_nodes); } #[test] diff --git a/crates/trie/trie/Cargo.toml b/crates/trie/trie/Cargo.toml index c1c3ae4dd876..011c95e6a927 100644 --- a/crates/trie/trie/Cargo.toml +++ b/crates/trie/trie/Cargo.toml @@ -17,6 +17,7 @@ 
reth-execution-errors.workspace = true reth-primitives.workspace = true reth-stages-types.workspace = true reth-storage-errors.workspace = true +reth-trie-sparse.workspace = true reth-trie-common.workspace = true revm.workspace = true diff --git a/crates/trie/trie/benches/hash_post_state.rs b/crates/trie/trie/benches/hash_post_state.rs index 6e913ef78a3c..7111a785f469 100644 --- a/crates/trie/trie/benches/hash_post_state.rs +++ b/crates/trie/trie/benches/hash_post_state.rs @@ -2,7 +2,7 @@ use alloy_primitives::{keccak256, map::HashMap, Address, B256, U256}; use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner}; -use reth_trie::{HashedPostState, HashedStorage}; +use reth_trie::{HashedPostState, HashedStorage, KeccakKeyHasher}; use revm::db::{states::BundleBuilder, BundleAccount}; pub fn hash_post_state(c: &mut Criterion) { @@ -19,7 +19,7 @@ pub fn hash_post_state(c: &mut Criterion) { // parallel group.bench_function(BenchmarkId::new("parallel hashing", size), |b| { - b.iter(|| HashedPostState::from_bundle_state(&state)) + b.iter(|| HashedPostState::from_bundle_state::(&state)) }); } } diff --git a/crates/trie/trie/src/proof/blinded.rs b/crates/trie/trie/src/proof/blinded.rs new file mode 100644 index 000000000000..5fd3ecdc08e9 --- /dev/null +++ b/crates/trie/trie/src/proof/blinded.rs @@ -0,0 +1,114 @@ +use super::{Proof, StorageProof}; +use crate::{hashed_cursor::HashedCursorFactory, trie_cursor::TrieCursorFactory}; +use alloy_primitives::{ + map::{HashMap, HashSet}, + Bytes, B256, +}; +use reth_execution_errors::SparseTrieError; +use reth_trie_common::{prefix_set::TriePrefixSetsMut, Nibbles}; +use reth_trie_sparse::blinded::{pad_path_to_key, BlindedProvider, BlindedProviderFactory}; +use std::sync::Arc; + +/// Factory for instantiating providers capable of retrieving blinded trie nodes via proofs. 
+#[derive(Debug)] +pub struct ProofBlindedProviderFactory { + /// The cursor factory for traversing trie nodes. + trie_cursor_factory: T, + /// The factory for hashed cursors. + hashed_cursor_factory: H, + /// A set of prefix sets that have changes. + prefix_sets: Arc, +} + +impl BlindedProviderFactory for ProofBlindedProviderFactory +where + T: TrieCursorFactory + Clone, + H: HashedCursorFactory + Clone, +{ + type AccountNodeProvider = ProofBlindedAccountProvider; + type StorageNodeProvider = ProofBlindedStorageProvider; + + fn account_node_provider(&self) -> Self::AccountNodeProvider { + ProofBlindedAccountProvider { + trie_cursor_factory: self.trie_cursor_factory.clone(), + hashed_cursor_factory: self.hashed_cursor_factory.clone(), + prefix_sets: self.prefix_sets.clone(), + } + } + + fn storage_node_provider(&self, account: B256) -> Self::StorageNodeProvider { + ProofBlindedStorageProvider { + trie_cursor_factory: self.trie_cursor_factory.clone(), + hashed_cursor_factory: self.hashed_cursor_factory.clone(), + prefix_sets: self.prefix_sets.clone(), + account, + } + } +} + +/// Blinded provider for retrieving account trie nodes by path. +#[derive(Debug)] +pub struct ProofBlindedAccountProvider { + /// The cursor factory for traversing trie nodes. + trie_cursor_factory: T, + /// The factory for hashed cursors. + hashed_cursor_factory: H, + /// A set of prefix sets that have changes. 
+ prefix_sets: Arc, +} + +impl BlindedProvider for ProofBlindedAccountProvider +where + T: TrieCursorFactory + Clone, + H: HashedCursorFactory + Clone, +{ + type Error = SparseTrieError; + + fn blinded_node(&mut self, path: Nibbles) -> Result, Self::Error> { + let targets = HashMap::from_iter([(pad_path_to_key(&path), HashSet::default())]); + let proof = + Proof::new(self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone()) + .with_prefix_sets_mut(self.prefix_sets.as_ref().clone()) + .multiproof(targets) + .map_err(|error| SparseTrieError::Other(Box::new(error)))?; + + Ok(proof.account_subtree.into_inner().remove(&path)) + } +} + +/// Blinded provider for retrieving storage trie nodes by path. +#[derive(Debug)] +pub struct ProofBlindedStorageProvider { + /// The cursor factory for traversing trie nodes. + trie_cursor_factory: T, + /// The factory for hashed cursors. + hashed_cursor_factory: H, + /// A set of prefix sets that have changes. + prefix_sets: Arc, + /// Target account. 
+ account: B256, +} + +impl BlindedProvider for ProofBlindedStorageProvider +where + T: TrieCursorFactory + Clone, + H: HashedCursorFactory + Clone, +{ + type Error = SparseTrieError; + + fn blinded_node(&mut self, path: Nibbles) -> Result, Self::Error> { + let targets = HashSet::from_iter([pad_path_to_key(&path)]); + let storage_prefix_set = + self.prefix_sets.storage_prefix_sets.get(&self.account).cloned().unwrap_or_default(); + let proof = StorageProof::new_hashed( + self.trie_cursor_factory.clone(), + self.hashed_cursor_factory.clone(), + self.account, + ) + .with_prefix_set_mut(storage_prefix_set) + .storage_multiproof(targets) + .map_err(|error| SparseTrieError::Other(Box::new(error)))?; + + Ok(proof.subtree.into_inner().remove(&path)) + } +} diff --git a/crates/trie/trie/src/proof.rs b/crates/trie/trie/src/proof/mod.rs similarity index 95% rename from crates/trie/trie/src/proof.rs rename to crates/trie/trie/src/proof/mod.rs index 34315416cb8d..c344ec76239a 100644 --- a/crates/trie/trie/src/proof.rs +++ b/crates/trie/trie/src/proof/mod.rs @@ -17,6 +17,9 @@ use reth_trie_common::{ proof::ProofRetainer, AccountProof, MultiProof, StorageMultiProof, TrieAccount, }; +mod blinded; +pub use blinded::*; + /// A struct for generating merkle proofs. 
/// /// Proof generator adds the target address and slots to the prefix set, enables the proof retainer @@ -115,19 +118,20 @@ where hash_builder.add_branch(node.key, node.value, node.children_are_in_trie); } TrieElement::Leaf(hashed_address, account) => { + let proof_targets = targets.remove(&hashed_address); + let leaf_is_proof_target = proof_targets.is_some(); let storage_prefix_set = self .prefix_sets .storage_prefix_sets .remove(&hashed_address) .unwrap_or_default(); - let proof_targets = targets.remove(&hashed_address).unwrap_or_default(); let storage_multiproof = StorageProof::new_hashed( self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone(), hashed_address, ) .with_prefix_set_mut(storage_prefix_set) - .storage_multiproof(proof_targets)?; + .storage_multiproof(proof_targets.unwrap_or_default())?; // Encode account account_rlp.clear(); @@ -136,8 +140,11 @@ where hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp); - // Overwrite storage multiproof. - storages.insert(hashed_address, storage_multiproof); + // We might be adding leaves that are not necessarily our proof targets. + if leaf_is_proof_target { + // Overwrite storage multiproof. + storages.insert(hashed_address, storage_multiproof); + } } } } diff --git a/crates/trie/trie/src/state.rs b/crates/trie/trie/src/state.rs index fdfb86a53ddb..3e390bf97bcf 100644 --- a/crates/trie/trie/src/state.rs +++ b/crates/trie/trie/src/state.rs @@ -10,6 +10,7 @@ use alloy_primitives::{ use itertools::Itertools; use rayon::prelude::{IntoParallelIterator, ParallelIterator}; use reth_primitives::Account; +use reth_trie_common::KeyHasher; use revm::db::{states::CacheAccount, AccountStatus, BundleAccount}; use std::borrow::Cow; @@ -26,13 +27,13 @@ impl HashedPostState { /// Initialize [`HashedPostState`] from bundle state. /// Hashes all changed accounts and storage entries that are currently stored in the bundle /// state. 
- pub fn from_bundle_state<'a>( + pub fn from_bundle_state<'a, KH: KeyHasher>( state: impl IntoParallelIterator, ) -> Self { let hashed = state .into_par_iter() .map(|(address, account)| { - let hashed_address = keccak256(address); + let hashed_address = KH::hash_key(address); let hashed_account = account.info.clone().map(Into::into); let hashed_storage = HashedStorage::from_plain_storage( account.status, @@ -53,13 +54,13 @@ impl HashedPostState { /// Initialize [`HashedPostState`] from cached state. /// Hashes all changed accounts and storage entries that are currently stored in cache. - pub fn from_cache_state<'a>( + pub fn from_cache_state<'a, KH: KeyHasher>( state: impl IntoParallelIterator, ) -> Self { let hashed = state .into_par_iter() .map(|(address, account)| { - let hashed_address = keccak256(address); + let hashed_address = KH::hash_key(address); let hashed_account = account.account.as_ref().map(|a| a.info.clone().into()); let hashed_storage = HashedStorage::from_plain_storage( account.status, @@ -354,6 +355,7 @@ impl HashedStorageSorted { mod tests { use super::*; use alloy_primitives::Bytes; + use reth_trie_common::KeccakKeyHasher; use revm::{ db::{ states::{plain_account::PlainStorage, StorageSlot}, @@ -467,7 +469,7 @@ mod tests { let state = vec![(&address, &account)]; // Convert the bundle state into a hashed post state. - let hashed_state = HashedPostState::from_bundle_state(state); + let hashed_state = HashedPostState::from_bundle_state::(state); // Validate the hashed post state. assert_eq!(hashed_state.accounts.len(), 1); @@ -506,7 +508,7 @@ mod tests { let state = vec![(&address, &account)]; // Convert the cache state into a hashed post state. - let hashed_state = HashedPostState::from_cache_state(state); + let hashed_state = HashedPostState::from_cache_state::(state); // Validate the hashed post state. 
assert_eq!(hashed_state.accounts.len(), 1); diff --git a/crates/trie/trie/src/trie_cursor/subnode.rs b/crates/trie/trie/src/trie_cursor/subnode.rs index c928028eb157..457c1ba4685b 100644 --- a/crates/trie/trie/src/trie_cursor/subnode.rs +++ b/crates/trie/trie/src/trie_cursor/subnode.rs @@ -76,7 +76,7 @@ impl CursorSubNode { pub fn state_flag(&self) -> bool { self.node .as_ref() - .map_or(true, |node| self.nibble < 0 || node.state_mask.is_bit_set(self.nibble as u8)) + .is_none_or(|node| self.nibble < 0 || node.state_mask.is_bit_set(self.nibble as u8)) } /// Returns `true` if the tree flag is set for the current nibble. @@ -84,7 +84,7 @@ impl CursorSubNode { pub fn tree_flag(&self) -> bool { self.node .as_ref() - .map_or(true, |node| self.nibble < 0 || node.tree_mask.is_bit_set(self.nibble as u8)) + .is_none_or(|node| self.nibble < 0 || node.tree_mask.is_bit_set(self.nibble as u8)) } /// Returns `true` if the current nibble has a root hash. diff --git a/crates/trie/trie/src/walker.rs b/crates/trie/trie/src/walker.rs index d1c5247966da..647c1486ef09 100644 --- a/crates/trie/trie/src/walker.rs +++ b/crates/trie/trie/src/walker.rs @@ -145,11 +145,12 @@ impl TrieWalker { } /// Advances the walker to the next trie node and updates the skip node flag. + /// The new key can then be obtained via `key()`. /// /// # Returns /// - /// * `Result, Error>` - The next key in the trie or an error. - pub fn advance(&mut self) -> Result, DatabaseError> { + /// * `Result<(), Error>` - Unit on success or an error. + pub fn advance(&mut self) -> Result<(), DatabaseError> { if let Some(last) = self.stack.last() { if !self.can_skip_current_node && self.children_are_in_trie() { // If we can't skip the current node and the children are in the trie, @@ -167,8 +168,7 @@ impl TrieWalker { self.update_skip_node(); } - // Return the current key. - Ok(self.key().cloned()) + Ok(()) } /// Retrieves the current root node from the DB, seeking either the exact node or the next one. 
diff --git a/docs/design/database.md b/docs/design/database.md index cf2a6c8fcc10..48fc8612cbaa 100644 --- a/docs/design/database.md +++ b/docs/design/database.md @@ -56,7 +56,7 @@ BlockWithdrawals { } Transactions { u64 TxNumber "PK" - TransactionSignedNoHash Data + TransactionSigned Data } TransactionHashNumbers { B256 TxHash "PK" diff --git a/examples/custom-beacon-withdrawals/src/main.rs b/examples/custom-beacon-withdrawals/src/main.rs index ccba73afbc1d..26109db1e033 100644 --- a/examples/custom-beacon-withdrawals/src/main.rs +++ b/examples/custom-beacon-withdrawals/src/main.rs @@ -26,7 +26,7 @@ use reth_evm::execute::{ }; use reth_evm_ethereum::EthEvmConfig; use reth_node_ethereum::{node::EthereumAddOns, BasicBlockExecutorProvider, EthereumNode}; -use reth_primitives::{BlockWithSenders, Receipt}; +use reth_primitives::{BlockWithSenders, EthPrimitives, Receipt}; use std::{fmt::Display, sync::Arc}; pub const SYSTEM_ADDRESS: Address = address!("fffffffffffffffffffffffffffffffffffffffe"); @@ -59,7 +59,7 @@ pub struct CustomExecutorBuilder; impl ExecutorBuilder for CustomExecutorBuilder where - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine, Node: FullNodeTypes, { type EVM = EthEvmConfig; @@ -88,6 +88,7 @@ pub struct CustomExecutorStrategyFactory { } impl BlockExecutionStrategyFactory for CustomExecutorStrategyFactory { + type Primitives = EthPrimitives; type Strategy + Display>> = CustomExecutorStrategy; fn create_strategy(&self, db: DB) -> Self::Strategy @@ -135,10 +136,12 @@ where } } -impl BlockExecutionStrategy for CustomExecutorStrategy +impl BlockExecutionStrategy for CustomExecutorStrategy where DB: Database + Display>, { + type DB = DB; + type Primitives = EthPrimitives; type Error = BlockExecutionError; fn apply_pre_execution_changes( @@ -158,7 +161,7 @@ where &mut self, _block: &BlockWithSenders, _total_difficulty: U256, - ) -> Result { + ) -> Result, Self::Error> { Ok(ExecuteOutput { receipts: vec![], gas_used: 0 }) } diff --git 
a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index f9ac5c238659..1034effebf84 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -38,14 +38,14 @@ use reth::{ }, network::NetworkHandle, payload::ExecutionPayloadValidator, - primitives::{Block, EthPrimitives, SealedBlockFor}, + primitives::{Block, EthPrimitives, SealedBlockFor, TransactionSigned}, providers::{CanonStateSubscriptions, EthStorage, StateProviderFactory}, rpc::{ eth::EthApi, types::engine::{ExecutionPayload, ExecutionPayloadSidecar, PayloadError}, }, tasks::TaskManager, - transaction_pool::TransactionPool, + transaction_pool::{PoolTransaction, TransactionPool}, }; use reth_basic_payload_builder::{ BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig, BuildArguments, BuildOutcome, @@ -340,7 +340,9 @@ where Primitives = EthPrimitives, >, >, - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool> + + Unpin + + 'static, { async fn spawn_payload_service( self, @@ -380,7 +382,7 @@ pub struct CustomPayloadBuilder; impl PayloadBuilder for CustomPayloadBuilder where Client: StateProviderFactory + ChainSpecProvider, - Pool: TransactionPool, + Pool: TransactionPool>, { type Attributes = CustomPayloadBuilderAttributes; type BuiltPayload = EthBuiltPayload; diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index b9a4fc26a95b..8990ba2252ec 100644 --- a/examples/custom-evm/src/main.rs +++ b/examples/custom-evm/src/main.rs @@ -20,7 +20,7 @@ use reth::{ }, rpc::types::engine::PayloadAttributes, tasks::TaskManager, - transaction_pool::TransactionPool, + transaction_pool::{PoolTransaction, TransactionPool}, }; use reth_chainspec::{Chain, ChainSpec}; use reth_evm_ethereum::EthEvmConfig; @@ -84,6 +84,8 @@ impl MyEvmConfig { impl ConfigureEvmEnv for MyEvmConfig { type Header = Header; + type Transaction = TransactionSigned; + type Error = Infallible; fn fill_tx_env(&self, tx_env: &mut 
TxEnv, transaction: &TransactionSigned, sender: Address) { @@ -153,7 +155,7 @@ pub struct MyExecutorBuilder; impl ExecutorBuilder for MyExecutorBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes>, { type EVM = MyEvmConfig; type Executor = BasicBlockExecutorProvider>; @@ -183,7 +185,9 @@ impl PayloadServiceBuilder for MyPayloadBuilder where Types: NodeTypesWithEngine, Node: FullNodeTypes, - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool> + + Unpin + + 'static, Types::Engine: PayloadTypes< BuiltPayload = EthBuiltPayload, PayloadAttributes = PayloadAttributes, diff --git a/examples/custom-inspector/src/main.rs b/examples/custom-inspector/src/main.rs index 67863d00e1e9..6b25c46b76ca 100644 --- a/examples/custom-inspector/src/main.rs +++ b/examples/custom-inspector/src/main.rs @@ -54,8 +54,7 @@ fn main() { if let Some(recipient) = tx.to() { if args.is_match(&recipient) { // convert the pool transaction - let call_request = - transaction_to_call_request(tx.to_recovered_transaction()); + let call_request = transaction_to_call_request(tx.to_consensus()); let result = eth_api .spawn_with_call_at( diff --git a/examples/custom-payload-builder/src/main.rs b/examples/custom-payload-builder/src/main.rs index 6047da0dd1ba..d7c42e341b59 100644 --- a/examples/custom-payload-builder/src/main.rs +++ b/examples/custom-payload-builder/src/main.rs @@ -17,14 +17,14 @@ use reth::{ cli::{config::PayloadBuilderConfig, Cli}, payload::PayloadBuilderHandle, providers::CanonStateSubscriptions, - transaction_pool::TransactionPool, + transaction_pool::{PoolTransaction, TransactionPool}, }; use reth_basic_payload_builder::BasicPayloadJobGeneratorConfig; use reth_chainspec::ChainSpec; use reth_node_api::NodeTypesWithEngine; use reth_node_ethereum::{node::EthereumAddOns, EthEngineTypes, EthEvmConfig, EthereumNode}; use reth_payload_builder::PayloadBuilderService; -use reth_primitives::EthPrimitives; +use reth_primitives::{EthPrimitives, TransactionSigned}; pub mod 
generator; pub mod job; @@ -42,7 +42,9 @@ where Primitives = EthPrimitives, >, >, - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool> + + Unpin + + 'static, { async fn spawn_payload_service( self, diff --git a/examples/db-access/Cargo.toml b/examples/db-access/Cargo.toml index 3310d1cbd676..ec278ac1cc14 100644 --- a/examples/db-access/Cargo.toml +++ b/examples/db-access/Cargo.toml @@ -14,6 +14,7 @@ reth-provider.workspace = true reth-node-ethereum.workspace = true reth-node-types.workspace = true +alloy-consensus.workspace = true alloy-rpc-types-eth.workspace = true alloy-primitives.workspace = true diff --git a/examples/db-access/src/main.rs b/examples/db-access/src/main.rs index 9f95fb51d91a..727bd1bfff3c 100644 --- a/examples/db-access/src/main.rs +++ b/examples/db-access/src/main.rs @@ -1,3 +1,4 @@ +use alloy_consensus::BlockHeader; use alloy_primitives::{Address, B256}; use alloy_rpc_types_eth::{Filter, FilteredParams}; use reth_chainspec::ChainSpecBuilder; @@ -193,7 +194,7 @@ fn receipts_provider_example< // receipts and do something with the data // 1. get the bloom from the header let header = provider.header_by_number(header_num)?.unwrap(); - let bloom = header.logs_bloom; + let bloom = header.logs_bloom(); // 2. Construct the address/topics filters // For a hypothetical address, we'll want to filter down for a specific indexed topic (e.g. 
diff --git a/examples/manual-p2p/src/main.rs b/examples/manual-p2p/src/main.rs index 79a2ff26a274..15b14d98ea8a 100644 --- a/examples/manual-p2p/src/main.rs +++ b/examples/manual-p2p/src/main.rs @@ -16,7 +16,7 @@ use reth_ecies::stream::ECIESStream; use reth_eth_wire::{ EthMessage, EthStream, HelloMessage, P2PStream, Status, UnauthedEthStream, UnauthedP2PStream, }; -use reth_network::config::rng_secret_key; +use reth_network::{config::rng_secret_key, EthNetworkPrimitives}; use reth_network_peers::{mainnet_nodes, pk2id, NodeRecord}; use reth_primitives::{EthereumHardfork, Head}; use secp256k1::{SecretKey, SECP256K1}; @@ -24,7 +24,7 @@ use std::sync::LazyLock; use tokio::net::TcpStream; type AuthedP2PStream = P2PStream>; -type AuthedEthStream = EthStream>>; +type AuthedEthStream = EthStream>, EthNetworkPrimitives>; pub static MAINNET_BOOT_NODES: LazyLock> = LazyLock::new(mainnet_nodes); diff --git a/examples/stateful-precompile/src/main.rs b/examples/stateful-precompile/src/main.rs index f683af4e430a..03ed1fa69433 100644 --- a/examples/stateful-precompile/src/main.rs +++ b/examples/stateful-precompile/src/main.rs @@ -28,7 +28,7 @@ use reth_node_ethereum::{ node::EthereumAddOns, BasicBlockExecutorProvider, EthEvmConfig, EthExecutionStrategyFactory, EthereumNode, }; -use reth_primitives::TransactionSigned; +use reth_primitives::{EthPrimitives, TransactionSigned}; use reth_tracing::{RethTracer, Tracer}; use schnellru::{ByLength, LruMap}; use std::{collections::HashMap, convert::Infallible, sync::Arc}; @@ -148,6 +148,7 @@ impl StatefulPrecompileMut for WrappedPrecompile { impl ConfigureEvmEnv for MyEvmConfig { type Header = Header; + type Transaction = TransactionSigned; type Error = Infallible; fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { @@ -226,7 +227,7 @@ pub struct MyExecutorBuilder { impl ExecutorBuilder for MyExecutorBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes>, { type EVM = MyEvmConfig; type 
Executor = BasicBlockExecutorProvider>; diff --git a/examples/txpool-tracing/src/main.rs b/examples/txpool-tracing/src/main.rs index 94f800987a96..76abd65f4af6 100644 --- a/examples/txpool-tracing/src/main.rs +++ b/examples/txpool-tracing/src/main.rs @@ -44,8 +44,7 @@ fn main() { if let Some(recipient) = tx.to() { if args.is_match(&recipient) { // trace the transaction with `trace_call` - let callrequest = - transaction_to_call_request(tx.to_recovered_transaction()); + let callrequest = transaction_to_call_request(tx.to_consensus()); let tracerequest = TraceCallRequest::new(callrequest) .with_trace_type(TraceType::Trace); if let Ok(trace_result) = traceapi.trace_call(tracerequest).await { diff --git a/testing/ef-tests/src/models.rs b/testing/ef-tests/src/models.rs index 292b32e8ce0f..742498e81bfc 100644 --- a/testing/ef-tests/src/models.rs +++ b/testing/ef-tests/src/models.rs @@ -88,6 +88,8 @@ pub struct Header { pub parent_beacon_block_root: Option, /// Requests root. pub requests_hash: Option, + /// Target blobs per block. + pub target_blobs_per_block: Option, } impl From

for SealedHeader { @@ -114,6 +116,7 @@ impl From
for SealedHeader { excess_blob_gas: value.excess_blob_gas.map(|v| v.to::()), parent_beacon_block_root: value.parent_beacon_block_root, requests_hash: value.requests_hash, + target_blobs_per_block: value.target_blobs_per_block.map(|v| v.to::()), }; Self::new(header, value.hash) }