diff --git a/.github/assets/hive/Dockerfile b/.github/assets/hive/Dockerfile index 9f75ba6f1cf2..25b71bf21872 100644 --- a/.github/assets/hive/Dockerfile +++ b/.github/assets/hive/Dockerfile @@ -5,4 +5,5 @@ COPY dist/reth /usr/local/bin COPY LICENSE-* ./ EXPOSE 30303 30303/udp 9001 8545 8546 -ENTRYPOINT ["/usr/local/bin/reth"] \ No newline at end of file +ENV RUST_LOG=debug +ENTRYPOINT ["/usr/local/bin/reth"] diff --git a/.github/workflows/kurtosis-op.yml b/.github/workflows/kurtosis-op.yml index 2652992fca92..c7307d10c7bd 100644 --- a/.github/workflows/kurtosis-op.yml +++ b/.github/workflows/kurtosis-op.yml @@ -102,6 +102,8 @@ jobs: if [ $BLOCK_GETH -ge 100 ] && [ $BLOCK_RETH -ge 100 ] ; then exit 0; fi echo "Waiting for clients to advance..., Reth: $BLOCK_RETH Geth: $BLOCK_GETH" done + kurtosis service logs -a op-devnet op-el-2-op-reth-op-node-op-kurtosis + kurtosis service logs -a op-devnet op-cl-2-op-node-op-reth-op-kurtosis exit 1 diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml index c4df17279205..50f5fa6e38e8 100644 --- a/.github/workflows/unit.yml +++ b/.github/workflows/unit.yml @@ -34,11 +34,11 @@ jobs: partition: 2 total_partitions: 2 - type: optimism - args: --features "asm-keccak optimism" --locked --exclude reth --exclude reth-bench --exclude "example-*" + args: --features "asm-keccak optimism" --locked --exclude reth --exclude reth-bench --exclude "example-*" --exclude "reth-ethereum-*" --exclude "*-ethereum" partition: 1 total_partitions: 2 - type: optimism - args: --features "asm-keccak optimism" --locked --exclude reth --exclude reth-bench --exclude "example-*" + args: --features "asm-keccak optimism" --locked --exclude reth --exclude reth-bench --exclude "example-*" --exclude "reth-ethereum-*" --exclude "*-ethereum" partition: 2 total_partitions: 2 - type: book diff --git a/Cargo.lock b/Cargo.lock index c52f872d2449..7a4936b082a9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4606,9 +4606,9 @@ dependencies = [ [[package]] name = 
"libc" -version = "0.2.164" +version = "0.2.165" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "433bfe06b8c75da9b2e3fbea6e5329ff87748f0b144ef75306e674c3f6f7c13f" +checksum = "fcb4d3d38eab6c5239a362fa8bae48c03baf980a6e7079f063942d563ef3533e" [[package]] name = "libloading" @@ -6514,6 +6514,7 @@ dependencies = [ "reth-payload-primitives", "reth-payload-validator", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-prune", "reth-prune-types", @@ -6630,7 +6631,6 @@ dependencies = [ "alloy-primitives", "alloy-signer", "alloy-signer-local", - "auto_impl", "derive_more 1.0.0", "metrics", "parking_lot", @@ -6641,6 +6641,7 @@ dependencies = [ "reth-execution-types", "reth-metrics", "reth-primitives", + "reth-primitives-traits", "reth-scroll-revm", "reth-storage-api", "reth-testing-utils", @@ -6853,6 +6854,7 @@ dependencies = [ "reth-chainspec", "reth-consensus", "reth-primitives", + "reth-primitives-traits", "reth-storage-api", "revm-primitives", ] @@ -7123,6 +7125,7 @@ dependencies = [ "alloy-eips", "alloy-network", "alloy-primitives", + "alloy-rlp", "alloy-rpc-types-engine", "alloy-rpc-types-eth", "alloy-signer", @@ -7132,20 +7135,27 @@ dependencies = [ "futures-util", "jsonrpsee", "op-alloy-rpc-types-engine", - "reth", "reth-chainspec", "reth-db", "reth-engine-local", + "reth-network", + "reth-network-api", "reth-network-peers", "reth-node-api", "reth-node-builder", + "reth-node-core", + "reth-optimism-primitives", "reth-payload-builder", "reth-payload-builder-primitives", "reth-payload-primitives", "reth-primitives", "reth-provider", + "reth-rpc-api", + "reth-rpc-eth-api", "reth-rpc-layer", + "reth-rpc-server-types", "reth-stages-types", + "reth-tasks", "reth-tokio-util", "reth-tracing", "serde_json", @@ -7202,10 +7212,10 @@ dependencies = [ "reth-engine-tree", "reth-ethereum-engine-primitives", "reth-evm", + "reth-node-types", "reth-payload-builder", "reth-payload-builder-primitives", "reth-payload-primitives", - 
"reth-payload-validator", "reth-provider", "reth-prune", "reth-rpc-types-compat", @@ -7220,6 +7230,7 @@ dependencies = [ name = "reth-engine-primitives" version = "1.1.2" dependencies = [ + "alloy-consensus", "alloy-primitives", "alloy-rpc-types-engine", "futures", @@ -7228,6 +7239,7 @@ dependencies = [ "reth-payload-builder-primitives", "reth-payload-primitives", "reth-primitives", + "reth-primitives-traits", "reth-trie", "serde", "thiserror 1.0.69", @@ -7252,7 +7264,6 @@ dependencies = [ "reth-network-p2p", "reth-node-types", "reth-payload-builder", - "reth-payload-validator", "reth-primitives", "reth-provider", "reth-prune", @@ -7277,6 +7288,8 @@ dependencies = [ "crossbeam-channel", "futures", "metrics", + "rand 0.8.5", + "rayon", "reth-beacon-consensus", "reth-blockchain-tree", "reth-blockchain-tree-api", @@ -7294,9 +7307,7 @@ dependencies = [ "reth-payload-builder", "reth-payload-builder-primitives", "reth-payload-primitives", - "reth-payload-validator", "reth-primitives", - "reth-primitives-traits", "reth-provider", "reth-prune", "reth-prune-types", @@ -7306,9 +7317,12 @@ dependencies = [ "reth-stages-api", "reth-static-file", "reth-tasks", + "reth-testing-utils", "reth-tracing", "reth-trie", + "reth-trie-db", "reth-trie-parallel", + "reth-trie-sparse", "revm-primitives", "thiserror 1.0.69", "tokio", @@ -7459,6 +7473,7 @@ dependencies = [ "reth-chainspec", "reth-engine-primitives", "reth-payload-primitives", + "reth-payload-validator", "reth-primitives", "reth-rpc-types-compat", "serde", @@ -7595,6 +7610,7 @@ dependencies = [ name = "reth-execution-types" version = "1.1.2" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "arbitrary", @@ -7605,6 +7621,7 @@ dependencies = [ "reth-primitives-traits", "reth-scroll-revm", "reth-trie", + "reth-trie-common", "serde", "serde_with", ] @@ -7728,6 +7745,7 @@ dependencies = [ "reth-engine-primitives", "reth-evm", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-revm", 
"reth-rpc-api", @@ -8124,12 +8142,13 @@ dependencies = [ "alloy-primitives", "alloy-provider", "alloy-rpc-types-beacon", + "alloy-rpc-types-engine", + "alloy-rpc-types-eth", "alloy-signer", "alloy-sol-types", "eyre", "futures", "rand 0.8.5", - "reth", "reth-basic-payload-builder", "reth-beacon-consensus", "reth-chainspec", @@ -8144,11 +8163,14 @@ dependencies = [ "reth-network", "reth-node-api", "reth-node-builder", + "reth-node-core", "reth-payload-builder", + "reth-payload-primitives", "reth-primitives", "reth-provider", "reth-revm", "reth-rpc", + "reth-rpc-eth-api", "reth-scroll-revm", "reth-tasks", "reth-tracing", @@ -8261,6 +8283,7 @@ dependencies = [ "reth-downloaders", "reth-errors", "reth-execution-types", + "reth-fs-util", "reth-network-p2p", "reth-node-builder", "reth-node-core", @@ -8359,7 +8382,6 @@ dependencies = [ "op-alloy-consensus", "op-alloy-rpc-types-engine", "parking_lot", - "reth", "reth-basic-payload-builder", "reth-beacon-consensus", "reth-chainspec", @@ -8371,6 +8393,7 @@ dependencies = [ "reth-network", "reth-node-api", "reth-node-builder", + "reth-node-core", "reth-optimism-chainspec", "reth-optimism-consensus", "reth-optimism-evm", @@ -8381,11 +8404,13 @@ dependencies = [ "reth-optimism-rpc", "reth-payload-builder", "reth-payload-util", + "reth-payload-validator", "reth-primitives", "reth-provider", "reth-revm", "reth-rpc-server-types", "reth-scroll-revm", + "reth-tasks", "reth-tracing", "reth-transaction-pool", "reth-trie-db", @@ -8444,7 +8469,6 @@ dependencies = [ "derive_more 1.0.0", "op-alloy-consensus", "reth-codecs", - "reth-node-types", "reth-primitives", "reth-primitives-traits", "rstest", @@ -8479,6 +8503,7 @@ dependencies = [ "reth-optimism-evm", "reth-optimism-forks", "reth-optimism-payload-builder", + "reth-optimism-primitives", "reth-primitives", "reth-provider", "reth-rpc", @@ -8710,6 +8735,7 @@ dependencies = [ name = "reth-prune" version = "1.1.2" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", 
"assert_matches", @@ -8823,6 +8849,7 @@ dependencies = [ "reth-network-types", "reth-payload-validator", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-revm", "reth-rpc-api", @@ -9326,6 +9353,7 @@ dependencies = [ "reth-stages-types", "reth-storage-errors", "reth-trie", + "reth-trie-db", ] [[package]] @@ -9367,6 +9395,7 @@ dependencies = [ "alloy-primitives", "rand 0.8.5", "reth-primitives", + "reth-primitives-traits", "secp256k1", ] @@ -9449,7 +9478,6 @@ dependencies = [ "alloy-rlp", "alloy-trie", "auto_impl", - "bincode", "criterion", "itertools 0.13.0", "metrics", @@ -9464,9 +9492,7 @@ dependencies = [ "reth-stages-types", "reth-storage-errors", "reth-trie-common", - "serde", "serde_json", - "serde_with", "tracing", "triehash", ] @@ -9481,7 +9507,9 @@ dependencies = [ "alloy-rlp", "alloy-trie", "arbitrary", + "bincode", "bytes", + "criterion", "derive_more 1.0.0", "hash-db", "itertools 0.13.0", @@ -9494,6 +9522,8 @@ dependencies = [ "reth-scroll-primitives", "reth-scroll-revm", "serde", + "serde_json", + "serde_with", ] [[package]] @@ -9559,6 +9589,7 @@ version = "1.1.2" dependencies = [ "alloy-primitives", "alloy-rlp", + "arbitrary", "assert_matches", "criterion", "itertools 0.13.0", @@ -10710,9 +10741,9 @@ dependencies = [ [[package]] name = "sysinfo" -version = "0.31.4" +version = "0.32.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "355dbe4f8799b304b05e1b0f05fc59b2a18d36645cf169607da45bde2f69a1be" +checksum = "4c33cd241af0f2e9e3b5c32163b873b29956890b5342e6745b917ce9d490f4af" dependencies = [ "core-foundation-sys", "libc", diff --git a/Cargo.toml b/Cargo.toml index a9a188ec2c41..72809cc2610b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -427,6 +427,7 @@ reth-trie = { path = "crates/trie/trie" } reth-trie-common = { path = "crates/trie/common" } reth-trie-db = { path = "crates/trie/db" } reth-trie-parallel = { path = "crates/trie/parallel" } +reth-trie-sparse = { path = "crates/trie/sparse" } # revm revm = 
{ package = "reth-scroll-revm", path = "crates/scroll/revm", default-features = false } diff --git a/bin/reth-bench/src/bench/new_payload_fcu.rs b/bin/reth-bench/src/bench/new_payload_fcu.rs index dd2f863e2c94..9e573a8957e1 100644 --- a/bin/reth-bench/src/bench/new_payload_fcu.rs +++ b/bin/reth-bench/src/bench/new_payload_fcu.rs @@ -18,7 +18,7 @@ use clap::Parser; use csv::Writer; use reth_cli_runner::CliContext; use reth_node_core::args::BenchmarkArgs; -use reth_primitives::Block; +use reth_primitives::{Block, BlockExt}; use reth_rpc_types_compat::engine::payload::block_to_payload; use std::time::Instant; use tracing::{debug, info}; @@ -75,11 +75,11 @@ impl Command { while let Some((block, head, safe, finalized)) = receiver.recv().await { // just put gas used here - let gas_used = block.header.gas_used; + let gas_used = block.gas_used; let block_number = block.header.number; let versioned_hashes: Vec = - block.blob_versioned_hashes().into_iter().copied().collect(); + block.body.blob_versioned_hashes().into_iter().copied().collect(); let parent_beacon_block_root = block.parent_beacon_block_root; let payload = block_to_payload(block); diff --git a/bin/reth-bench/src/bench/new_payload_only.rs b/bin/reth-bench/src/bench/new_payload_only.rs index 68b2f76527df..0611faabf101 100644 --- a/bin/reth-bench/src/bench/new_payload_only.rs +++ b/bin/reth-bench/src/bench/new_payload_only.rs @@ -16,7 +16,7 @@ use clap::Parser; use csv::Writer; use reth_cli_runner::CliContext; use reth_node_core::args::BenchmarkArgs; -use reth_primitives::Block; +use reth_primitives::{Block, BlockExt}; use reth_rpc_types_compat::engine::payload::block_to_payload; use std::time::Instant; use tracing::{debug, info}; @@ -60,10 +60,10 @@ impl Command { while let Some(block) = receiver.recv().await { // just put gas used here - let gas_used = block.header.gas_used; + let gas_used = block.gas_used; let versioned_hashes: Vec = - block.blob_versioned_hashes().into_iter().copied().collect(); + 
block.body.blob_versioned_hashes().into_iter().copied().collect(); let parent_beacon_block_root = block.parent_beacon_block_root; let payload = block_to_payload(block); diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index aa89b4112c3b..dc00e07d8830 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -22,11 +22,11 @@ use reth_errors::RethResult; use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_execution_types::ExecutionOutcome; use reth_fs_util as fs; -use reth_node_api::{EngineApiMessageVersion, PayloadBuilderAttributes}; +use reth_node_api::{BlockTy, EngineApiMessageVersion, PayloadBuilderAttributes}; use reth_node_ethereum::{EthEvmConfig, EthExecutorProvider}; use reth_primitives::{ - BlobTransaction, PooledTransactionsElement, SealedBlock, SealedBlockWithSenders, SealedHeader, - Transaction, TransactionSigned, + BlobTransaction, BlockExt, PooledTransactionsElement, SealedBlockFor, SealedBlockWithSenders, + SealedHeader, Transaction, TransactionSigned, }; use reth_provider::{ providers::{BlockchainProvider, ProviderNodeTypes}, @@ -90,7 +90,7 @@ impl> Command { fn lookup_best_block>( &self, factory: ProviderFactory, - ) -> RethResult> { + ) -> RethResult>>> { let provider = factory.provider()?; let best_number = @@ -259,7 +259,7 @@ impl> Command { let senders = block.senders().expect("sender recovery failed"); let block_with_senders = - SealedBlockWithSenders::new(block.clone(), senders).unwrap(); + SealedBlockWithSenders::>::new(block.clone(), senders).unwrap(); let db = StateProviderDatabase::new(blockchain_db.latest()?); let executor = diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index ce5f318632e5..870dc1ddf233 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -19,11 
+19,13 @@ use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_execution_types::ExecutionOutcome; use reth_network::{BlockDownloaderProvider, NetworkHandle}; use reth_network_api::NetworkInfo; +use reth_node_api::{BlockTy, NodePrimitives}; use reth_node_ethereum::EthExecutorProvider; +use reth_primitives::BlockExt; use reth_provider::{ - providers::ProviderNodeTypes, writer::UnifiedStorageWriter, AccountExtReader, - ChainSpecProvider, HashingWriter, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, - ProviderFactory, StageCheckpointReader, StateWriter, StorageReader, + providers::ProviderNodeTypes, AccountExtReader, ChainSpecProvider, DatabaseProviderFactory, + HashingWriter, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderFactory, + StageCheckpointReader, StateWriter, StorageLocation, StorageReader, }; use reth_revm::database::StateProviderDatabase; use reth_stages::StageId; @@ -55,7 +57,15 @@ pub struct Command { } impl> Command { - async fn build_network>( + async fn build_network< + N: ProviderNodeTypes< + ChainSpec = C::ChainSpec, + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, + >( &self, config: &Config, task_executor: TaskExecutor, @@ -142,7 +152,7 @@ impl> Command { ( &block .clone() - .unseal() + .unseal::>() .with_recovered_senders() .ok_or(BlockValidationError::SenderRecoveryError)?, merkle_block_td + block.difficulty, @@ -162,7 +172,7 @@ impl> Command { return Ok(()) } - let provider_rw = provider_factory.provider_rw()?; + let provider_rw = provider_factory.database_provider_rw()?; // Insert block, state and hashes provider_rw.insert_historical_block( @@ -171,8 +181,11 @@ impl> Command { .try_seal_with_senders() .map_err(|_| BlockValidationError::SenderRecoveryError)?, )?; - let mut storage_writer = UnifiedStorageWriter::from_database(&provider_rw.0); - storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::No)?; + 
provider_rw.write_state( + execution_outcome, + OriginalValuesKnown::No, + StorageLocation::Database, + )?; let storage_lists = provider_rw.changed_storages_with_range(block.number..=block.number)?; let storages = provider_rw.plain_state_storages(storage_lists)?; provider_rw.insert_storage_for_hashing(storages)?; diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index bb8a6a2c4a10..78e32df52664 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -17,11 +17,12 @@ use reth_evm::execute::{BatchExecutor, BlockExecutorProvider}; use reth_network::{BlockDownloaderProvider, NetworkHandle}; use reth_network_api::NetworkInfo; use reth_network_p2p::full_block::FullBlockClient; +use reth_node_api::{BlockTy, NodePrimitives}; use reth_node_ethereum::EthExecutorProvider; use reth_provider::{ - providers::ProviderNodeTypes, writer::UnifiedStorageWriter, BlockNumReader, BlockWriter, - ChainSpecProvider, DatabaseProviderFactory, HeaderProvider, LatestStateProviderRef, - OriginalValuesKnown, ProviderError, ProviderFactory, StateWriter, StorageLocation, + providers::ProviderNodeTypes, BlockNumReader, BlockWriter, ChainSpecProvider, + DatabaseProviderFactory, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, + ProviderError, ProviderFactory, StateWriter, StorageLocation, }; use reth_revm::database::StateProviderDatabase; use reth_stages::{ @@ -55,7 +56,15 @@ pub struct Command { } impl> Command { - async fn build_network>( + async fn build_network< + N: ProviderNodeTypes< + ChainSpec = C::ChainSpec, + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, + >( &self, config: &Config, task_executor: TaskExecutor, @@ -144,7 +153,7 @@ impl> Command { for block in blocks.into_iter().rev() { let block_number = block.number; let sealed_block = block - .try_seal_with_senders() + .try_seal_with_senders::>() .map_err(|block| 
eyre::eyre!("Error sealing block with senders: {block:?}"))?; trace!(target: "reth::cli", block_number, "Executing block"); @@ -157,8 +166,11 @@ impl> Command { executor.execute_and_verify_one((&sealed_block.clone().unseal(), td).into())?; let execution_outcome = executor.finalize(); - let mut storage_writer = UnifiedStorageWriter::from_database(&provider_rw); - storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::Yes)?; + provider_rw.write_state( + execution_outcome, + OriginalValuesKnown::Yes, + StorageLocation::Database, + )?; let checkpoint = Some(StageCheckpoint::new( block_number diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index 7daead83a846..04d3b5763aef 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -18,7 +18,7 @@ use reth_engine_util::engine_store::{EngineMessageStore, StoredEngineApiMessage} use reth_fs_util as fs; use reth_network::{BlockDownloaderProvider, NetworkHandle}; use reth_network_api::NetworkInfo; -use reth_node_api::{EngineApiMessageVersion, NodeTypesWithDBAdapter}; +use reth_node_api::{EngineApiMessageVersion, NodePrimitives, NodeTypesWithDBAdapter}; use reth_node_ethereum::{EthEngineTypes, EthEvmConfig, EthExecutorProvider}; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_provider::{ @@ -55,7 +55,15 @@ pub struct Command { } impl> Command { - async fn build_network>( + async fn build_network< + N: ProviderNodeTypes< + ChainSpec = C::ChainSpec, + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, + >( &self, config: &Config, task_executor: TaskExecutor, diff --git a/book/installation/installation.md b/book/installation/installation.md index ebf6c8ef3f90..1df122d4d442 100644 --- a/book/installation/installation.md +++ b/book/installation/installation.md @@ -44,13 +44,13 @@ As of April 2024 at 
block number 19.6M: * Archive Node: At least 2.14TB is required * Full Node: At least 1.13TB is required -NVMe drives are recommended for the best performance, with SSDs being a cheaper alternative. HDDs are the cheapest option, but they will take the longest to sync, and are not recommended. +NVMe based SSD drives are recommended for the best performance, with SATA SSDs being a cheaper alternative. HDDs are the cheapest option, but they will take the longest to sync, and are not recommended. As of February 2024, syncing an Ethereum mainnet node to block 19.3M on NVMe drives takes about 50 hours, while on a GCP "Persistent SSD" it takes around 5 days. > **Note** > -> It is highly recommended to choose a TLC drive when using NVMe, and not a QLC drive. See [the note](#qlc-and-tlc) above. A list of recommended drives can be found [here]( https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038). +> It is highly recommended to choose a TLC drive when using an NVMe drive, and not a QLC drive. See [the note](#qlc-and-tlc) above. A list of recommended drives can be found [here]( https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038). ### CPU diff --git a/book/sources/exex/hello-world/src/bin/3.rs b/book/sources/exex/hello-world/src/bin/3.rs index 21bd25a56dbf..ebeaf6c84f19 100644 --- a/book/sources/exex/hello-world/src/bin/3.rs +++ b/book/sources/exex/hello-world/src/bin/3.rs @@ -1,10 +1,12 @@ use futures_util::TryStreamExt; -use reth::api::FullNodeComponents; +use reth::{api::FullNodeComponents, primitives::Block, providers::BlockReader}; use reth_exex::{ExExContext, ExExEvent, ExExNotification}; use reth_node_ethereum::EthereumNode; use reth_tracing::tracing::info; -async fn my_exex(mut ctx: ExExContext) -> eyre::Result<()> { +async fn my_exex>>( + mut ctx: ExExContext, +) -> eyre::Result<()> { while let Some(notification) = ctx.notifications.try_next().await? 
{ match ¬ification { ExExNotification::ChainCommitted { new } => { diff --git a/book/sources/exex/remote/src/exex.rs b/book/sources/exex/remote/src/exex.rs index 1ae4785db8b8..00392b4dad10 100644 --- a/book/sources/exex/remote/src/exex.rs +++ b/book/sources/exex/remote/src/exex.rs @@ -3,6 +3,7 @@ use remote_exex::proto::{ self, remote_ex_ex_server::{RemoteExEx, RemoteExExServer}, }; +use reth::{primitives::Block, providers::BlockReader}; use reth_exex::{ExExContext, ExExEvent, ExExNotification}; use reth_node_api::FullNodeComponents; use reth_node_ethereum::EthereumNode; @@ -44,7 +45,7 @@ impl RemoteExEx for ExExService { } } -async fn remote_exex( +async fn remote_exex>>( mut ctx: ExExContext, notifications: Arc>, ) -> eyre::Result<()> { diff --git a/book/sources/exex/remote/src/exex_4.rs b/book/sources/exex/remote/src/exex_4.rs index 24c7bf2c2f11..c37f26d739dc 100644 --- a/book/sources/exex/remote/src/exex_4.rs +++ b/book/sources/exex/remote/src/exex_4.rs @@ -3,6 +3,7 @@ use remote_exex::proto::{ self, remote_ex_ex_server::{RemoteExEx, RemoteExExServer}, }; +use reth::{primitives::Block, providers::BlockReader}; use reth_exex::{ExExContext, ExExEvent, ExExNotification}; use reth_node_api::FullNodeComponents; use reth_node_ethereum::EthereumNode; @@ -46,7 +47,7 @@ impl RemoteExEx for ExExService { // ANCHOR: snippet #[allow(dead_code)] -async fn remote_exex( +async fn remote_exex>>( mut ctx: ExExContext, notifications: Arc>, ) -> eyre::Result<()> { diff --git a/book/sources/exex/tracking-state/src/bin/1.rs b/book/sources/exex/tracking-state/src/bin/1.rs index 0d42e0791a17..2cf43bec3a17 100644 --- a/book/sources/exex/tracking-state/src/bin/1.rs +++ b/book/sources/exex/tracking-state/src/bin/1.rs @@ -5,7 +5,7 @@ use std::{ }; use futures_util::{FutureExt, TryStreamExt}; -use reth::api::FullNodeComponents; +use reth::{api::FullNodeComponents, primitives::Block, providers::BlockReader}; use reth_exex::{ExExContext, ExExEvent, ExExNotification}; use 
reth_node_ethereum::EthereumNode; use reth_tracing::tracing::info; @@ -14,7 +14,7 @@ struct MyExEx { ctx: ExExContext, } -impl Future for MyExEx { +impl>> Future for MyExEx { type Output = eyre::Result<()>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { diff --git a/book/sources/exex/tracking-state/src/bin/2.rs b/book/sources/exex/tracking-state/src/bin/2.rs index 9416810668f5..b58d2a39c85c 100644 --- a/book/sources/exex/tracking-state/src/bin/2.rs +++ b/book/sources/exex/tracking-state/src/bin/2.rs @@ -6,7 +6,7 @@ use std::{ use alloy_primitives::BlockNumber; use futures_util::{FutureExt, TryStreamExt}; -use reth::api::FullNodeComponents; +use reth::{api::FullNodeComponents, primitives::Block, providers::BlockReader}; use reth_exex::{ExExContext, ExExEvent}; use reth_node_ethereum::EthereumNode; use reth_tracing::tracing::info; @@ -25,7 +25,7 @@ impl MyExEx { } } -impl Future for MyExEx { +impl>> Future for MyExEx { type Output = eyre::Result<()>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index f74361320d1a..de33d717a8a7 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1589,7 +1589,9 @@ mod tests { body: Vec, num_of_signer_txs: u64| -> SealedBlockWithSenders { - let transactions_root = calculate_transaction_root(&body); + let signed_body = + body.clone().into_iter().map(|tx| tx.into_signed()).collect::>(); + let transactions_root = calculate_transaction_root(&signed_body); let receipts = body .iter() .enumerate() @@ -1635,7 +1637,7 @@ mod tests { SealedBlock { header: SealedHeader::seal(header), body: BlockBody { - transactions: body.clone().into_iter().map(|tx| tx.into_signed()).collect(), + transactions: signed_body, ommers: Vec::new(), withdrawals: Some(Withdrawals::default()), }, diff --git a/crates/blockchain-tree/src/externals.rs 
b/crates/blockchain-tree/src/externals.rs index 2b9dae9a3dfd..2a825921f893 100644 --- a/crates/blockchain-tree/src/externals.rs +++ b/crates/blockchain-tree/src/externals.rs @@ -4,8 +4,8 @@ use alloy_primitives::{BlockHash, BlockNumber}; use reth_consensus::Consensus; use reth_db::{static_file::BlockHashMask, tables}; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; -use reth_node_types::{FullNodePrimitives, NodeTypesWithDB}; -use reth_primitives::{BlockBody, StaticFileSegment}; +use reth_node_types::NodeTypesWithDB; +use reth_primitives::StaticFileSegment; use reth_provider::{ providers::ProviderNodeTypes, ChainStateBlockReader, ChainStateBlockWriter, ProviderFactory, StaticFileProviderFactory, StatsReader, @@ -13,15 +13,7 @@ use reth_provider::{ use reth_storage_errors::provider::ProviderResult; use std::{collections::BTreeMap, sync::Arc}; -/// A helper trait with requirements for [`ProviderNodeTypes`] to be used within [`TreeExternals`]. -pub trait TreeNodeTypes: - ProviderNodeTypes> -{ -} -impl TreeNodeTypes for T where - T: ProviderNodeTypes> -{ -} +pub use reth_provider::providers::{NodeTypesForTree, TreeNodeTypes}; /// A container for external components. 
/// diff --git a/crates/blockchain-tree/src/noop.rs b/crates/blockchain-tree/src/noop.rs index 862b02e76070..f5d2ad8c6f78 100644 --- a/crates/blockchain-tree/src/noop.rs +++ b/crates/blockchain-tree/src/noop.rs @@ -6,10 +6,10 @@ use reth_blockchain_tree_api::{ BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, InsertPayloadOk, }; -use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{EthPrimitives, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ BlockchainTreePendingStateProvider, CanonStateNotificationSender, CanonStateNotifications, - CanonStateSubscriptions, FullExecutionDataProvider, + CanonStateSubscriptions, FullExecutionDataProvider, NodePrimitivesProvider, }; use reth_storage_errors::provider::ProviderResult; use std::collections::BTreeMap; @@ -126,6 +126,10 @@ impl BlockchainTreePendingStateProvider for NoopBlockchainTree { } } +impl NodePrimitivesProvider for NoopBlockchainTree { + type Primitives = EthPrimitives; +} + impl CanonStateSubscriptions for NoopBlockchainTree { fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { self.canon_state_notification_sender diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index f997e0a062d3..484b4b51869e 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -16,7 +16,7 @@ use reth_node_types::NodeTypesWithDB; use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ providers::ProviderNodeTypes, BlockchainTreePendingStateProvider, CanonStateNotifications, - CanonStateSubscriptions, FullExecutionDataProvider, ProviderError, + CanonStateSubscriptions, FullExecutionDataProvider, NodePrimitivesProvider, ProviderError, }; use reth_storage_errors::provider::ProviderResult; use std::{collections::BTreeMap, sync::Arc}; @@ -185,10 +185,18 @@ where } } -impl 
CanonStateSubscriptions for ShareableBlockchainTree +impl NodePrimitivesProvider for ShareableBlockchainTree where N: ProviderNodeTypes, E: Send + Sync, +{ + type Primitives = N::Primitives; +} + +impl CanonStateSubscriptions for ShareableBlockchainTree +where + N: TreeNodeTypes, + E: Send + Sync, { fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { trace!(target: "blockchain_tree", "Registered subscriber for canonical state"); diff --git a/crates/chain-state/Cargo.toml b/crates/chain-state/Cargo.toml index ff62b76e5dfb..d2ef5870947b 100644 --- a/crates/chain-state/Cargo.toml +++ b/crates/chain-state/Cargo.toml @@ -18,6 +18,7 @@ reth-errors.workspace = true reth-execution-types.workspace = true reth-metrics.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-storage-api.workspace = true reth-trie.workspace = true @@ -34,7 +35,6 @@ tokio-stream = { workspace = true, features = ["sync"] } tracing.workspace = true # misc -auto_impl.workspace = true derive_more.workspace = true metrics.workspace = true parking_lot.workspace = true @@ -62,6 +62,7 @@ test-utils = [ "revm", "reth-chainspec/test-utils", "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", "reth-trie/test-utils", "revm?/test-utils", ] diff --git a/crates/chain-state/src/chain_info.rs b/crates/chain-state/src/chain_info.rs index 3c75544ac460..1b8575005c40 100644 --- a/crates/chain-state/src/chain_info.rs +++ b/crates/chain-state/src/chain_info.rs @@ -1,8 +1,9 @@ +use alloy_consensus::BlockHeader; use alloy_eips::BlockNumHash; use alloy_primitives::BlockNumber; use parking_lot::RwLock; use reth_chainspec::ChainInfo; -use reth_primitives::SealedHeader; +use reth_primitives::{NodePrimitives, SealedHeader}; use std::{ sync::{ atomic::{AtomicU64, Ordering}, @@ -14,17 +15,21 @@ use tokio::sync::watch; /// Tracks the chain info: canonical head, safe block, finalized block. 
#[derive(Debug, Clone)] -pub struct ChainInfoTracker { - inner: Arc, +pub struct ChainInfoTracker { + inner: Arc>, } -impl ChainInfoTracker { +impl ChainInfoTracker +where + N: NodePrimitives, + N::BlockHeader: BlockHeader, +{ /// Create a new chain info container for the given canonical head and finalized header if it /// exists. pub fn new( - head: SealedHeader, - finalized: Option, - safe: Option, + head: SealedHeader, + finalized: Option>, + safe: Option>, ) -> Self { let (finalized_block, _) = watch::channel(finalized); let (safe_block, _) = watch::channel(safe); @@ -33,7 +38,7 @@ impl ChainInfoTracker { inner: Arc::new(ChainInfoInner { last_forkchoice_update: RwLock::new(None), last_transition_configuration_exchange: RwLock::new(None), - canonical_head_number: AtomicU64::new(head.number), + canonical_head_number: AtomicU64::new(head.number()), canonical_head: RwLock::new(head), safe_block, finalized_block, @@ -44,7 +49,7 @@ impl ChainInfoTracker { /// Returns the [`ChainInfo`] for the canonical head. pub fn chain_info(&self) -> ChainInfo { let inner = self.inner.canonical_head.read(); - ChainInfo { best_hash: inner.hash(), best_number: inner.number } + ChainInfo { best_hash: inner.hash(), best_number: inner.number() } } /// Update the timestamp when we received a forkchoice update. @@ -68,17 +73,17 @@ impl ChainInfoTracker { } /// Returns the canonical head of the chain. - pub fn get_canonical_head(&self) -> SealedHeader { + pub fn get_canonical_head(&self) -> SealedHeader { self.inner.canonical_head.read().clone() } /// Returns the safe header of the chain. - pub fn get_safe_header(&self) -> Option { + pub fn get_safe_header(&self) -> Option> { self.inner.safe_block.borrow().clone() } /// Returns the finalized header of the chain. 
- pub fn get_finalized_header(&self) -> Option { + pub fn get_finalized_header(&self) -> Option> { self.inner.finalized_block.borrow().clone() } @@ -104,8 +109,8 @@ impl ChainInfoTracker { } /// Sets the canonical head of the chain. - pub fn set_canonical_head(&self, header: SealedHeader) { - let number = header.number; + pub fn set_canonical_head(&self, header: SealedHeader) { + let number = header.number(); *self.inner.canonical_head.write() = header; // also update the atomic number. @@ -113,7 +118,7 @@ impl ChainInfoTracker { } /// Sets the safe header of the chain. - pub fn set_safe(&self, header: SealedHeader) { + pub fn set_safe(&self, header: SealedHeader) { self.inner.safe_block.send_if_modified(|current_header| { if current_header.as_ref().map(SealedHeader::hash) != Some(header.hash()) { let _ = current_header.replace(header); @@ -125,7 +130,7 @@ impl ChainInfoTracker { } /// Sets the finalized header of the chain. - pub fn set_finalized(&self, header: SealedHeader) { + pub fn set_finalized(&self, header: SealedHeader) { self.inner.finalized_block.send_if_modified(|current_header| { if current_header.as_ref().map(SealedHeader::hash) != Some(header.hash()) { let _ = current_header.replace(header); @@ -137,19 +142,21 @@ impl ChainInfoTracker { } /// Subscribe to the finalized block. - pub fn subscribe_finalized_block(&self) -> watch::Receiver> { + pub fn subscribe_finalized_block( + &self, + ) -> watch::Receiver>> { self.inner.finalized_block.subscribe() } /// Subscribe to the safe block. - pub fn subscribe_safe_block(&self) -> watch::Receiver> { + pub fn subscribe_safe_block(&self) -> watch::Receiver>> { self.inner.safe_block.subscribe() } } /// Container type for all chain info fields #[derive(Debug)] -struct ChainInfoInner { +struct ChainInfoInner { /// Timestamp when we received the last fork choice update. /// /// This is mainly used to track if we're connected to a beacon node. 
@@ -161,16 +168,17 @@ struct ChainInfoInner { /// Tracks the number of the `canonical_head`. canonical_head_number: AtomicU64, /// The canonical head of the chain. - canonical_head: RwLock, + canonical_head: RwLock>, /// The block that the beacon node considers safe. - safe_block: watch::Sender>, + safe_block: watch::Sender>>, /// The block that the beacon node considers finalized. - finalized_block: watch::Sender>, + finalized_block: watch::Sender>>, } #[cfg(test)] mod tests { use super::*; + use reth_primitives::EthPrimitives; use reth_testing_utils::{generators, generators::random_header}; #[test] @@ -180,7 +188,8 @@ mod tests { let header = random_header(&mut rng, 10, None); // Create a new chain info tracker with the header - let tracker = ChainInfoTracker::new(header.clone(), None, None); + let tracker: ChainInfoTracker = + ChainInfoTracker::new(header.clone(), None, None); // Fetch the chain information from the tracker let chain_info = tracker.chain_info(); @@ -197,7 +206,7 @@ mod tests { let header = random_header(&mut rng, 10, None); // Create a new chain info tracker with the header - let tracker = ChainInfoTracker::new(header, None, None); + let tracker: ChainInfoTracker = ChainInfoTracker::new(header, None, None); // Assert that there has been no forkchoice update yet (the timestamp is None) assert!(tracker.last_forkchoice_update_received_at().is_none()); @@ -216,7 +225,7 @@ mod tests { let header = random_header(&mut rng, 10, None); // Create a new chain info tracker with the header - let tracker = ChainInfoTracker::new(header, None, None); + let tracker: ChainInfoTracker = ChainInfoTracker::new(header, None, None); // Assert that there has been no transition configuration exchange yet (the timestamp is // None) @@ -239,7 +248,7 @@ mod tests { let header2 = random_header(&mut rng, 20, None); // Create a new chain info tracker with the first header - let tracker = ChainInfoTracker::new(header1, None, None); + let tracker: ChainInfoTracker = 
ChainInfoTracker::new(header1, None, None); // Set the second header as the canonical head of the tracker tracker.set_canonical_head(header2.clone()); @@ -260,7 +269,7 @@ mod tests { let header2 = random_header(&mut rng, 20, None); // Create a new chain info tracker with the first header (header1) - let tracker = ChainInfoTracker::new(header1, None, None); + let tracker: ChainInfoTracker = ChainInfoTracker::new(header1, None, None); // Call the set_safe method with the second header (header2) tracker.set_safe(header2.clone()); @@ -306,7 +315,7 @@ mod tests { let header3 = random_header(&mut rng, 30, None); // Create a new chain info tracker with the first header - let tracker = ChainInfoTracker::new(header1, None, None); + let tracker: ChainInfoTracker = ChainInfoTracker::new(header1, None, None); // Initial state: finalize header should be None assert!(tracker.get_finalized_header().is_none()); @@ -343,7 +352,7 @@ mod tests { let finalized_header = random_header(&mut rng, 10, None); // Create a new chain info tracker with the finalized header - let tracker = + let tracker: ChainInfoTracker = ChainInfoTracker::new(finalized_header.clone(), Some(finalized_header.clone()), None); // Assert that the BlockNumHash returned matches the finalized header @@ -357,7 +366,8 @@ mod tests { let safe_header = random_header(&mut rng, 10, None); // Create a new chain info tracker with the safe header - let tracker = ChainInfoTracker::new(safe_header.clone(), None, None); + let tracker: ChainInfoTracker = + ChainInfoTracker::new(safe_header.clone(), None, None); tracker.set_safe(safe_header.clone()); // Assert that the BlockNumHash returned matches the safe header diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 24f394a761f5..f43aae562e00 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -4,17 +4,18 @@ use crate::{ CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, 
ChainInfoTracker, MemoryOverlayStateProvider, }; -use alloy_consensus::Header; -use alloy_eips::{BlockHashOrNumber, BlockNumHash}; +use alloy_consensus::BlockHeader; +use alloy_eips::{eip2718::Encodable2718, BlockHashOrNumber, BlockNumHash}; use alloy_primitives::{map::HashMap, Address, TxHash, B256}; use parking_lot::RwLock; use reth_chainspec::ChainInfo; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_metrics::{metrics::Gauge, Metrics}; use reth_primitives::{ - BlockWithSenders, NodePrimitives, Receipts, SealedBlock, SealedBlockWithSenders, SealedHeader, - TransactionMeta, TransactionSigned, + BlockWithSenders, HeaderExt, NodePrimitives, Receipts, SealedBlock, SealedBlockFor, + SealedBlockWithSenders, SealedHeader, TransactionMeta, }; +use reth_primitives_traits::{Block, BlockBody as _, SignedTransaction}; use reth_storage_api::StateProviderBox; use reth_trie::{updates::TrieUpdates, HashedPostState}; use std::{collections::BTreeMap, sync::Arc, time::Instant}; @@ -134,7 +135,7 @@ impl InMemoryState { pub(crate) struct CanonicalInMemoryStateInner { /// Tracks certain chain information, such as the canonical head, safe head, and finalized /// head. - pub(crate) chain_info_tracker: ChainInfoTracker, + pub(crate) chain_info_tracker: ChainInfoTracker, /// Tracks blocks at the tip of the chain that have not been persisted to disk yet. pub(crate) in_memory_state: InMemoryState, /// A broadcast stream that emits events when the canonical chain is updated. @@ -158,6 +159,9 @@ impl CanonicalInMemoryStateInner { } } +type PendingBlockAndReceipts = + (SealedBlockFor<::Block>, Vec>); + /// This type is responsible for providing the blocks, receipts, and state for /// all canonical blocks not on disk yet and keeps track of the block range that /// is in memory. 
@@ -173,8 +177,8 @@ impl CanonicalInMemoryState { blocks: HashMap>>, numbers: BTreeMap, pending: Option>, - finalized: Option, - safe: Option, + finalized: Option>, + safe: Option>, ) -> Self { let in_memory_state = InMemoryState::new(blocks, numbers, pending); let header = in_memory_state @@ -201,9 +205,9 @@ impl CanonicalInMemoryState { /// Create a new in memory state with the given local head and finalized header /// if it exists. pub fn with_head( - head: SealedHeader, - finalized: Option, - safe: Option, + head: SealedHeader, + finalized: Option>, + safe: Option>, ) -> Self { let chain_info_tracker = ChainInfoTracker::new(head, finalized, safe); let in_memory_state = InMemoryState::default(); @@ -224,7 +228,7 @@ impl CanonicalInMemoryState { } /// Returns the header corresponding to the given hash. - pub fn header_by_hash(&self, hash: B256) -> Option { + pub fn header_by_hash(&self, hash: B256) -> Option> { self.state_by_hash(hash).map(|block| block.block_ref().block.header.clone()) } @@ -238,7 +242,7 @@ impl CanonicalInMemoryState { /// Note: This assumes that the parent block of the pending block is canonical. 
pub fn set_pending_block(&self, pending: ExecutedBlock) { // fetch the state of the pending block's parent block - let parent = self.state_by_hash(pending.block().parent_hash); + let parent = self.state_by_hash(pending.block().parent_hash()); let pending = BlockState::with_parent(pending, parent); self.inner.in_memory_state.pending.send_modify(|p| { p.replace(pending); @@ -262,14 +266,14 @@ impl CanonicalInMemoryState { // we first remove the blocks from the reorged chain for block in reorged { let hash = block.block().hash(); - let number = block.block().number; + let number = block.block().number(); blocks.remove(&hash); numbers.remove(&number); } // insert the new blocks for block in new_blocks { - let parent = blocks.get(&block.block().parent_hash).cloned(); + let parent = blocks.get(&block.block().parent_hash()).cloned(); let block_state = BlockState::with_parent(block, parent); let hash = block_state.hash(); let number = block_state.number(); @@ -329,16 +333,16 @@ impl CanonicalInMemoryState { // height) let mut old_blocks = blocks .drain() - .filter(|(_, b)| b.block_ref().block().number > persisted_height) + .filter(|(_, b)| b.block_ref().block().number() > persisted_height) .map(|(_, b)| b.block.clone()) .collect::>(); // sort the blocks by number so we can insert them back in natural order (low -> high) - old_blocks.sort_unstable_by_key(|block| block.block().number); + old_blocks.sort_unstable_by_key(|block| block.block().number()); // re-insert the blocks in natural order and connect them to their parent blocks for block in old_blocks { - let parent = blocks.get(&block.block().parent_hash).cloned(); + let parent = blocks.get(&block.block().parent_hash()).cloned(); let block_state = BlockState::with_parent(block, parent); let hash = block_state.hash(); let number = block_state.number(); @@ -351,7 +355,7 @@ impl CanonicalInMemoryState { // also shift the pending state if it exists self.inner.in_memory_state.pending.send_modify(|p| { if let Some(p) = 
p.as_mut() { - p.parent = blocks.get(&p.block_ref().block.parent_hash).cloned(); + p.parent = blocks.get(&p.block_ref().block.parent_hash()).cloned(); } }); } @@ -427,59 +431,62 @@ impl CanonicalInMemoryState { } /// Canonical head setter. - pub fn set_canonical_head(&self, header: SealedHeader) { + pub fn set_canonical_head(&self, header: SealedHeader) { self.inner.chain_info_tracker.set_canonical_head(header); } /// Safe head setter. - pub fn set_safe(&self, header: SealedHeader) { + pub fn set_safe(&self, header: SealedHeader) { self.inner.chain_info_tracker.set_safe(header); } /// Finalized head setter. - pub fn set_finalized(&self, header: SealedHeader) { + pub fn set_finalized(&self, header: SealedHeader) { self.inner.chain_info_tracker.set_finalized(header); } /// Canonical head getter. - pub fn get_canonical_head(&self) -> SealedHeader { + pub fn get_canonical_head(&self) -> SealedHeader { self.inner.chain_info_tracker.get_canonical_head() } /// Finalized header getter. - pub fn get_finalized_header(&self) -> Option { + pub fn get_finalized_header(&self) -> Option> { self.inner.chain_info_tracker.get_finalized_header() } /// Safe header getter. - pub fn get_safe_header(&self) -> Option { + pub fn get_safe_header(&self) -> Option> { self.inner.chain_info_tracker.get_safe_header() } /// Returns the `SealedHeader` corresponding to the pending state. - pub fn pending_sealed_header(&self) -> Option { + pub fn pending_sealed_header(&self) -> Option> { self.pending_state().map(|h| h.block_ref().block().header.clone()) } /// Returns the `Header` corresponding to the pending state. - pub fn pending_header(&self) -> Option
{ + pub fn pending_header(&self) -> Option { self.pending_sealed_header().map(|sealed_header| sealed_header.unseal()) } /// Returns the `SealedBlock` corresponding to the pending state. - pub fn pending_block(&self) -> Option { + pub fn pending_block(&self) -> Option> { self.pending_state().map(|block_state| block_state.block_ref().block().clone()) } /// Returns the `SealedBlockWithSenders` corresponding to the pending state. - pub fn pending_block_with_senders(&self) -> Option { + pub fn pending_block_with_senders(&self) -> Option> + where + N::SignedTx: SignedTransaction, + { self.pending_state() .and_then(|block_state| block_state.block_ref().block().clone().seal_with_senders()) } /// Returns a tuple with the `SealedBlock` corresponding to the pending /// state and a vector of its `Receipt`s. - pub fn pending_block_and_receipts(&self) -> Option<(SealedBlock, Vec)> { + pub fn pending_block_and_receipts(&self) -> Option> { self.pending_state().map(|block_state| { (block_state.block_ref().block().clone(), block_state.executed_block_receipts()) }) @@ -491,12 +498,14 @@ impl CanonicalInMemoryState { } /// Subscribe to new safe block events. - pub fn subscribe_safe_block(&self) -> watch::Receiver> { + pub fn subscribe_safe_block(&self) -> watch::Receiver>> { self.inner.chain_info_tracker.subscribe_safe_block() } /// Subscribe to new finalized block events. - pub fn subscribe_finalized_block(&self) -> watch::Receiver> { + pub fn subscribe_finalized_block( + &self, + ) -> watch::Receiver>> { self.inner.chain_info_tracker.subscribe_finalized_block() } @@ -532,10 +541,18 @@ impl CanonicalInMemoryState { } /// Returns a `TransactionSigned` for the given `TxHash` if found. 
- pub fn transaction_by_hash(&self, hash: TxHash) -> Option { + pub fn transaction_by_hash(&self, hash: TxHash) -> Option + where + N::SignedTx: Encodable2718, + { for block_state in self.canonical_chain() { - if let Some(tx) = - block_state.block_ref().block().body.transactions().find(|tx| tx.hash() == hash) + if let Some(tx) = block_state + .block_ref() + .block() + .body + .transactions() + .iter() + .find(|tx| tx.trie_hash() == hash) { return Some(tx.clone()) } @@ -548,24 +565,28 @@ impl CanonicalInMemoryState { pub fn transaction_by_hash_with_meta( &self, tx_hash: TxHash, - ) -> Option<(TransactionSigned, TransactionMeta)> { + ) -> Option<(N::SignedTx, TransactionMeta)> + where + N::SignedTx: Encodable2718, + { for block_state in self.canonical_chain() { if let Some((index, tx)) = block_state .block_ref() .block() .body .transactions() + .iter() .enumerate() - .find(|(_, tx)| tx.hash() == tx_hash) + .find(|(_, tx)| tx.trie_hash() == tx_hash) { let meta = TransactionMeta { tx_hash, index: index as u64, block_hash: block_state.hash(), - block_number: block_state.block_ref().block.number, - base_fee: block_state.block_ref().block.header.base_fee_per_gas, - timestamp: block_state.block_ref().block.timestamp, - excess_blob_gas: block_state.block_ref().block.excess_blob_gas, + block_number: block_state.block_ref().block.number(), + base_fee: block_state.block_ref().block.header.base_fee_per_gas(), + timestamp: block_state.block_ref().block.timestamp(), + excess_blob_gas: block_state.block_ref().block.excess_blob_gas(), }; return Some((tx.clone(), meta)) } @@ -616,14 +637,15 @@ impl BlockState { } /// Returns the block with senders for the state. 
- pub fn block_with_senders(&self) -> BlockWithSenders { + pub fn block_with_senders(&self) -> BlockWithSenders { let block = self.block.block().clone(); let senders = self.block.senders().clone(); - BlockWithSenders::new_unchecked(block.unseal(), senders) + let (header, body) = block.split(); + BlockWithSenders::new_unchecked(N::Block::new(header.unseal(), body), senders) } /// Returns the sealed block with senders for the state. - pub fn sealed_block_with_senders(&self) -> SealedBlockWithSenders { + pub fn sealed_block_with_senders(&self) -> SealedBlockWithSenders { let block = self.block.block().clone(); let senders = self.block.senders().clone(); SealedBlockWithSenders { block, senders } @@ -636,13 +658,13 @@ impl BlockState { /// Returns the block number of executed block that determines the state. pub fn number(&self) -> u64 { - self.block.block().number + self.block.block().number() } /// Returns the state root after applying the executed block that determines /// the state. pub fn state_root(&self) -> B256 { - self.block.block().header.state_root + self.block.block().header.state_root() } /// Returns the `Receipts` of executed block that determines the state. @@ -728,14 +750,18 @@ impl BlockState { } /// Tries to find a transaction by [`TxHash`] in the chain ending at this block. 
- pub fn transaction_on_chain(&self, hash: TxHash) -> Option { + pub fn transaction_on_chain(&self, hash: TxHash) -> Option + where + N::SignedTx: Encodable2718, + { self.chain().find_map(|block_state| { block_state .block_ref() .block() .body .transactions() - .find(|tx| tx.hash() == hash) + .iter() + .find(|tx| tx.trie_hash() == hash) .cloned() }) } @@ -744,24 +770,28 @@ impl BlockState { pub fn transaction_meta_on_chain( &self, tx_hash: TxHash, - ) -> Option<(TransactionSigned, TransactionMeta)> { + ) -> Option<(N::SignedTx, TransactionMeta)> + where + N::SignedTx: Encodable2718, + { self.chain().find_map(|block_state| { block_state .block_ref() .block() .body .transactions() + .iter() .enumerate() - .find(|(_, tx)| tx.hash() == tx_hash) + .find(|(_, tx)| tx.trie_hash() == tx_hash) .map(|(index, tx)| { let meta = TransactionMeta { tx_hash, index: index as u64, block_hash: block_state.hash(), - block_number: block_state.block_ref().block.number, - base_fee: block_state.block_ref().block.header.base_fee_per_gas, - timestamp: block_state.block_ref().block.timestamp, - excess_blob_gas: block_state.block_ref().block.excess_blob_gas, + block_number: block_state.block_ref().block.number(), + base_fee: block_state.block_ref().block.header.base_fee_per_gas(), + timestamp: block_state.block_ref().block.timestamp(), + excess_blob_gas: block_state.block_ref().block.excess_blob_gas(), }; (tx.clone(), meta) }) @@ -773,7 +803,7 @@ impl BlockState { #[derive(Clone, Debug, PartialEq, Eq, Default)] pub struct ExecutedBlock { /// Sealed block the rest of fields refer to. - pub block: Arc, + pub block: Arc>, /// Block's senders. pub senders: Arc>, /// Block's execution outcome. @@ -787,7 +817,7 @@ pub struct ExecutedBlock { impl ExecutedBlock { /// [`ExecutedBlock`] constructor. pub const fn new( - block: Arc, + block: Arc>, senders: Arc>, execution_output: Arc>, hashed_state: Arc, @@ -797,7 +827,7 @@ impl ExecutedBlock { } /// Returns a reference to the executed block. 
- pub fn block(&self) -> &SealedBlock { + pub fn block(&self) -> &SealedBlockFor { &self.block } @@ -809,7 +839,7 @@ impl ExecutedBlock { /// Returns a [`SealedBlockWithSenders`] /// /// Note: this clones the block and senders. - pub fn sealed_block_with_senders(&self) -> SealedBlockWithSenders { + pub fn sealed_block_with_senders(&self) -> SealedBlockWithSenders { SealedBlockWithSenders { block: (*self.block).clone(), senders: (*self.senders).clone() } } @@ -847,7 +877,7 @@ pub enum NewCanonicalChain { }, } -impl NewCanonicalChain { +impl> NewCanonicalChain { /// Returns the length of the new chain. pub fn new_block_count(&self) -> usize { match self { @@ -900,7 +930,7 @@ impl NewCanonicalChain { /// /// Returns the new tip for [`Self::Reorg`] and [`Self::Commit`] variants which commit at least /// 1 new block. - pub fn tip(&self) -> &SealedBlock { + pub fn tip(&self) -> &SealedBlockFor { match self { Self::Commit { new } | Self::Reorg { new, .. } => { new.last().expect("non empty blocks").block() @@ -922,7 +952,9 @@ mod tests { AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider, StorageRootProvider, }; - use reth_trie::{AccountProof, HashedStorage, MultiProof, StorageProof, TrieInput}; + use reth_trie::{ + AccountProof, HashedStorage, MultiProof, StorageMultiProof, StorageProof, TrieInput, + }; fn create_mock_state( test_block_builder: &mut TestBlockBuilder, @@ -1032,6 +1064,15 @@ mod tests { ) -> ProviderResult { Ok(StorageProof::new(slot)) } + + fn storage_multiproof( + &self, + _address: Address, + _slots: &[B256], + _hashed_storage: HashedStorage, + ) -> ProviderResult { + Ok(StorageMultiProof::empty()) + } } impl StateProofProvider for MockStateProvider { diff --git a/crates/chain-state/src/memory_overlay.rs b/crates/chain-state/src/memory_overlay.rs index 88cd411d38b2..c84bd8c93f06 100644 --- a/crates/chain-state/src/memory_overlay.rs +++ b/crates/chain-state/src/memory_overlay.rs @@ -1,4 +1,5 @@ use super::ExecutedBlock; 
+use alloy_consensus::BlockHeader; use alloy_primitives::{ keccak256, map::{HashMap, HashSet}, @@ -11,7 +12,8 @@ use reth_storage_api::{ StorageRootProvider, }; use reth_trie::{ - updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, + StorageMultiProof, TrieInput, }; use std::sync::OnceLock; @@ -74,7 +76,7 @@ macro_rules! impl_state_provider { impl $($tokens)* BlockHashReader for $type { fn block_hash(&self, number: BlockNumber) -> ProviderResult> { for block in &self.in_memory { - if block.block.number == number { + if block.block.number() == number { return Ok(Some(block.block.hash())) } } @@ -91,9 +93,9 @@ macro_rules! impl_state_provider { let mut earliest_block_number = None; let mut in_memory_hashes = Vec::new(); for block in &self.in_memory { - if range.contains(&block.block.number) { + if range.contains(&block.block.number()) { in_memory_hashes.insert(0, block.block.hash()); - earliest_block_number = Some(block.block.number); + earliest_block_number = Some(block.block.number()); } } @@ -167,6 +169,20 @@ macro_rules! impl_state_provider { hashed_storage.extend(&storage); self.historical.storage_proof(address, slot, hashed_storage) } + + // TODO: Currently this does not reuse available in-memory trie nodes. 
+ fn storage_multiproof( + &self, + address: Address, + slots: &[B256], + storage: HashedStorage, + ) -> ProviderResult { + let state = &self.trie_state().state; + let mut hashed_storage = + state.storages.get(&keccak256(address)).cloned().unwrap_or_default(); + hashed_storage.extend(&storage); + self.historical.storage_multiproof(address, slots, hashed_storage) + } } impl $($tokens)* StateProofProvider for $type { diff --git a/crates/chain-state/src/notifications.rs b/crates/chain-state/src/notifications.rs index 03d740d3d133..c4e0415436a5 100644 --- a/crates/chain-state/src/notifications.rs +++ b/crates/chain-state/src/notifications.rs @@ -1,9 +1,10 @@ //! Canonical chain state notification trait and types. -use auto_impl::auto_impl; +use alloy_eips::eip2718::Encodable2718; use derive_more::{Deref, DerefMut}; use reth_execution_types::{BlockReceipts, Chain}; use reth_primitives::{NodePrimitives, SealedBlockWithSenders, SealedHeader}; +use reth_storage_api::NodePrimitivesProvider; use std::{ pin::Pin, sync::Arc, @@ -25,21 +26,30 @@ pub type CanonStateNotificationSender = broadcast::Sender>; /// A type that allows to register chain related event subscriptions. -#[auto_impl(&, Arc)] -pub trait CanonStateSubscriptions: Send + Sync { +pub trait CanonStateSubscriptions: NodePrimitivesProvider + Send + Sync { /// Get notified when a new canonical chain was imported. /// /// A canonical chain be one or more blocks, a reorg or a revert. - fn subscribe_to_canonical_state(&self) -> CanonStateNotifications; + fn subscribe_to_canonical_state(&self) -> CanonStateNotifications; /// Convenience method to get a stream of [`CanonStateNotification`]. 
- fn canonical_state_stream(&self) -> CanonStateNotificationStream { + fn canonical_state_stream(&self) -> CanonStateNotificationStream { CanonStateNotificationStream { st: BroadcastStream::new(self.subscribe_to_canonical_state()), } } } +impl CanonStateSubscriptions for &T { + fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { + (*self).subscribe_to_canonical_state() + } + + fn canonical_state_stream(&self) -> CanonStateNotificationStream { + (*self).canonical_state_stream() + } +} + /// A Stream of [`CanonStateNotification`]. #[derive(Debug)] #[pin_project::pin_project] @@ -113,7 +123,7 @@ impl CanonStateNotification { /// /// Returns the new tip for [`Self::Reorg`] and [`Self::Commit`] variants which commit at least /// 1 new block. - pub fn tip(&self) -> &SealedBlockWithSenders { + pub fn tip(&self) -> &SealedBlockWithSenders { match self { Self::Commit { new } | Self::Reorg { new, .. } => new.tip(), } @@ -124,7 +134,10 @@ impl CanonStateNotification { /// /// The boolean in the tuple (2nd element) denotes whether the receipt was from the reverted /// chain segment. - pub fn block_receipts(&self) -> Vec<(BlockReceipts, bool)> { + pub fn block_receipts(&self) -> Vec<(BlockReceipts, bool)> + where + N::SignedTx: Encodable2718, + { let mut receipts = Vec::new(); // get old receipts @@ -142,7 +155,9 @@ impl CanonStateNotification { /// Wrapper around a broadcast receiver that receives fork choice notifications. #[derive(Debug, Deref, DerefMut)] -pub struct ForkChoiceNotifications(pub watch::Receiver>); +pub struct ForkChoiceNotifications( + pub watch::Receiver>>, +); /// A trait that allows to register to fork choice related events /// and get notified when a new fork choice is available. 
diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs index af0c363fe486..f6b0a4f17723 100644 --- a/crates/chain-state/src/test_utils.rs +++ b/crates/chain-state/src/test_utils.rs @@ -14,9 +14,11 @@ use reth_chainspec::{ChainSpec, EthereumHardfork, MIN_TRANSACTION_GAS}; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{ proofs::{calculate_receipt_root, calculate_transaction_root, calculate_withdrawals_root}, - BlockBody, NodePrimitives, Receipt, Receipts, SealedBlock, SealedBlockWithSenders, - SealedHeader, Transaction, TransactionSigned, TransactionSignedEcRecovered, + BlockBody, EthPrimitives, NodePrimitives, Receipt, Receipts, SealedBlock, + SealedBlockWithSenders, SealedHeader, Transaction, TransactionSigned, + TransactionSignedEcRecovered, }; +use reth_storage_api::NodePrimitivesProvider; use reth_trie::{root::state_root_unhashed, updates::TrieUpdates, HashedPostState}; use revm::{db::BundleState, primitives::AccountInfo}; use std::{ @@ -139,7 +141,9 @@ impl TestBlockBuilder { gas_limit: self.chain_spec.max_gas_limit, mix_hash: B256::random(), base_fee_per_gas: Some(INITIAL_BASE_FEE), - transactions_root: calculate_transaction_root(&transactions), + transactions_root: calculate_transaction_root( + &transactions.clone().into_iter().map(|tx| tx.into_signed()).collect::>(), + ), receipts_root: calculate_receipt_root(&receipts), beneficiary: Address::random(), state_root: state_root_unhashed(HashMap::from([( @@ -312,6 +316,10 @@ impl TestCanonStateSubscriptions { } } +impl NodePrimitivesProvider for TestCanonStateSubscriptions { + type Primitives = EthPrimitives; +} + impl CanonStateSubscriptions for TestCanonStateSubscriptions { /// Sets up a broadcast channel with a buffer size of 100. 
fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index 251e01a105a9..b2ad1452aa46 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -10,12 +10,12 @@ use reth_db::{init_db, open_db_read_only, DatabaseEnv}; use reth_db_common::init::init_genesis; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; use reth_evm::noop::NoopBlockExecutorProvider; -use reth_node_api::FullNodePrimitives; use reth_node_builder::{NodeTypesWithDBAdapter, NodeTypesWithEngine}; use reth_node_core::{ args::{DatabaseArgs, DatadirArgs}, dirs::{ChainPath, DataDirPath}, }; +use reth_primitives::EthPrimitives; use reth_provider::{ providers::{NodeTypesForProvider, StaticFileProvider}, ProviderFactory, StaticFileProviderFactory, @@ -196,12 +196,10 @@ impl AccessRights { /// Helper trait with a common set of requirements for the /// [`NodeTypes`](reth_node_builder::NodeTypes) in CLI. 
pub trait CliNodeTypes: - NodeTypesWithEngine - + NodeTypesForProvider> + NodeTypesWithEngine + NodeTypesForProvider { } impl CliNodeTypes for N where - N: NodeTypesWithEngine - + NodeTypesForProvider> + N: NodeTypesWithEngine + NodeTypesForProvider { } diff --git a/crates/cli/commands/src/init_state/mod.rs b/crates/cli/commands/src/init_state/mod.rs index 2aa2483fdda3..bdade252a668 100644 --- a/crates/cli/commands/src/init_state/mod.rs +++ b/crates/cli/commands/src/init_state/mod.rs @@ -11,7 +11,7 @@ use reth_provider::{ BlockNumReader, DatabaseProviderFactory, StaticFileProviderFactory, StaticFileWriter, }; -use std::{fs::File, io::BufReader, path::PathBuf, str::FromStr}; +use std::{io::BufReader, path::PathBuf, str::FromStr}; use tracing::info; pub mod without_evm; @@ -115,8 +115,7 @@ impl> InitStateC info!(target: "reth::cli", "Initiating state dump"); - let file = File::open(self.state)?; - let reader = BufReader::new(file); + let reader = BufReader::new(reth_fs_util::open(self.state)?); let hash = init_from_state_dump(reader, &provider_rw, config.stages.etl)?; diff --git a/crates/cli/commands/src/init_state/without_evm.rs b/crates/cli/commands/src/init_state/without_evm.rs index e3594a593638..22236d14c76b 100644 --- a/crates/cli/commands/src/init_state/without_evm.rs +++ b/crates/cli/commands/src/init_state/without_evm.rs @@ -33,7 +33,7 @@ pub fn setup_without_evm( where Provider: StaticFileProviderFactory + StageCheckpointWriter - + BlockWriter, + + BlockWriter>, { info!(target: "reth::cli", "Setting up dummy EVM chain before importing state."); @@ -64,7 +64,8 @@ fn append_first_block( total_difficulty: U256, ) -> Result<(), eyre::Error> where - Provider: BlockWriter + StaticFileProviderFactory, + Provider: BlockWriter> + + StaticFileProviderFactory, { provider_rw.insert_block( SealedBlockWithSenders::new(SealedBlock::new(header.clone(), Default::default()), vec![]) diff --git a/crates/cli/commands/src/stage/dump/execution.rs 
b/crates/cli/commands/src/stage/dump/execution.rs index 19704cb1c2fe..000c1b542dbf 100644 --- a/crates/cli/commands/src/stage/dump/execution.rs +++ b/crates/cli/commands/src/stage/dump/execution.rs @@ -7,6 +7,7 @@ use reth_db_api::{ }; use reth_db_common::DbTool; use reth_evm::{execute::BlockExecutorProvider, noop::NoopBlockExecutorProvider}; +use reth_node_api::NodePrimitives; use reth_node_builder::NodeTypesWithDB; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_provider::{ @@ -25,7 +26,13 @@ pub(crate) async fn dump_execution_stage( executor: E, ) -> eyre::Result<()> where - N: ProviderNodeTypes>, + N: ProviderNodeTypes< + DB = Arc, + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, E: BlockExecutorProvider, { let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?; @@ -131,7 +138,14 @@ fn import_tables_with_range( /// Dry-run an unwind to FROM block, so we can get the `PlainStorageState` and /// `PlainAccountState` safely. There might be some state dependency from an address /// which hasn't been changed in the given range. -fn unwind_and_copy( +fn unwind_and_copy< + N: ProviderNodeTypes< + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, +>( db_tool: &DbTool, from: u64, tip_block_number: u64, @@ -168,7 +182,12 @@ fn dry_run( executor: E, ) -> eyre::Result<()> where - N: ProviderNodeTypes, + N: ProviderNodeTypes< + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, E: BlockExecutorProvider, { info!(target: "reth::cli", "Executing stage. 
[dry-run]"); diff --git a/crates/cli/commands/src/stage/dump/merkle.rs b/crates/cli/commands/src/stage/dump/merkle.rs index f2688c365e1e..ce187437218a 100644 --- a/crates/cli/commands/src/stage/dump/merkle.rs +++ b/crates/cli/commands/src/stage/dump/merkle.rs @@ -9,6 +9,7 @@ use reth_db_api::{database::Database, table::TableImporter}; use reth_db_common::DbTool; use reth_evm::noop::NoopBlockExecutorProvider; use reth_exex::ExExManagerHandle; +use reth_node_api::NodePrimitives; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_provider::{ providers::{ProviderNodeTypes, StaticFileProvider}, @@ -24,7 +25,15 @@ use reth_stages::{ }; use tracing::info; -pub(crate) async fn dump_merkle_stage>>( +pub(crate) async fn dump_merkle_stage< + N: ProviderNodeTypes< + DB = Arc, + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, +>( db_tool: &DbTool, from: BlockNumber, to: BlockNumber, @@ -67,7 +76,14 @@ pub(crate) async fn dump_merkle_stage } /// Dry-run an unwind to FROM block and copy the necessary table data to the new database. 
-fn unwind_and_copy( +fn unwind_and_copy< + N: ProviderNodeTypes< + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, +>( db_tool: &DbTool, range: (u64, u64), tip_block_number: u64, diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 65994557c06d..a7e326848391 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -17,6 +17,7 @@ reth-blockchain-tree-api.workspace = true reth-codecs.workspace = true reth-db-api.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-stages-api.workspace = true reth-errors.workspace = true reth-provider.workspace = true diff --git a/crates/consensus/beacon/src/engine/event.rs b/crates/consensus/beacon/src/engine/event.rs index b76b85374cd8..b503e1e102af 100644 --- a/crates/consensus/beacon/src/engine/event.rs +++ b/crates/consensus/beacon/src/engine/event.rs @@ -1,7 +1,8 @@ +use alloy_consensus::BlockHeader; use alloy_primitives::B256; use alloy_rpc_types_engine::ForkchoiceState; use reth_engine_primitives::ForkchoiceStatus; -use reth_primitives::{SealedBlock, SealedHeader}; +use reth_primitives::{EthPrimitives, NodePrimitives, SealedBlock, SealedHeader}; use std::{ fmt::{Display, Formatter, Result}, sync::Arc, @@ -10,23 +11,23 @@ use std::{ /// Events emitted by [`crate::BeaconConsensusEngine`]. #[derive(Clone, Debug)] -pub enum BeaconConsensusEngineEvent { +pub enum BeaconConsensusEngineEvent { /// The fork choice state was updated, and the current fork choice status ForkchoiceUpdated(ForkchoiceState, ForkchoiceStatus), /// A block was added to the fork chain. 
- ForkBlockAdded(Arc, Duration), + ForkBlockAdded(Arc>, Duration), /// A block was added to the canonical chain, and the elapsed time validating the block - CanonicalBlockAdded(Arc, Duration), + CanonicalBlockAdded(Arc>, Duration), /// A canonical chain was committed, and the elapsed time committing the data - CanonicalChainCommitted(Box, Duration), + CanonicalChainCommitted(Box>, Duration), /// The consensus engine is involved in live sync, and has specific progress LiveSyncProgress(ConsensusEngineLiveSyncProgress), } -impl BeaconConsensusEngineEvent { +impl BeaconConsensusEngineEvent { /// Returns the canonical header if the event is a /// [`BeaconConsensusEngineEvent::CanonicalChainCommitted`]. - pub const fn canonical_header(&self) -> Option<&SealedHeader> { + pub const fn canonical_header(&self) -> Option<&SealedHeader> { match self { Self::CanonicalChainCommitted(header, _) => Some(header), _ => None, @@ -34,7 +35,10 @@ impl BeaconConsensusEngineEvent { } } -impl Display for BeaconConsensusEngineEvent { +impl Display for BeaconConsensusEngineEvent +where + N: NodePrimitives, +{ fn fmt(&self, f: &mut Formatter<'_>) -> Result { match self { Self::ForkchoiceUpdated(state, status) => { diff --git a/crates/consensus/beacon/src/engine/invalid_headers.rs b/crates/consensus/beacon/src/engine/invalid_headers.rs index b8d80b0ceeae..0a72129a6274 100644 --- a/crates/consensus/beacon/src/engine/invalid_headers.rs +++ b/crates/consensus/beacon/src/engine/invalid_headers.rs @@ -6,7 +6,7 @@ use reth_metrics::{ }; use reth_primitives::SealedHeader; use schnellru::{ByLength, LruMap}; -use std::sync::Arc; +use std::{fmt::Debug, sync::Arc}; use tracing::warn; /// The max hit counter for invalid headers in the cache before it is forcefully evicted. @@ -17,20 +17,20 @@ const INVALID_HEADER_HIT_EVICTION_THRESHOLD: u8 = 128; /// Keeps track of invalid headers. 
#[derive(Debug)] -pub struct InvalidHeaderCache { +pub struct InvalidHeaderCache { /// This maps a header hash to a reference to its invalid ancestor. - headers: LruMap, + headers: LruMap>, /// Metrics for the cache. metrics: InvalidHeaderCacheMetrics, } -impl InvalidHeaderCache { +impl InvalidHeaderCache { /// Invalid header cache constructor. pub fn new(max_length: u32) -> Self { Self { headers: LruMap::new(ByLength::new(max_length)), metrics: Default::default() } } - fn insert_entry(&mut self, hash: B256, header: Arc
) { + fn insert_entry(&mut self, hash: B256, header: Arc) { self.headers.insert(hash, HeaderEntry { header, hit_count: 0 }); } @@ -38,7 +38,7 @@ impl InvalidHeaderCache { /// /// If this is called, the hit count for the entry is incremented. /// If the hit count exceeds the threshold, the entry is evicted and `None` is returned. - pub fn get(&mut self, hash: &B256) -> Option> { + pub fn get(&mut self, hash: &B256) -> Option> { { let entry = self.headers.get(hash)?; entry.hit_count += 1; @@ -53,11 +53,7 @@ impl InvalidHeaderCache { } /// Inserts an invalid block into the cache, with a given invalid ancestor. - pub fn insert_with_invalid_ancestor( - &mut self, - header_hash: B256, - invalid_ancestor: Arc
, - ) { + pub fn insert_with_invalid_ancestor(&mut self, header_hash: B256, invalid_ancestor: Arc) { if self.get(&header_hash).is_none() { warn!(target: "consensus::engine", hash=?header_hash, ?invalid_ancestor, "Bad block with existing invalid ancestor"); self.insert_entry(header_hash, invalid_ancestor); @@ -69,7 +65,7 @@ impl InvalidHeaderCache { } /// Inserts an invalid ancestor into the map. - pub fn insert(&mut self, invalid_ancestor: SealedHeader) { + pub fn insert(&mut self, invalid_ancestor: SealedHeader) { if self.get(&invalid_ancestor.hash()).is_none() { let hash = invalid_ancestor.hash(); let header = invalid_ancestor.unseal(); @@ -83,11 +79,11 @@ impl InvalidHeaderCache { } } -struct HeaderEntry { +struct HeaderEntry { /// Keeps track how many times this header has been hit. hit_count: u8, /// The actually header entry - header: Arc
, + header: Arc, } /// Metrics for the invalid headers cache. diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 0fedbdd452de..7a894f08e1c7 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1,4 +1,4 @@ -use alloy_consensus::Header; +use alloy_consensus::{BlockHeader, Header}; use alloy_eips::{merge::EPOCH_SLOTS, BlockNumHash}; use alloy_primitives::{BlockNumber, B256}; use alloy_rpc_types_engine::{ @@ -21,12 +21,12 @@ use reth_network_p2p::{ sync::{NetworkSyncUpdater, SyncState}, EthBlockClient, }; -use reth_node_types::NodeTypesWithEngine; +use reth_node_types::{Block, BlockTy, NodeTypesWithEngine}; use reth_payload_builder::PayloadBuilderHandle; use reth_payload_builder_primitives::PayloadBuilder; use reth_payload_primitives::{PayloadAttributes, PayloadBuilderAttributes}; use reth_payload_validator::ExecutionPayloadValidator; -use reth_primitives::{Head, SealedBlock, SealedHeader}; +use reth_primitives::{EthPrimitives, Head, SealedBlock, SealedHeader}; use reth_provider::{ providers::ProviderNodeTypes, BlockIdReader, BlockReader, BlockSource, CanonChainTracker, ChainSpecProvider, ProviderError, StageCheckpointReader, @@ -84,9 +84,15 @@ const MAX_INVALID_HEADERS: u32 = 512u32; pub const MIN_BLOCKS_FOR_PIPELINE_RUN: u64 = EPOCH_SLOTS; /// Helper trait expressing requirements for node types to be used in engine. -pub trait EngineNodeTypes: ProviderNodeTypes + NodeTypesWithEngine {} +pub trait EngineNodeTypes: + ProviderNodeTypes + NodeTypesWithEngine +{ +} -impl EngineNodeTypes for T where T: ProviderNodeTypes + NodeTypesWithEngine {} +impl EngineNodeTypes for T where + T: ProviderNodeTypes + NodeTypesWithEngine +{ +} /// Represents a pending forkchoice update. 
/// @@ -228,7 +234,7 @@ impl BeaconConsensusEngine where N: EngineNodeTypes, BT: BlockchainTreeEngine - + BlockReader + + BlockReader> + BlockIdReader + CanonChainTracker + StageCheckpointReader @@ -946,7 +952,7 @@ where .blockchain .find_block_by_hash(safe_block_hash, BlockSource::Any)? .ok_or(ProviderError::UnknownBlockHash(safe_block_hash))?; - self.blockchain.set_safe(SealedHeader::new(safe.header, safe_block_hash)); + self.blockchain.set_safe(SealedHeader::new(safe.split().0, safe_block_hash)); } Ok(()) } @@ -966,9 +972,9 @@ where .blockchain .find_block_by_hash(finalized_block_hash, BlockSource::Any)? .ok_or(ProviderError::UnknownBlockHash(finalized_block_hash))?; - self.blockchain.finalize_block(finalized.number)?; + self.blockchain.finalize_block(finalized.header().number())?; self.blockchain - .set_finalized(SealedHeader::new(finalized.header, finalized_block_hash)); + .set_finalized(SealedHeader::new(finalized.split().0, finalized_block_hash)); } Ok(()) } @@ -1798,7 +1804,7 @@ where N: EngineNodeTypes, Client: EthBlockClient + 'static, BT: BlockchainTreeEngine - + BlockReader + + BlockReader> + BlockIdReader + CanonChainTracker + StageCheckpointReader @@ -1992,6 +1998,7 @@ mod tests { use assert_matches::assert_matches; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_node_types::FullNodePrimitives; + use reth_primitives::BlockExt; use reth_provider::{BlockWriter, ProviderFactory, StorageLocation}; use reth_rpc_types_compat::engine::payload::block_to_payload_v1; use reth_stages::{ExecOutput, PipelineError, StageError}; @@ -2878,7 +2885,7 @@ mod tests { block1.header.set_difficulty( MAINNET.fork(EthereumHardfork::Paris).ttd().unwrap() - U256::from(1), ); - block1 = block1.unseal().seal_slow(); + block1 = block1.unseal::().seal_slow(); let (block2, exec_result2) = data.blocks[1].clone(); let mut block2 = block2.unseal().block; block2.body.withdrawals = None; diff --git a/crates/consensus/beacon/src/engine/sync.rs 
b/crates/consensus/beacon/src/engine/sync.rs index b140846981e9..861aeebf1eb8 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -4,13 +4,14 @@ use crate::{ engine::metrics::EngineSyncMetrics, BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress, EthBeaconConsensus, }; +use alloy_consensus::Header; use alloy_primitives::{BlockNumber, B256}; use futures::FutureExt; use reth_network_p2p::{ full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient}, EthBlockClient, }; -use reth_primitives::{EthPrimitives, NodePrimitives, SealedBlock}; +use reth_primitives::{BlockBody, EthPrimitives, NodePrimitives, SealedBlock}; use reth_provider::providers::ProviderNodeTypes; use reth_stages_api::{ControlFlow, Pipeline, PipelineError, PipelineTarget, PipelineWithResult}; use reth_tasks::TaskSpawner; @@ -345,17 +346,25 @@ where /// A wrapper type around [`SealedBlock`] that implements the [Ord] trait by block number. #[derive(Debug, Clone, PartialEq, Eq)] -struct OrderedSealedBlock(SealedBlock); +struct OrderedSealedBlock(SealedBlock); -impl PartialOrd for OrderedSealedBlock { +impl PartialOrd for OrderedSealedBlock +where + H: reth_primitives_traits::BlockHeader + 'static, + B: reth_primitives_traits::BlockBody + 'static, +{ fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } -impl Ord for OrderedSealedBlock { +impl Ord for OrderedSealedBlock +where + H: reth_primitives_traits::BlockHeader + 'static, + B: reth_primitives_traits::BlockBody + 'static, +{ fn cmp(&self, other: &Self) -> Ordering { - self.0.number.cmp(&other.0.number) + self.0.number().cmp(&other.0.number()) } } diff --git a/crates/consensus/common/Cargo.toml b/crates/consensus/common/Cargo.toml index c83312577e9e..272adbb9297a 100644 --- a/crates/consensus/common/Cargo.toml +++ b/crates/consensus/common/Cargo.toml @@ -19,6 +19,7 @@ reth-consensus.workspace = true # ethereum alloy-primitives.workspace = true 
revm-primitives.workspace = true +reth-primitives-traits.workspace = true alloy-consensus.workspace = true alloy-eips.workspace = true diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 6042f16bf50f..b5314cdd1ec9 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -1,19 +1,21 @@ //! Collection of methods for block validation. -use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, Header}; +use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, BlockHeader, Header}; use alloy_eips::eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_consensus::ConsensusError; -use reth_primitives::{BlockBody, EthereumHardfork, GotExpected, SealedBlock, SealedHeader}; +use reth_primitives::{ + BlockBody, BlockBodyTxExt, EthereumHardfork, GotExpected, SealedBlock, SealedHeader, +}; use revm_primitives::calc_excess_blob_gas; /// Gas used needs to be less than gas limit. Gas used is going to be checked after execution. #[inline] -pub const fn validate_header_gas(header: &Header) -> Result<(), ConsensusError> { - if header.gas_used > header.gas_limit { +pub fn validate_header_gas(header: &H) -> Result<(), ConsensusError> { + if header.gas_used() > header.gas_limit() { return Err(ConsensusError::HeaderGasUsedExceedsGasLimit { - gas_used: header.gas_used, - gas_limit: header.gas_limit, + gas_used: header.gas_used(), + gas_limit: header.gas_limit(), }) } Ok(()) @@ -21,12 +23,12 @@ pub const fn validate_header_gas(header: &Header) -> Result<(), ConsensusError> /// Ensure the EIP-1559 base fee is set if the London hardfork is active. 
#[inline] -pub fn validate_header_base_fee( - header: &Header, +pub fn validate_header_base_fee( + header: &H, chain_spec: &ChainSpec, ) -> Result<(), ConsensusError> { - if chain_spec.is_fork_active_at_block(EthereumHardfork::London, header.number) && - header.base_fee_per_gas.is_none() + if chain_spec.is_fork_active_at_block(EthereumHardfork::London, header.number()) && + header.base_fee_per_gas().is_none() { return Err(ConsensusError::BaseFeeMissing) } @@ -39,15 +41,16 @@ pub fn validate_header_base_fee( /// /// [EIP-4895]: https://eips.ethereum.org/EIPS/eip-4895 #[inline] -pub fn validate_shanghai_withdrawals(block: &SealedBlock) -> Result<(), ConsensusError> { - let withdrawals = - block.body.withdrawals.as_ref().ok_or(ConsensusError::BodyWithdrawalsMissing)?; +pub fn validate_shanghai_withdrawals( + block: &SealedBlock, +) -> Result<(), ConsensusError> { + let withdrawals = block.body.withdrawals().ok_or(ConsensusError::BodyWithdrawalsMissing)?; let withdrawals_root = reth_primitives::proofs::calculate_withdrawals_root(withdrawals); let header_withdrawals_root = - block.withdrawals_root.as_ref().ok_or(ConsensusError::WithdrawalsRootMissing)?; + block.withdrawals_root().ok_or(ConsensusError::WithdrawalsRootMissing)?; if withdrawals_root != *header_withdrawals_root { return Err(ConsensusError::BodyWithdrawalsRootDiff( - GotExpected { got: withdrawals_root, expected: *header_withdrawals_root }.into(), + GotExpected { got: withdrawals_root, expected: header_withdrawals_root }.into(), )); } Ok(()) diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index e33690d45861..9a11a3b11095 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -11,22 +11,31 @@ repository.workspace = true workspace = true [dependencies] -reth.workspace = true reth-chainspec.workspace = true reth-tracing.workspace = true reth-db = { workspace = true, features = ["test-utils"] } reth-rpc-layer.workspace = true 
+reth-rpc-server-types.workspace = true +reth-rpc-eth-api.workspace = true +reth-rpc-api = { workspace = true, features = ["client"] } reth-payload-builder = { workspace = true, features = ["test-utils"] } reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true reth-primitives.workspace = true reth-provider.workspace = true +reth-network-api.workspace = true +reth-network.workspace = true reth-node-api.workspace = true +reth-node-core.workspace = true reth-node-builder = { workspace = true, features = ["test-utils"] } reth-tokio-util.workspace = true reth-stages-types.workspace = true reth-network-peers.workspace = true reth-engine-local.workspace = true +reth-tasks.workspace = true + +# currently need to enable this for workspace level +reth-optimism-primitives = { workspace = true, features = ["arbitrary"] } # rpc jsonrpsee.workspace = true @@ -35,6 +44,7 @@ url.workspace = true # ethereum alloy-primitives.workspace = true alloy-eips.workspace = true +alloy-rlp.workspace = true op-alloy-rpc-types-engine.workspace = true futures-util.workspace = true diff --git a/crates/e2e-test-utils/src/engine_api.rs b/crates/e2e-test-utils/src/engine_api.rs index cfa245e1de01..8c0f03bafd3a 100644 --- a/crates/e2e-test-utils/src/engine_api.rs +++ b/crates/e2e-test-utils/src/engine_api.rs @@ -1,20 +1,17 @@ use crate::traits::PayloadEnvelopeExt; use alloy_primitives::B256; +use alloy_rpc_types_engine::{ForkchoiceState, PayloadStatusEnum}; use jsonrpsee::{ core::client::ClientT, http_client::{transport::HttpBackend, HttpClient}, }; -use reth::{ - api::{EngineTypes, PayloadBuilderAttributes}, - providers::CanonStateNotificationStream, - rpc::{ - api::EngineApiClient, - types::engine::{ForkchoiceState, PayloadStatusEnum}, - }, -}; use reth_chainspec::EthereumHardforks; +use reth_node_api::EngineTypes; use reth_node_builder::BuiltPayload; use reth_payload_builder::PayloadId; +use reth_payload_primitives::PayloadBuilderAttributes; +use 
reth_provider::CanonStateNotificationStream; +use reth_rpc_api::EngineApiClient; use reth_rpc_layer::AuthClientService; use std::{marker::PhantomData, sync::Arc}; @@ -83,7 +80,7 @@ impl EngineApiTestContext( attributes_generator: impl Fn(u64) -> <::Engine as PayloadTypes>::PayloadBuilderAttributes + Copy + 'static, ) -> eyre::Result<(Vec>, TaskManager, Wallet)> where - N: Default + Node> + NodeTypesForProvider + NodeTypesWithEngine, + N: Default + Node> + NodeTypesForTree + NodeTypesWithEngine, N::ComponentsBuilder: NodeComponentsBuilder< TmpNodeAdapter, Components: NodeComponents, Network: PeersHandleProvider>, >, N::AddOns: RethRpcAddOns>, - N::Primitives: FullNodePrimitives, { let tasks = TaskManager::current(); let exec = tasks.executor(); @@ -125,7 +126,7 @@ pub async fn setup_engine( where N: Default + Node>>> - + NodeTypesWithEngine + + NodeTypesWithEngine + NodeTypesForProvider, N::ComponentsBuilder: NodeComponentsBuilder< TmpNodeAdapter>>, @@ -134,11 +135,14 @@ where Network: PeersHandleProvider, >, >, - N::AddOns: RethRpcAddOns>>>, + N::AddOns: RethRpcAddOns>>> + + EngineValidatorAddOn< + Adapter>>, + Validator: EngineValidator, + >, LocalPayloadAttributesBuilder: PayloadAttributesBuilder< <::Engine as PayloadTypes>::PayloadAttributes, >, - N::Primitives: FullNodePrimitives, { let tasks = TaskManager::current(); let exec = tasks.executor(); diff --git a/crates/e2e-test-utils/src/network.rs b/crates/e2e-test-utils/src/network.rs index 3f25915b35b4..2efc8d47f2d7 100644 --- a/crates/e2e-test-utils/src/network.rs +++ b/crates/e2e-test-utils/src/network.rs @@ -1,5 +1,7 @@ use futures_util::StreamExt; -use reth::network::{NetworkEvent, NetworkEventListenerProvider, PeersHandleProvider, PeersInfo}; +use reth_network_api::{ + test_utils::PeersHandleProvider, NetworkEvent, NetworkEventListenerProvider, PeersInfo, +}; use reth_network_peers::{NodeRecord, PeerId}; use reth_tokio_util::EventStream; use reth_tracing::tracing::info; diff --git 
a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index c3dff527eb20..b3eb641c1371 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -1,30 +1,28 @@ -use std::{marker::PhantomData, pin::Pin}; - +use crate::{ + engine_api::EngineApiTestContext, network::NetworkTestContext, payload::PayloadTestContext, + rpc::RpcTestContext, traits::PayloadEnvelopeExt, +}; +use alloy_consensus::BlockHeader; use alloy_primitives::{BlockHash, BlockNumber, Bytes, B256}; +use alloy_rpc_types_engine::PayloadStatusEnum; use alloy_rpc_types_eth::BlockNumberOrTag; use eyre::Ok; use futures_util::Future; -use reth::{ - api::{BuiltPayload, EngineTypes, FullNodeComponents, PayloadBuilderAttributes}, - builder::FullNode, - network::PeersHandleProvider, - providers::{BlockReader, BlockReaderIdExt, CanonStateSubscriptions, StageCheckpointReader}, - rpc::{ - api::eth::helpers::{EthApiSpec, EthTransactions, TraceExt}, - types::engine::PayloadStatusEnum, - }, -}; use reth_chainspec::EthereumHardforks; -use reth_node_builder::{rpc::RethRpcAddOns, NodeTypes, NodeTypesWithEngine}; +use reth_network_api::test_utils::PeersHandleProvider; +use reth_node_api::{Block, EngineTypes, FullNodeComponents}; +use reth_node_builder::{rpc::RethRpcAddOns, FullNode, NodeTypes, NodeTypesWithEngine}; +use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes}; +use reth_primitives::EthPrimitives; +use reth_provider::{ + BlockReader, BlockReaderIdExt, CanonStateSubscriptions, StageCheckpointReader, +}; +use reth_rpc_eth_api::helpers::{EthApiSpec, EthTransactions, TraceExt}; use reth_stages_types::StageId; +use std::{marker::PhantomData, pin::Pin}; use tokio_stream::StreamExt; use url::Url; -use crate::{ - engine_api::EngineApiTestContext, network::NetworkTestContext, payload::PayloadTestContext, - rpc::RpcTestContext, traits::PayloadEnvelopeExt, -}; - /// An helper struct to handle node actions #[allow(missing_debug_implementations)] pub struct 
NodeTestContext @@ -51,7 +49,11 @@ impl NodeTestContext where Engine: EngineTypes, Node: FullNodeComponents, - Node::Types: NodeTypesWithEngine, + Node::Types: NodeTypesWithEngine< + ChainSpec: EthereumHardforks, + Engine = Engine, + Primitives = EthPrimitives, + >, Node::Network: PeersHandleProvider, AddOns: RethRpcAddOns, { @@ -178,7 +180,7 @@ where if check { if let Some(latest_block) = self.inner.provider.block_by_number(number)? { - assert_eq!(latest_block.hash_slow(), expected_block_hash); + assert_eq!(latest_block.header().hash_slow(), expected_block_hash); break } assert!( @@ -216,7 +218,7 @@ where // get head block from notifications stream and verify the tx has been pushed to the // pool is actually present in the canonical block let head = self.engine_api.canonical_stream.next().await.unwrap(); - let tx = head.tip().transactions().next(); + let tx = head.tip().transactions().first(); assert_eq!(tx.unwrap().hash().as_slice(), tip_tx_hash.as_slice()); loop { @@ -225,10 +227,10 @@ where if let Some(latest_block) = self.inner.provider.block_by_number_or_tag(BlockNumberOrTag::Latest)? 
{ - if latest_block.number == block_number { + if latest_block.header().number() == block_number { // make sure the block hash we submitted via FCU engine api is the new latest // block using an RPC call - assert_eq!(latest_block.hash_slow(), block_hash); + assert_eq!(latest_block.header().hash_slow(), block_hash); break } } diff --git a/crates/e2e-test-utils/src/payload.rs b/crates/e2e-test-utils/src/payload.rs index 7828f61c2afb..45889a171c1a 100644 --- a/crates/e2e-test-utils/src/payload.rs +++ b/crates/e2e-test-utils/src/payload.rs @@ -1,8 +1,7 @@ use futures_util::StreamExt; -use reth::api::BuiltPayload; use reth_payload_builder::{PayloadBuilderHandle, PayloadId}; use reth_payload_builder_primitives::{Events, PayloadBuilder}; -use reth_payload_primitives::{PayloadBuilderAttributes, PayloadTypes}; +use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes, PayloadTypes}; use tokio_stream::wrappers::BroadcastStream; /// Helper for payload operations diff --git a/crates/e2e-test-utils/src/rpc.rs b/crates/e2e-test-utils/src/rpc.rs index 7b7dabdf2404..8399a482dfd6 100644 --- a/crates/e2e-test-utils/src/rpc.rs +++ b/crates/e2e-test-utils/src/rpc.rs @@ -1,18 +1,15 @@ use alloy_consensus::TxEnvelope; use alloy_network::eip2718::Decodable2718; use alloy_primitives::{Bytes, B256}; -use reth::{ - builder::{rpc::RpcRegistry, FullNodeComponents}, - rpc::api::{ - eth::{ - helpers::{EthApiSpec, EthTransactions, TraceExt}, - EthApiTypes, - }, - DebugApiServer, - }, -}; +use alloy_rlp::Encodable; use reth_chainspec::EthereumHardforks; -use reth_node_builder::NodeTypes; +use reth_node_api::{FullNodeComponents, NodePrimitives}; +use reth_node_builder::{rpc::RpcRegistry, NodeTypes}; +use reth_rpc_api::DebugApiServer; +use reth_rpc_eth_api::{ + helpers::{EthApiSpec, EthTransactions, TraceExt}, + EthApiTypes, +}; #[allow(missing_debug_implementations)] pub struct RpcTestContext { @@ -21,7 +18,12 @@ pub struct RpcTestContext { impl RpcTestContext where - Node: 
FullNodeComponents>, + Node: FullNodeComponents< + Types: NodeTypes< + ChainSpec: EthereumHardforks, + Primitives: NodePrimitives, + >, + >, EthApi: EthApiSpec + EthTransactions + TraceExt, { /// Injects a raw transaction into the node tx pool via RPC server diff --git a/crates/e2e-test-utils/src/traits.rs b/crates/e2e-test-utils/src/traits.rs index d14445370d41..6d9bf14dbc12 100644 --- a/crates/e2e-test-utils/src/traits.rs +++ b/crates/e2e-test-utils/src/traits.rs @@ -1,6 +1,7 @@ -use alloy_rpc_types_engine::ExecutionPayloadEnvelopeV4; +use alloy_rpc_types_engine::{ + ExecutionPayloadEnvelopeV3, ExecutionPayloadEnvelopeV4, ExecutionPayloadV3, +}; use op_alloy_rpc_types_engine::{OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4}; -use reth::rpc::types::engine::{ExecutionPayloadEnvelopeV3, ExecutionPayloadV3}; /// The execution payload envelope type. pub trait PayloadEnvelopeExt: Send + Sync + std::fmt::Debug { diff --git a/crates/engine/invalid-block-hooks/Cargo.toml b/crates/engine/invalid-block-hooks/Cargo.toml index 35afad8a4d8e..2b215954e3e4 100644 --- a/crates/engine/invalid-block-hooks/Cargo.toml +++ b/crates/engine/invalid-block-hooks/Cargo.toml @@ -16,11 +16,12 @@ reth-chainspec.workspace = true reth-engine-primitives.workspace = true reth-evm.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-provider.workspace = true reth-revm = { workspace = true, features = ["serde"] } reth-rpc-api = { workspace = true, features = ["client"] } reth-tracing.workspace = true -reth-trie = { workspace = true, features = ["serde"] } +reth-trie.workspace = true # alloy alloy-primitives.workspace = true diff --git a/crates/engine/invalid-block-hooks/src/witness.rs b/crates/engine/invalid-block-hooks/src/witness.rs index a1affdeda349..110315292204 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -1,5 +1,3 @@ -use std::{collections::HashMap, fmt::Debug, 
fs::File, io::Write, path::PathBuf}; - use alloy_consensus::Header; use alloy_primitives::{keccak256, B256, U256}; use alloy_rpc_types_debug::ExecutionWitness; @@ -11,17 +9,18 @@ use reth_evm::{ state_change::post_block_balance_increments, system_calls::SystemCaller, ConfigureEvm, }; use reth_primitives::{Receipt, SealedBlockWithSenders, SealedHeader}; +use reth_primitives_traits::SignedTransaction; use reth_provider::{BlockExecutionOutput, ChainSpecProvider, StateProviderFactory}; use reth_revm::{ - db::states::bundle_state::BundleRetention, - primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg}, - DatabaseCommit, StateBuilder, + db::states::bundle_state::BundleRetention, primitives::EnvWithHandlerCfg, DatabaseCommit, + StateBuilder, }; use reth_rpc_api::DebugApiClient; use reth_scroll_execution::FinalizeExecution; use reth_tracing::tracing::warn; use reth_trie::{updates::TrieUpdates, HashedPostState, HashedStorage}; use serde::Serialize; +use std::{collections::HashMap, fmt::Debug, fs::File, io::Write, path::PathBuf}; /// Generates a witness for the given block and saves it to a file. #[derive(Debug)] @@ -76,9 +75,7 @@ where let mut db = StateBuilder::new().with_database(state).with_bundle_update().build(); // Setup environment for the execution. 
- let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); - let mut block_env = BlockEnv::default(); - self.evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, block.header(), U256::MAX); + let (cfg, block_env) = self.evm_config.cfg_and_block_env(block.header(), U256::MAX); // Setup EVM let mut evm = self.evm_config.evm_with_env( diff --git a/crates/engine/local/Cargo.toml b/crates/engine/local/Cargo.toml index a1b74d13fee7..d8a66e65e04c 100644 --- a/crates/engine/local/Cargo.toml +++ b/crates/engine/local/Cargo.toml @@ -16,12 +16,12 @@ reth-consensus.workspace = true reth-engine-primitives.workspace = true reth-engine-service.workspace = true reth-engine-tree.workspace = true +reth-node-types.workspace = true reth-evm.workspace = true reth-ethereum-engine-primitives.workspace = true reth-payload-builder.workspace = true reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true -reth-payload-validator.workspace = true reth-provider.workspace = true reth-prune.workspace = true reth-rpc-types-compat.workspace = true diff --git a/crates/engine/local/src/miner.rs b/crates/engine/local/src/miner.rs index 3a0f5a2f1925..a5c7cf4d4c60 100644 --- a/crates/engine/local/src/miner.rs +++ b/crates/engine/local/src/miner.rs @@ -210,12 +210,13 @@ where let block = payload.block(); - let cancun_fields = - self.provider.chain_spec().is_cancun_active_at_timestamp(block.timestamp).then(|| { - CancunPayloadFields { - parent_beacon_block_root: block.parent_beacon_block_root.unwrap(), - versioned_hashes: block.blob_versioned_hashes().into_iter().copied().collect(), - } + let cancun_fields = self + .provider + .chain_spec() + .is_cancun_active_at_timestamp(block.timestamp) + .then(|| CancunPayloadFields { + parent_beacon_block_root: block.parent_beacon_block_root.unwrap(), + versioned_hashes: block.body.blob_versioned_hashes().into_iter().copied().collect(), }); let (tx, rx) = oneshot::channel(); diff --git 
a/crates/engine/local/src/service.rs b/crates/engine/local/src/service.rs index 3575bc133c6c..5838cb89116b 100644 --- a/crates/engine/local/src/service.rs +++ b/crates/engine/local/src/service.rs @@ -19,7 +19,7 @@ use futures_util::{Stream, StreamExt}; use reth_beacon_consensus::{BeaconConsensusEngineEvent, EngineNodeTypes}; use reth_chainspec::EthChainSpec; use reth_consensus::Consensus; -use reth_engine_primitives::BeaconEngineMessage; +use reth_engine_primitives::{BeaconEngineMessage, EngineValidator}; use reth_engine_service::service::EngineMessageStream; use reth_engine_tree::{ chain::{ChainEvent, HandlerEvent}, @@ -31,9 +31,9 @@ use reth_engine_tree::{ tree::{EngineApiTreeHandler, InvalidBlockHook, TreeConfig}, }; use reth_evm::execute::BlockExecutorProvider; +use reth_node_types::BlockTy; use reth_payload_builder::PayloadBuilderHandle; use reth_payload_primitives::{PayloadAttributesBuilder, PayloadTypes}; -use reth_payload_validator::ExecutionPayloadValidator; use reth_provider::{providers::BlockchainProvider2, ChainSpecProvider, ProviderFactory}; use reth_prune::PrunerWithFactory; use reth_stages_api::MetricEventsSender; @@ -63,13 +63,14 @@ where { /// Constructor for [`LocalEngineService`]. 
#[allow(clippy::too_many_arguments)] - pub fn new( + pub fn new( consensus: Arc, executor_factory: impl BlockExecutorProvider, provider: ProviderFactory, blockchain_db: BlockchainProvider2, pruner: PrunerWithFactory>, payload_builder: PayloadBuilderHandle, + payload_validator: V, tree_config: TreeConfig, invalid_block_hook: Box, sync_metrics_tx: MetricEventsSender, @@ -80,6 +81,7 @@ where ) -> Self where B: PayloadAttributesBuilder<::PayloadAttributes>, + V: EngineValidator>, { let chain_spec = provider.chain_spec(); let engine_kind = @@ -87,11 +89,9 @@ where let persistence_handle = PersistenceHandle::spawn_service(provider, pruner, sync_metrics_tx); - let payload_validator = ExecutionPayloadValidator::new(chain_spec); - let canonical_in_memory_state = blockchain_db.canonical_in_memory_state(); - let (to_tree_tx, from_tree) = EngineApiTreeHandler::spawn_new( + let (to_tree_tx, from_tree) = EngineApiTreeHandler::::spawn_new( blockchain_db.clone(), executor_factory, consensus, diff --git a/crates/engine/primitives/Cargo.toml b/crates/engine/primitives/Cargo.toml index 94f97e308f41..97d2959b3294 100644 --- a/crates/engine/primitives/Cargo.toml +++ b/crates/engine/primitives/Cargo.toml @@ -16,11 +16,13 @@ reth-execution-types.workspace = true reth-payload-primitives.workspace = true reth-payload-builder-primitives.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-trie.workspace = true reth-errors.workspace = true # alloy alloy-primitives.workspace = true +alloy-consensus.workspace = true alloy-rpc-types-engine.workspace = true # async diff --git a/crates/engine/primitives/src/lib.rs b/crates/engine/primitives/src/lib.rs index 3429edc28675..89fb7459b7de 100644 --- a/crates/engine/primitives/src/lib.rs +++ b/crates/engine/primitives/src/lib.rs @@ -9,6 +9,9 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod error; + +use alloy_consensus::BlockHeader; +use alloy_rpc_types_engine::{ExecutionPayload, 
ExecutionPayloadSidecar, PayloadError}; pub use error::BeaconOnNewPayloadError; mod forkchoice; @@ -24,6 +27,9 @@ pub use reth_payload_primitives::{ BuiltPayload, EngineApiMessageVersion, EngineObjectValidationError, PayloadOrAttributes, PayloadTypes, }; +use reth_payload_primitives::{InvalidPayloadAttributesError, PayloadAttributes}; +use reth_primitives::SealedBlockFor; +use reth_primitives_traits::Block; use serde::{de::DeserializeOwned, ser::Serialize}; /// This type defines the versioned types of the engine API. @@ -74,8 +80,11 @@ pub trait EngineTypes: + 'static; } -/// Type that validates the payloads sent to the engine. +/// Type that validates the payloads processed by the engine. pub trait EngineValidator: Clone + Send + Sync + Unpin + 'static { + /// The block type used by the engine. + type Block: Block; + /// Validates the presence or exclusion of fork-specific fields based on the payload attributes /// and the message version. fn validate_version_specific_fields( @@ -90,4 +99,38 @@ pub trait EngineValidator: Clone + Send + Sync + Unpin + 'st version: EngineApiMessageVersion, attributes: &::PayloadAttributes, ) -> Result<(), EngineObjectValidationError>; + + /// Ensures that the given payload does not violate any consensus rules that concern the block's + /// layout. + /// + /// This function must convert the payload into the executable block and pre-validate its + /// fields. + /// + /// Implementers should ensure that the checks are done in the order that conforms with the + /// engine-API specification. + fn ensure_well_formed_payload( + &self, + payload: ExecutionPayload, + sidecar: ExecutionPayloadSidecar, + ) -> Result, PayloadError>; + + /// Validates the payload attributes with respect to the header. + /// + /// By default, this enforces that the payload attributes timestamp is greater than the + /// timestamp according to: + /// > 7. 
Client software MUST ensure that payloadAttributes.timestamp is greater than + /// > timestamp + /// > of a block referenced by forkchoiceState.headBlockHash. + /// + /// See also [engine api spec](https://github.com/ethereum/execution-apis/tree/fe8e13c288c592ec154ce25c534e26cb7ce0530d/src/engine) + fn validate_payload_attributes_against_header( + &self, + attr: &::PayloadAttributes, + header: &::Header, + ) -> Result<(), InvalidPayloadAttributesError> { + if attr.timestamp() <= header.timestamp() { + return Err(InvalidPayloadAttributesError::InvalidTimestamp); + } + Ok(()) + } } diff --git a/crates/engine/service/Cargo.toml b/crates/engine/service/Cargo.toml index 8359c453dccb..8854fd18879d 100644 --- a/crates/engine/service/Cargo.toml +++ b/crates/engine/service/Cargo.toml @@ -18,7 +18,6 @@ reth-engine-tree.workspace = true reth-evm.workspace = true reth-network-p2p.workspace = true reth-payload-builder.workspace = true -reth-payload-validator.workspace = true reth-provider.workspace = true reth-prune.workspace = true reth-stages-api.workspace = true diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index 49233439e0a8..a54a2ef9e1a1 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -3,7 +3,7 @@ use pin_project::pin_project; use reth_beacon_consensus::{BeaconConsensusEngineEvent, EngineNodeTypes}; use reth_chainspec::EthChainSpec; use reth_consensus::Consensus; -use reth_engine_primitives::BeaconEngineMessage; +use reth_engine_primitives::{BeaconEngineMessage, EngineValidator}; use reth_engine_tree::{ backfill::PipelineSync, download::BasicBlockDownloader, @@ -17,9 +17,8 @@ pub use reth_engine_tree::{ }; use reth_evm::execute::BlockExecutorProvider; use reth_network_p2p::EthBlockClient; -use reth_node_types::NodeTypesWithEngine; +use reth_node_types::{BlockTy, NodeTypesWithEngine}; use reth_payload_builder::PayloadBuilderHandle; -use 
reth_payload_validator::ExecutionPayloadValidator; use reth_provider::{providers::BlockchainProvider2, ProviderFactory}; use reth_prune::PrunerWithFactory; use reth_stages_api::{MetricEventsSender, Pipeline}; @@ -65,7 +64,7 @@ where { /// Constructor for `EngineService`. #[allow(clippy::too_many_arguments)] - pub fn new( + pub fn new( consensus: Arc, executor_factory: E, chain_spec: Arc, @@ -77,10 +76,14 @@ where blockchain_db: BlockchainProvider2, pruner: PrunerWithFactory>, payload_builder: PayloadBuilderHandle, + payload_validator: V, tree_config: TreeConfig, invalid_block_hook: Box, sync_metrics_tx: MetricEventsSender, - ) -> Self { + ) -> Self + where + V: EngineValidator>, + { let engine_kind = if chain_spec.is_optimism() { EngineApiKind::OpStack } else { EngineApiKind::Ethereum }; @@ -88,11 +91,10 @@ where let persistence_handle = PersistenceHandle::spawn_service(provider, pruner, sync_metrics_tx); - let payload_validator = ExecutionPayloadValidator::new(chain_spec); let canonical_in_memory_state = blockchain_db.canonical_in_memory_state(); - let (to_tree_tx, from_tree) = EngineApiTreeHandler::spawn_new( + let (to_tree_tx, from_tree) = EngineApiTreeHandler::::spawn_new( blockchain_db, executor_factory, consensus, @@ -148,7 +150,7 @@ mod tests { use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_engine_primitives::BeaconEngineMessage; use reth_engine_tree::{test_utils::TestPipelineBuilder, tree::NoopInvalidBlockHook}; - use reth_ethereum_engine_primitives::EthEngineTypes; + use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; use reth_evm_ethereum::execute::EthExecutorProvider; use reth_exex_types::FinishedExExHeight; use reth_network_p2p::test_utils::TestFullBlockClient; @@ -186,7 +188,7 @@ mod tests { let blockchain_db = BlockchainProvider2::with_latest(provider_factory.clone(), SealedHeader::default()) .unwrap(); - + let engine_payload_validator = EthereumEngineValidator::new(chain_spec.clone()); let (_tx, rx) = 
watch::channel(FinishedExExHeight::NoExExs); let pruner = Pruner::new_with_factory(provider_factory.clone(), vec![], 0, 0, None, rx); @@ -204,6 +206,7 @@ mod tests { blockchain_db, pruner, PayloadBuilderHandle::new(tx), + engine_payload_validator, TreeConfig::default(), Box::new(NoopInvalidBlockHook::default()), sync_metrics_tx, diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 70be84a9f799..680b6933ebe6 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -13,47 +13,49 @@ workspace = true [dependencies] # reth reth-beacon-consensus.workspace = true -reth-blockchain-tree.workspace = true reth-blockchain-tree-api.workspace = true +reth-blockchain-tree.workspace = true reth-chain-state.workspace = true +reth-chainspec = { workspace = true, optional = true } reth-consensus.workspace = true -reth-chainspec.workspace = true reth-engine-primitives.workspace = true reth-errors.workspace = true reth-evm.workspace = true reth-network-p2p.workspace = true -reth-payload-builder.workspace = true reth-payload-builder-primitives.workspace = true +reth-payload-builder.workspace = true reth-payload-primitives.workspace = true -reth-payload-validator.workspace = true reth-primitives.workspace = true -reth-primitives-traits.workspace = true reth-provider.workspace = true reth-prune.workspace = true reth-revm.workspace = true reth-stages-api.workspace = true reth-tasks.workspace = true -reth-trie.workspace = true +reth-trie-db.workspace = true reth-trie-parallel.workspace = true +reth-trie-sparse.workspace = true +reth-trie.workspace = true # alloy -alloy-primitives.workspace = true +alloy-consensus.workspace = true alloy-eips.workspace = true +alloy-primitives.workspace = true +alloy-rlp.workspace = true alloy-rpc-types-engine.workspace = true -alloy-consensus.workspace = true revm-primitives.workspace = true # common futures.workspace = true -tokio = { workspace = true, features = ["macros", "sync"] } thiserror.workspace = 
true +tokio = { workspace = true, features = ["macros", "sync"] } # metrics metrics.workspace = true reth-metrics = { workspace = true, features = ["common"] } # misc +rayon.workspace = true tracing.workspace = true # optional deps for test-utils @@ -64,25 +66,28 @@ reth-tracing = { workspace = true, optional = true } [dev-dependencies] # reth -reth-db = { workspace = true, features = ["test-utils"] } reth-chain-state = { workspace = true, features = ["test-utils"] } +reth-chainspec.workspace = true +reth-db = { workspace = true, features = ["test-utils"] } reth-ethereum-engine-primitives.workspace = true reth-evm = { workspace = true, features = ["test-utils"] } reth-exex-types.workspace = true reth-network-p2p = { workspace = true, features = ["test-utils"] } -reth-prune.workspace = true reth-prune-types.workspace = true +reth-prune.workspace = true reth-rpc-types-compat.workspace = true reth-stages = { workspace = true, features = ["test-utils"] } reth-static-file.workspace = true +reth-testing-utils.workspace = true reth-tracing.workspace = true -reth-chainspec.workspace = true +# alloy alloy-rlp.workspace = true assert_matches.workspace = true criterion.workspace = true crossbeam-channel = "0.5.13" +rand.workspace = true [[bench]] name = "channel_perf" @@ -90,23 +95,24 @@ harness = false [features] test-utils = [ - "reth-db/test-utils", - "reth-chain-state/test-utils", - "reth-network-p2p/test-utils", - "reth-prune-types", - "reth-stages/test-utils", - "reth-static-file", - "reth-tracing", "reth-blockchain-tree/test-utils", + "reth-chain-state/test-utils", "reth-chainspec/test-utils", "reth-consensus/test-utils", + "reth-db/test-utils", "reth-evm/test-utils", + "reth-network-p2p/test-utils", "reth-payload-builder/test-utils", "reth-primitives/test-utils", + "reth-provider/test-utils", + "reth-prune-types", + "reth-prune-types?/test-utils", "reth-revm/test-utils", "reth-stages-api/test-utils", - "reth-provider/test-utils", + "reth-stages/test-utils", + 
"reth-static-file", + "reth-tracing", "reth-trie/test-utils", "reth-prune-types?/test-utils", - "reth-primitives-traits/test-utils", + "reth-trie-db/test-utils", ] diff --git a/crates/engine/tree/src/engine.rs b/crates/engine/tree/src/engine.rs index 005d4e54399c..947d025e9ab6 100644 --- a/crates/engine/tree/src/engine.rs +++ b/crates/engine/tree/src/engine.rs @@ -10,7 +10,7 @@ use futures::{Stream, StreamExt}; use reth_beacon_consensus::BeaconConsensusEngineEvent; use reth_chain_state::ExecutedBlock; use reth_engine_primitives::{BeaconEngineMessage, EngineTypes}; -use reth_primitives::SealedBlockWithSenders; +use reth_primitives::{NodePrimitives, SealedBlockWithSenders}; use std::{ collections::HashSet, fmt::Display, @@ -270,25 +270,25 @@ impl From> for FromEngine { /// Event from the consensus engine. // TODO(mattsse): find a more appropriate name for this variant, consider phasing it out. - BeaconConsensus(BeaconConsensusEngineEvent), + BeaconConsensus(BeaconConsensusEngineEvent), /// Backfill action is needed. BackfillAction(BackfillAction), /// Block download is needed. Download(DownloadRequest), } -impl EngineApiEvent { +impl EngineApiEvent { /// Returns `true` if the event is a backfill action. 
pub const fn is_backfill_action(&self) -> bool { matches!(self, Self::BackfillAction(_)) } } -impl From for EngineApiEvent { - fn from(event: BeaconConsensusEngineEvent) -> Self { +impl From> for EngineApiEvent { + fn from(event: BeaconConsensusEngineEvent) -> Self { Self::BeaconConsensus(event) } } diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs index 86d18ceb48ce..950310b170f7 100644 --- a/crates/engine/tree/src/persistence.rs +++ b/crates/engine/tree/src/persistence.rs @@ -2,8 +2,7 @@ use crate::metrics::PersistenceMetrics; use alloy_eips::BlockNumHash; use reth_chain_state::ExecutedBlock; use reth_errors::ProviderError; -use reth_primitives::BlockBody; -use reth_primitives_traits::FullNodePrimitives; +use reth_primitives::EthPrimitives; use reth_provider::{ providers::ProviderNodeTypes, writer::UnifiedStorageWriter, BlockHashReader, ChainStateBlockWriter, DatabaseProviderFactory, ProviderFactory, StaticFileProviderFactory, @@ -20,14 +19,9 @@ use tracing::{debug, error}; /// A helper trait with requirements for [`ProviderNodeTypes`] to be used within /// [`PersistenceService`]. -pub trait PersistenceNodeTypes: - ProviderNodeTypes> -{ -} -impl PersistenceNodeTypes for T where - T: ProviderNodeTypes> -{ -} +pub trait PersistenceNodeTypes: ProviderNodeTypes {} +impl PersistenceNodeTypes for T where T: ProviderNodeTypes {} + /// Writes parts of reth's in memory tree state to the database and static files. 
/// /// This is meant to be a spawned service that listens for various incoming persistence operations, @@ -153,7 +147,7 @@ impl PersistenceService { let provider_rw = self.provider.database_provider_rw()?; let static_file_provider = self.provider.static_file_provider(); - UnifiedStorageWriter::from(&provider_rw, &static_file_provider).save_blocks(&blocks)?; + UnifiedStorageWriter::from(&provider_rw, &static_file_provider).save_blocks(blocks)?; UnifiedStorageWriter::commit(provider_rw)?; } self.metrics.save_blocks_duration_seconds.record(start_time.elapsed()); diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 39843377684c..5dc8039afe65 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -1,10 +1,11 @@ use crate::{ backfill::{BackfillAction, BackfillSyncState}, chain::FromOrchestrator, - engine::{DownloadRequest, EngineApiEvent, FromEngine}, + engine::{DownloadRequest, EngineApiEvent, EngineApiKind, EngineApiRequest, FromEngine}, persistence::PersistenceHandle, + tree::metrics::EngineApiMetrics, }; -use alloy_consensus::Header; +use alloy_consensus::{BlockHeader, Header}; use alloy_eips::BlockNumHash; use alloy_primitives::{ map::{HashMap, HashSet}, @@ -24,19 +25,20 @@ use reth_blockchain_tree::{ use reth_chain_state::{ CanonicalInMemoryState, ExecutedBlock, MemoryOverlayStateProvider, NewCanonicalChain, }; -use reth_chainspec::EthereumHardforks; use reth_consensus::{Consensus, PostExecutionInput}; use reth_engine_primitives::{ BeaconEngineMessage, BeaconOnNewPayloadError, EngineApiMessageVersion, EngineTypes, - ForkchoiceStateTracker, OnForkChoiceUpdated, + EngineValidator, ForkchoiceStateTracker, OnForkChoiceUpdated, }; use reth_errors::{ConsensusError, ProviderResult}; use reth_evm::execute::BlockExecutorProvider; use reth_payload_builder::PayloadBuilderHandle; use reth_payload_builder_primitives::PayloadBuilder; use reth_payload_primitives::{PayloadAttributes, 
PayloadBuilderAttributes}; -use reth_payload_validator::ExecutionPayloadValidator; -use reth_primitives::{Block, GotExpected, SealedBlock, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{ + Block, EthPrimitives, GotExpected, NodePrimitives, SealedBlock, SealedBlockWithSenders, + SealedHeader, +}; use reth_provider::{ providers::ConsistentDbView, BlockReader, DatabaseProviderFactory, ExecutionOutcome, ProviderError, StateProviderBox, StateProviderFactory, StateReader, StateRootProvider, @@ -51,6 +53,7 @@ use std::{ cmp::Ordering, collections::{btree_map, hash_map, BTreeMap, VecDeque}, fmt::Debug, + marker::PhantomData, ops::Bound, sync::{ mpsc::{Receiver, RecvError, RecvTimeoutError, Sender}, @@ -68,10 +71,6 @@ pub mod config; mod invalid_block_hook; mod metrics; mod persistence_state; -use crate::{ - engine::{EngineApiKind, EngineApiRequest}, - tree::metrics::EngineApiMetrics, -}; pub use config::TreeConfig; pub use invalid_block_hook::{InvalidBlockHooks, NoopInvalidBlockHook}; pub use persistence_state::PersistenceState; @@ -86,17 +85,17 @@ mod root; /// - This only stores blocks that are connected to the canonical chain. /// - All executed blocks are valid and have been executed. #[derive(Debug, Default)] -pub struct TreeState { +pub struct TreeState { /// __All__ unique executed blocks by block hash that are connected to the canonical chain. /// /// This includes blocks of all forks. - blocks_by_hash: HashMap, + blocks_by_hash: HashMap>, /// Executed blocks grouped by their respective block number. /// /// This maps unique block number to all known blocks for that height. /// /// Note: there can be multiple blocks at the same height due to forks. - blocks_by_number: BTreeMap>, + blocks_by_number: BTreeMap>>, /// Map of any parent block hash to its children. parent_to_child: HashMap>, /// Map of hash to trie updates for canonical blocks that are persisted but not finalized. 
@@ -107,7 +106,7 @@ pub struct TreeState { current_canonical_head: BlockNumHash, } -impl TreeState { +impl TreeState { /// Returns a new, empty tree state that points to the given canonical head. fn new(current_canonical_head: BlockNumHash) -> Self { Self { @@ -125,12 +124,12 @@ impl TreeState { } /// Returns the [`ExecutedBlock`] by hash. - fn executed_block_by_hash(&self, hash: B256) -> Option<&ExecutedBlock> { + fn executed_block_by_hash(&self, hash: B256) -> Option<&ExecutedBlock> { self.blocks_by_hash.get(&hash) } /// Returns the block by hash. - fn block_by_hash(&self, hash: B256) -> Option> { + fn block_by_hash(&self, hash: B256) -> Option>> { self.blocks_by_hash.get(&hash).map(|b| b.block.clone()) } @@ -138,12 +137,12 @@ impl TreeState { /// newest to oldest. And the parent hash of the oldest block that is missing from the buffer. /// /// Returns `None` if the block for the given hash is not found. - fn blocks_by_hash(&self, hash: B256) -> Option<(B256, Vec)> { + fn blocks_by_hash(&self, hash: B256) -> Option<(B256, Vec>)> { let block = self.blocks_by_hash.get(&hash).cloned()?; - let mut parent_hash = block.block().parent_hash; + let mut parent_hash = block.block().parent_hash(); let mut blocks = vec![block]; while let Some(executed) = self.blocks_by_hash.get(&parent_hash) { - parent_hash = executed.block.parent_hash; + parent_hash = executed.block.parent_hash(); blocks.push(executed.clone()); } @@ -151,10 +150,10 @@ impl TreeState { } /// Insert executed block into the state. 
- fn insert_executed(&mut self, executed: ExecutedBlock) { + fn insert_executed(&mut self, executed: ExecutedBlock) { let hash = executed.block.hash(); - let parent_hash = executed.block.parent_hash; - let block_number = executed.block.number; + let parent_hash = executed.block.parent_hash(); + let block_number = executed.block.number(); if self.blocks_by_hash.contains_key(&hash) { return; @@ -182,11 +181,11 @@ impl TreeState { /// ## Returns /// /// The removed block and the block hashes of its children. - fn remove_by_hash(&mut self, hash: B256) -> Option<(ExecutedBlock, HashSet)> { + fn remove_by_hash(&mut self, hash: B256) -> Option<(ExecutedBlock, HashSet)> { let executed = self.blocks_by_hash.remove(&hash)?; // Remove this block from collection of children of its parent block. - let parent_entry = self.parent_to_child.entry(executed.block.parent_hash); + let parent_entry = self.parent_to_child.entry(executed.block.parent_hash()); if let hash_map::Entry::Occupied(mut entry) = parent_entry { entry.get_mut().remove(&hash); @@ -199,7 +198,7 @@ impl TreeState { let children = self.parent_to_child.remove(&hash).unwrap_or_default(); // Remove this block from `blocks_by_number`. 
- let block_number_entry = self.blocks_by_number.entry(executed.block.number); + let block_number_entry = self.blocks_by_number.entry(executed.block.number()); if let btree_map::Entry::Occupied(mut entry) = block_number_entry { // We have to find the index of the block since it exists in a vec if let Some(index) = entry.get().iter().position(|b| b.block.hash() == hash) { @@ -223,7 +222,7 @@ impl TreeState { } while let Some(executed) = self.blocks_by_hash.get(¤t_block) { - current_block = executed.block.parent_hash; + current_block = executed.block.parent_hash(); if current_block == hash { return true } @@ -251,14 +250,14 @@ impl TreeState { // upper bound let mut current_block = self.current_canonical_head.hash; while let Some(executed) = self.blocks_by_hash.get(¤t_block) { - current_block = executed.block.parent_hash; - if executed.block.number <= upper_bound { + current_block = executed.block.parent_hash(); + if executed.block.number() <= upper_bound { debug!(target: "engine::tree", num_hash=?executed.block.num_hash(), "Attempting to remove block walking back from the head"); if let Some((removed, _)) = self.remove_by_hash(executed.block.hash()) { debug!(target: "engine::tree", num_hash=?removed.block.num_hash(), "Removed block walking back from the head"); // finally, move the trie updates self.persisted_trie_updates - .insert(removed.block.hash(), (removed.block.number, removed.trie)); + .insert(removed.block.hash(), (removed.block.number(), removed.trie)); } } } @@ -468,11 +467,14 @@ pub enum TreeAction { /// /// This type is responsible for processing engine API requests, maintaining the canonical state and /// emitting events. -pub struct EngineApiTreeHandler { +pub struct EngineApiTreeHandler +where + T: EngineTypes, +{ provider: P, executor_provider: E, consensus: Arc, - payload_validator: ExecutionPayloadValidator, + payload_validator: V, /// Keeps track of internals such as executed and buffered blocks. 
state: EngineApiTreeState, /// The half for sending messages to the engine. @@ -508,10 +510,12 @@ pub struct EngineApiTreeHandler { invalid_block_hook: Box, /// The engine API variant of this handler engine_kind: EngineApiKind, + /// Captures the types the engine operates on + _primtives: PhantomData, } -impl std::fmt::Debug - for EngineApiTreeHandler +impl std::fmt::Debug + for EngineApiTreeHandler { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("EngineApiTreeHandler") @@ -534,13 +538,19 @@ impl std::fmt::Debug } } -impl EngineApiTreeHandler +impl EngineApiTreeHandler where - P: DatabaseProviderFactory + BlockReader + StateProviderFactory + StateReader + Clone + 'static, + N: NodePrimitives, + P: DatabaseProviderFactory + + BlockReader + + StateProviderFactory + + StateReader + + Clone + + 'static,

::Provider: BlockReader, E: BlockExecutorProvider, T: EngineTypes, - Spec: Send + Sync + EthereumHardforks + 'static, + V: EngineValidator, { /// Creates a new [`EngineApiTreeHandler`]. #[allow(clippy::too_many_arguments)] @@ -548,7 +558,7 @@ where provider: P, executor_provider: E, consensus: Arc, - payload_validator: ExecutionPayloadValidator, + payload_validator: V, outgoing: UnboundedSender, state: EngineApiTreeState, canonical_in_memory_state: CanonicalInMemoryState, @@ -578,6 +588,7 @@ where incoming_tx, invalid_block_hook: Box::new(NoopInvalidBlockHook), engine_kind, + _primtives: Default::default(), } } @@ -596,7 +607,7 @@ where provider: P, executor_provider: E, consensus: Arc, - payload_validator: ExecutionPayloadValidator, + payload_validator: V, persistence: PersistenceHandle, payload_builder: PayloadBuilderHandle, canonical_in_memory_state: CanonicalInMemoryState, @@ -1207,8 +1218,17 @@ where match request { EngineApiRequest::InsertExecutedBlock(block) => { debug!(target: "engine::tree", block=?block.block().num_hash(), "inserting already executed block"); + let now = Instant::now(); + let sealed_block = block.block.clone(); self.state.tree_state.insert_executed(block); self.metrics.engine.inserted_already_executed_blocks.increment(1); + + self.emit_event(EngineApiEvent::BeaconConsensus( + BeaconConsensusEngineEvent::CanonicalBlockAdded( + sealed_block, + now.elapsed(), + ), + )); } EngineApiRequest::Beacon(request) => { match request { @@ -1539,8 +1559,8 @@ where .ok_or_else(|| ProviderError::HeaderNotFound(hash.into()))?; let execution_output = self .provider - .get_state(block.number)? - .ok_or_else(|| ProviderError::StateForNumberNotFound(block.number))?; + .get_state(block.number())? 
+ .ok_or_else(|| ProviderError::StateForNumberNotFound(block.number()))?; let hashed_state = execution_output.hash_state_slow(); Ok(Some(ExecutedBlock { @@ -2267,7 +2287,7 @@ where self.metrics.block_validation.record_state_root(&trie_output, root_elapsed.as_secs_f64()); debug!(target: "engine::tree", ?root_elapsed, block=?sealed_block.num_hash(), "Calculated state root"); - let executed = ExecutedBlock { + let executed: ExecutedBlock = ExecutedBlock { block: sealed_block.clone(), senders: Arc::new(block.senders), execution_output: Arc::new(ExecutionOutcome::from((output, block_number))), @@ -2607,8 +2627,9 @@ mod tests { use reth_chain_state::{test_utils::TestBlockBuilder, BlockState}; use reth_chainspec::{ChainSpec, HOLESKY, MAINNET}; use reth_engine_primitives::ForkchoiceStatus; - use reth_ethereum_engine_primitives::EthEngineTypes; + use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; use reth_evm::test_utils::MockExecutorProvider; + use reth_primitives::{BlockExt, EthPrimitives}; use reth_provider::test_utils::MockEthProvider; use reth_rpc_types_compat::engine::{block_to_payload_v1, payload::block_to_payload_v3}; use reth_trie::updates::TrieUpdates; @@ -2673,8 +2694,13 @@ mod tests { } struct TestHarness { - tree: - EngineApiTreeHandler, + tree: EngineApiTreeHandler< + EthPrimitives, + MockEthProvider, + MockExecutorProvider, + EthEngineTypes, + EthereumEngineValidator, + >, to_tree_tx: Sender>>, from_tree_rx: UnboundedReceiver, blocks: Vec, @@ -2708,7 +2734,7 @@ mod tests { let provider = MockEthProvider::default(); let executor_provider = MockExecutorProvider::default(); - let payload_validator = ExecutionPayloadValidator::new(chain_spec.clone()); + let payload_validator = EthereumEngineValidator::new(chain_spec.clone()); let (from_tree_tx, from_tree_rx) = unbounded_channel(); @@ -2933,7 +2959,7 @@ mod tests { EngineApiEvent::BeaconConsensus( BeaconConsensusEngineEvent::CanonicalBlockAdded(block, _), ) => { - assert!(block.hash() 
== expected_hash); + assert_eq!(block.hash(), expected_hash); } _ => panic!("Unexpected event: {:#?}", event), } diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index 45cf5a780310..602e87a63dbe 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -1,14 +1,29 @@ //! State root task related functionality. -use reth_provider::providers::ConsistentDbView; -use reth_trie::{updates::TrieUpdates, TrieInput}; +use alloy_primitives::map::{HashMap, HashSet}; +use reth_provider::{ + providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, +}; +use reth_trie::{ + proof::Proof, updates::TrieUpdates, HashedPostState, HashedStorage, MultiProof, Nibbles, + TrieInput, +}; +use reth_trie_db::DatabaseProof; use reth_trie_parallel::root::ParallelStateRootError; -use revm_primitives::{EvmState, B256}; -use std::sync::{ - mpsc::{self, Receiver, RecvError}, - Arc, +use reth_trie_sparse::{SparseStateTrie, SparseStateTrieResult, SparseTrieError}; +use revm_primitives::{keccak256, EvmState, B256}; +use std::{ + collections::BTreeMap, + sync::{ + mpsc::{self, Receiver, Sender}, + Arc, + }, + time::{Duration, Instant}, }; -use tracing::debug; +use tracing::{debug, error, trace}; + +/// The level below which the sparse trie hashes are calculated in [`update_sparse_trie`]. 
+const SPARSE_TRIE_INCREMENTAL_LEVEL: usize = 2; /// Result of the state root calculation pub(crate) type StateRootResult = Result<(B256, TrieUpdates), ParallelStateRootError>; @@ -43,20 +58,93 @@ pub(crate) struct StateRootConfig { pub input: Arc, } -/// Wrapper for std channel receiver to maintain compatibility with `UnboundedReceiverStream` +/// Messages used internally by the state root task +#[derive(Debug)] #[allow(dead_code)] -pub(crate) struct StdReceiverStream { - rx: Receiver, +pub(crate) enum StateRootMessage { + /// New state update from transaction execution + StateUpdate(EvmState), + /// Proof calculation completed for a specific state update + ProofCalculated { + /// The calculated proof + proof: MultiProof, + /// The state update that was used to calculate the proof + state_update: HashedPostState, + /// The index of this proof in the sequence of state updates + sequence_number: u64, + }, + /// State root calculation completed + RootCalculated { + /// The updated sparse trie + trie: Box, + /// Time taken to calculate the root + elapsed: Duration, + }, } -#[allow(dead_code)] -impl StdReceiverStream { - pub(crate) const fn new(rx: Receiver) -> Self { - Self { rx } +/// Handle to track proof calculation ordering +#[derive(Debug, Default)] +pub(crate) struct ProofSequencer { + /// The next proof sequence number to be produced. + next_sequence: u64, + /// The next sequence number expected to be delivered. 
+ next_to_deliver: u64, + /// Buffer for out-of-order proofs and corresponding state updates + pending_proofs: BTreeMap, +} + +impl ProofSequencer { + /// Creates a new proof sequencer + pub(crate) fn new() -> Self { + Self::default() } - pub(crate) fn recv(&self) -> Result { - self.rx.recv() + /// Gets the next sequence number and increments the counter + pub(crate) fn next_sequence(&mut self) -> u64 { + let seq = self.next_sequence; + self.next_sequence += 1; + seq + } + + /// Adds a proof with the corresponding state update and returns all sequential proofs and state + /// updates if we have a continuous sequence + pub(crate) fn add_proof( + &mut self, + sequence: u64, + proof: MultiProof, + state_update: HashedPostState, + ) -> Vec<(MultiProof, HashedPostState)> { + if sequence >= self.next_to_deliver { + self.pending_proofs.insert(sequence, (proof, state_update)); + } + + // return early if we don't have the next expected proof + if !self.pending_proofs.contains_key(&self.next_to_deliver) { + return Vec::new() + } + + let mut consecutive_proofs = Vec::with_capacity(self.pending_proofs.len()); + let mut current_sequence = self.next_to_deliver; + + // keep collecting proofs and state updates as long as we have consecutive sequence numbers + while let Some((proof, state_update)) = self.pending_proofs.remove(¤t_sequence) { + consecutive_proofs.push((proof, state_update)); + current_sequence += 1; + + // if we don't have the next number, stop collecting + if !self.pending_proofs.contains_key(¤t_sequence) { + break; + } + } + + self.next_to_deliver += consecutive_proofs.len() as u64; + + consecutive_proofs + } + + /// Returns true if we still have pending proofs + pub(crate) fn has_pending(&self) -> bool { + !self.pending_proofs.is_empty() } } @@ -68,25 +156,42 @@ impl StdReceiverStream { /// fetches the proofs for relevant accounts from the database and reveal them /// to the tree. /// Then it updates relevant leaves according to the result of the transaction. 
-#[allow(dead_code)] +#[derive(Debug)] pub(crate) struct StateRootTask { - /// Incoming state updates. - state_stream: StdReceiverStream, /// Task configuration. config: StateRootConfig, + /// Receiver for state root related messages. + rx: Receiver, + /// Sender for state root related messages. + tx: Sender, + /// Proof targets that have been already fetched. + fetched_proof_targets: HashMap>, + /// Proof sequencing handler. + proof_sequencer: ProofSequencer, + /// The sparse trie used for the state root calculation. If [`None`], then update is in + /// progress. + sparse_trie: Option>, } #[allow(dead_code)] impl StateRootTask where - Factory: Send + 'static, + Factory: DatabaseProviderFactory + Clone + Send + Sync + 'static, { - /// Creates a new `StateRootTask`. - pub(crate) const fn new( + /// Creates a new state root task with the unified message channel + pub(crate) fn new( config: StateRootConfig, - state_stream: StdReceiverStream, + tx: Sender, + rx: Receiver, ) -> Self { - Self { config, state_stream } + Self { + config, + rx, + tx, + fetched_proof_targets: Default::default(), + proof_sequencer: ProofSequencer::new(), + sparse_trie: Some(Box::new(SparseStateTrie::default().with_updates(true))), + } } /// Spawns the state root task and returns a handle to await its result. @@ -105,87 +210,577 @@ where } /// Handles state updates. + /// + /// Returns proof targets derived from the state update. fn on_state_update( - _view: &reth_provider::providers::ConsistentDbView, - _input: &std::sync::Arc, - _state: EvmState, - ) { - // Default implementation of state update handling - // TODO: calculate hashed state update and dispatch proof gathering for it. 
+ view: ConsistentDbView, + input: Arc, + update: EvmState, + fetched_proof_targets: &HashMap>, + proof_sequence_number: u64, + state_root_message_sender: Sender, + ) -> HashMap> { + let mut hashed_state_update = HashedPostState::default(); + for (address, account) in update { + if account.is_touched() { + let hashed_address = keccak256(address); + + let destroyed = account.is_selfdestructed(); + let info = if account.is_empty() { None } else { Some(account.info.into()) }; + hashed_state_update.accounts.insert(hashed_address, info); + + let mut changed_storage_iter = account + .storage + .into_iter() + .filter_map(|(slot, value)| { + value + .is_changed() + .then(|| (keccak256(B256::from(slot)), value.present_value)) + }) + .peekable(); + if destroyed || changed_storage_iter.peek().is_some() { + hashed_state_update.storages.insert( + hashed_address, + HashedStorage::from_iter(destroyed, changed_storage_iter), + ); + } + } + } + + let proof_targets = get_proof_targets(&hashed_state_update, fetched_proof_targets); + + // Dispatch proof gathering for this state update + let targets = proof_targets.clone(); + rayon::spawn(move || { + let provider = match view.provider_ro() { + Ok(provider) => provider, + Err(error) => { + error!(target: "engine::root", ?error, "Could not get provider"); + return; + } + }; + + // TODO: replace with parallel proof + let result = Proof::overlay_multiproof( + provider.tx_ref(), + // TODO(alexey): this clone can be expensive, we should avoid it + input.as_ref().clone(), + targets, + ); + match result { + Ok(proof) => { + let _ = state_root_message_sender.send(StateRootMessage::ProofCalculated { + proof, + state_update: hashed_state_update, + sequence_number: proof_sequence_number, + }); + } + Err(e) => { + error!(target: "engine::root", error = ?e, "Could not calculate multiproof"); + } + } + }); + + proof_targets + } + + /// Handler for new proof calculated, aggregates all the existing sequential proofs. 
+ fn on_proof( + &mut self, + sequence_number: u64, + proof: MultiProof, + state_update: HashedPostState, + ) -> Option<(MultiProof, HashedPostState)> { + let ready_proofs = self.proof_sequencer.add_proof(sequence_number, proof, state_update); + + if ready_proofs.is_empty() { + None + } else { + // Merge all ready proofs and state updates + ready_proofs.into_iter().reduce(|mut acc, (proof, state_update)| { + acc.0.extend(proof); + acc.1.extend(state_update); + acc + }) + } + } + + /// Spawns root calculation with the current state and proofs. + fn spawn_root_calculation(&mut self, state: HashedPostState, multiproof: MultiProof) { + let Some(trie) = self.sparse_trie.take() else { return }; + + trace!( + target: "engine::root", + account_proofs = multiproof.account_subtree.len(), + storage_proofs = multiproof.storages.len(), + "Spawning root calculation" + ); + + // TODO(alexey): store proof targets in `ProofSequencer` to avoid recomputing them + let targets = get_proof_targets(&state, &HashMap::default()); + + let tx = self.tx.clone(); + rayon::spawn(move || { + let result = update_sparse_trie(trie, multiproof, targets, state); + match result { + Ok((trie, elapsed)) => { + trace!( + target: "engine::root", + ?elapsed, + "Root calculation completed, sending result" + ); + let _ = tx.send(StateRootMessage::RootCalculated { trie, elapsed }); + } + Err(e) => { + error!(target: "engine::root", error = ?e, "Could not calculate state root"); + } + } + }); + } + + fn run(mut self) -> StateRootResult { + let mut current_state_update = HashedPostState::default(); + let mut current_multiproof = MultiProof::default(); + let mut updates_received = 0; + let mut proofs_processed = 0; + let mut roots_calculated = 0; + + loop { + match self.rx.recv() { + Ok(message) => match message { + StateRootMessage::StateUpdate(update) => { + updates_received += 1; + trace!( + target: "engine::root", + len = update.len(), + total_updates = updates_received, + "Received new state update" + ); + 
let targets = Self::on_state_update( + self.config.consistent_view.clone(), + self.config.input.clone(), + update, + &self.fetched_proof_targets, + self.proof_sequencer.next_sequence(), + self.tx.clone(), + ); + for (address, slots) in targets { + self.fetched_proof_targets.entry(address).or_default().extend(slots) + } + } + StateRootMessage::ProofCalculated { proof, state_update, sequence_number } => { + proofs_processed += 1; + trace!( + target: "engine::root", + sequence = sequence_number, + total_proofs = proofs_processed, + "Processing calculated proof" + ); + + if let Some((combined_proof, combined_state_update)) = + self.on_proof(sequence_number, proof, state_update) + { + if self.sparse_trie.is_none() { + current_multiproof.extend(combined_proof); + current_state_update.extend(combined_state_update); + } else { + self.spawn_root_calculation(combined_state_update, combined_proof); + } + } + } + StateRootMessage::RootCalculated { trie, elapsed } => { + roots_calculated += 1; + trace!( + target: "engine::root", + ?elapsed, + roots_calculated, + proofs = proofs_processed, + updates = updates_received, + "Computed intermediate root" + ); + self.sparse_trie = Some(trie); + + let has_new_proofs = !current_multiproof.account_subtree.is_empty() || + !current_multiproof.storages.is_empty(); + let all_proofs_received = proofs_processed >= updates_received; + let no_pending = !self.proof_sequencer.has_pending(); + + trace!( + target: "engine::root", + has_new_proofs, + all_proofs_received, + no_pending, + "State check" + ); + + // only spawn new calculation if we have accumulated new proofs + if has_new_proofs { + trace!( + target: "engine::root", + account_proofs = current_multiproof.account_subtree.len(), + storage_proofs = current_multiproof.storages.len(), + "Spawning subsequent root calculation" + ); + self.spawn_root_calculation( + std::mem::take(&mut current_state_update), + std::mem::take(&mut current_multiproof), + ); + } else if all_proofs_received && 
no_pending { + debug!( + target: "engine::root", + total_updates = updates_received, + total_proofs = proofs_processed, + roots_calculated, + "All proofs processed, ending calculation" + ); + let mut trie = self + .sparse_trie + .take() + .expect("sparse trie update should not be in progress"); + let root = trie.root().expect("sparse trie should be revealed"); + let trie_updates = trie + .take_trie_updates() + .expect("sparse trie should have updates retention enabled"); + return Ok((root, trie_updates)); + } + } + }, + Err(_) => { + // this means our internal message channel is closed, which shouldn't happen + // in normal operation since we hold both ends + error!( + target: "engine::root", + "Internal message channel closed unexpectedly" + ); + return Err(ParallelStateRootError::Other( + "Internal message channel closed unexpectedly".into(), + )); + } + } + } } } -#[allow(dead_code)] -impl StateRootTask -where - Factory: Send + 'static, -{ - fn run(self) -> StateRootResult { - while let Ok(state) = self.state_stream.recv() { - Self::on_state_update(&self.config.consistent_view, &self.config.input, state); +fn get_proof_targets( + state_update: &HashedPostState, + fetched_proof_targets: &HashMap>, +) -> HashMap> { + state_update + .accounts + .keys() + .filter(|hashed_address| !fetched_proof_targets.contains_key(*hashed_address)) + .map(|hashed_address| (*hashed_address, HashSet::default())) + .chain(state_update.storages.iter().map(|(hashed_address, storage)| { + let fetched_storage_proof_targets = fetched_proof_targets.get(hashed_address); + ( + *hashed_address, + storage + .storage + .keys() + .filter(|slot| { + !fetched_storage_proof_targets + .is_some_and(|targets| targets.contains(*slot)) + }) + .copied() + .collect(), + ) + })) + .collect() +} + +/// Updates the sparse trie with the given proofs and state, and returns the updated trie and the +/// time it took. 
+fn update_sparse_trie( + mut trie: Box, + multiproof: MultiProof, + targets: HashMap>, + state: HashedPostState, +) -> SparseStateTrieResult<(Box, Duration)> { + let started_at = Instant::now(); + + // Reveal new accounts and storage slots. + trie.reveal_multiproof(targets, multiproof)?; + + // Update storage slots with new values and calculate storage roots. + for (address, storage) in state.storages { + let storage_trie = trie.storage_trie_mut(&address).ok_or(SparseTrieError::Blind)?; + + if storage.wiped { + storage_trie.wipe(); } - // TODO: - // * keep track of proof calculation - // * keep track of intermediate root computation - // * return final state root result - Ok((B256::default(), TrieUpdates::default())) + for (slot, value) in storage.storage { + let slot_nibbles = Nibbles::unpack(slot); + if value.is_zero() { + // TODO: handle blinded node error + storage_trie.remove_leaf(&slot_nibbles)?; + } else { + storage_trie + .update_leaf(slot_nibbles, alloy_rlp::encode_fixed_size(&value).to_vec())?; + } + } + + storage_trie.root(); + } + + // Update accounts with new values + for (address, account) in state.accounts { + trie.update_account(address, account.unwrap_or_default())?; } + + trie.calculate_below_level(SPARSE_TRIE_INCREMENTAL_LEVEL); + let elapsed = started_at.elapsed(); + + Ok((trie, elapsed)) } #[cfg(test)] mod tests { use super::*; - use reth_provider::{providers::ConsistentDbView, test_utils::MockEthProvider}; - use reth_trie::TrieInput; + use reth_primitives::{Account as RethAccount, StorageEntry}; + use reth_provider::{ + providers::ConsistentDbView, test_utils::create_test_provider_factory, HashingWriter, + }; + use reth_testing_utils::generators::{self, Rng}; + use reth_trie::{test_utils::state_root, TrieInput}; use revm_primitives::{ - Account, AccountInfo, AccountStatus, Address, EvmState, EvmStorage, EvmStorageSlot, - HashMap, B256, U256, + Account as RevmAccount, AccountInfo, AccountStatus, Address, EvmState, EvmStorageSlot, + HashMap, 
B256, KECCAK_EMPTY, U256, }; use std::sync::Arc; - fn create_mock_config() -> StateRootConfig { - let factory = MockEthProvider::default(); - let view = ConsistentDbView::new(factory, None); - let input = Arc::new(TrieInput::default()); - StateRootConfig { consistent_view: view, input } - } - - fn create_mock_state() -> revm_primitives::EvmState { - let mut state_changes: EvmState = HashMap::default(); - let storage = EvmStorage::from_iter([(U256::from(1), EvmStorageSlot::new(U256::from(2)))]); - let account = Account { - info: AccountInfo { - balance: U256::from(100), - nonce: 10, - code_hash: B256::random(), - code: Default::default(), + fn convert_revm_to_reth_account(revm_account: &RevmAccount) -> RethAccount { + RethAccount { + balance: revm_account.info.balance, + nonce: revm_account.info.nonce, + bytecode_hash: if revm_account.info.code_hash == KECCAK_EMPTY { + None + } else { + Some(revm_account.info.code_hash) }, - storage, - status: AccountStatus::Loaded, - }; + } + } + + fn create_mock_state_updates(num_accounts: usize, updates_per_account: usize) -> Vec { + let mut rng = generators::rng(); + let all_addresses: Vec

= (0..num_accounts).map(|_| rng.gen()).collect(); + let mut updates = Vec::new(); + + for _ in 0..updates_per_account { + let num_accounts_in_update = rng.gen_range(1..=num_accounts); + let mut state_update = EvmState::default(); + + let selected_addresses = &all_addresses[0..num_accounts_in_update]; + + for &address in selected_addresses { + let mut storage = HashMap::default(); + if rng.gen_bool(0.7) { + for _ in 0..rng.gen_range(1..10) { + let slot = U256::from(rng.gen::()); + storage.insert( + slot, + EvmStorageSlot::new_changed(U256::ZERO, U256::from(rng.gen::())), + ); + } + } + + let account = RevmAccount { + info: AccountInfo { + balance: U256::from(rng.gen::()), + nonce: rng.gen::(), + code_hash: KECCAK_EMPTY, + code: Some(Default::default()), + }, + storage, + status: AccountStatus::Touched, + }; - let address = Address::random(); - state_changes.insert(address, account); + state_update.insert(address, account); + } - state_changes + updates.push(state_update); + } + + updates } #[test] fn test_state_root_task() { - let config = create_mock_config(); + reth_tracing::init_test_tracing(); + + let factory = create_test_provider_factory(); let (tx, rx) = std::sync::mpsc::channel(); - let stream = StdReceiverStream::new(rx); - let task = StateRootTask::new(config, stream); + let state_updates = create_mock_state_updates(10, 10); + let mut hashed_state = HashedPostState::default(); + let mut accumulated_state: HashMap)> = + HashMap::default(); + + { + let provider_rw = factory.provider_rw().expect("failed to get provider"); + + for update in &state_updates { + let account_updates = update.iter().map(|(address, account)| { + (*address, Some(convert_revm_to_reth_account(account))) + }); + provider_rw + .insert_account_for_hashing(account_updates) + .expect("failed to insert accounts"); + + let storage_updates = update.iter().map(|(address, account)| { + let storage_entries = account.storage.iter().map(|(slot, value)| { + StorageEntry { key: B256::from(*slot), 
value: value.present_value } + }); + (*address, storage_entries) + }); + provider_rw + .insert_storage_for_hashing(storage_updates) + .expect("failed to insert storage"); + } + provider_rw.commit().expect("failed to commit changes"); + } + + for update in &state_updates { + for (address, account) in update { + let hashed_address = keccak256(*address); + + if account.is_touched() { + let destroyed = account.is_selfdestructed(); + hashed_state.accounts.insert( + hashed_address, + if destroyed || account.is_empty() { + None + } else { + Some(account.info.clone().into()) + }, + ); + + if destroyed || !account.storage.is_empty() { + let storage = account + .storage + .iter() + .filter(|&(_slot, value)| (!destroyed && value.is_changed())) + .map(|(slot, value)| { + (keccak256(B256::from(*slot)), value.present_value) + }); + hashed_state + .storages + .insert(hashed_address, HashedStorage::from_iter(destroyed, storage)); + } + } + + let storage: HashMap = account + .storage + .iter() + .map(|(k, v)| (B256::from(*k), v.present_value)) + .collect(); + + let entry = accumulated_state.entry(*address).or_default(); + entry.0 = convert_revm_to_reth_account(account); + entry.1.extend(storage); + } + } + + let config = StateRootConfig { + consistent_view: ConsistentDbView::new(factory, None), + input: Arc::new(TrieInput::from_state(hashed_state)), + }; + let task = StateRootTask::new(config, tx.clone(), rx); let handle = task.spawn(); - for _ in 0..10 { - tx.send(create_mock_state()).expect("failed to send state"); + for update in state_updates { + tx.send(StateRootMessage::StateUpdate(update)).expect("failed to send state"); } drop(tx); - let result = handle.wait_for_result(); - assert!(result.is_ok(), "sync block execution failed"); + let (root_from_task, _) = handle.wait_for_result().expect("task failed"); + let root_from_base = state_root(accumulated_state); + + assert_eq!( + root_from_task, root_from_base, + "State root mismatch: task={root_from_task:?}, 
base={root_from_base:?}" + ); + } + + #[test] + fn test_add_proof_in_sequence() { + let mut sequencer = ProofSequencer::new(); + let proof1 = MultiProof::default(); + let proof2 = MultiProof::default(); + sequencer.next_sequence = 2; + + let ready = sequencer.add_proof(0, proof1, HashedPostState::default()); + assert_eq!(ready.len(), 1); + assert!(!sequencer.has_pending()); + + let ready = sequencer.add_proof(1, proof2, HashedPostState::default()); + assert_eq!(ready.len(), 1); + assert!(!sequencer.has_pending()); + } + + #[test] + fn test_add_proof_out_of_order() { + let mut sequencer = ProofSequencer::new(); + let proof1 = MultiProof::default(); + let proof2 = MultiProof::default(); + let proof3 = MultiProof::default(); + sequencer.next_sequence = 3; + + let ready = sequencer.add_proof(2, proof3, HashedPostState::default()); + assert_eq!(ready.len(), 0); + assert!(sequencer.has_pending()); + + let ready = sequencer.add_proof(0, proof1, HashedPostState::default()); + assert_eq!(ready.len(), 1); + assert!(sequencer.has_pending()); + + let ready = sequencer.add_proof(1, proof2, HashedPostState::default()); + assert_eq!(ready.len(), 2); + assert!(!sequencer.has_pending()); + } + + #[test] + fn test_add_proof_with_gaps() { + let mut sequencer = ProofSequencer::new(); + let proof1 = MultiProof::default(); + let proof3 = MultiProof::default(); + sequencer.next_sequence = 3; + + let ready = sequencer.add_proof(0, proof1, HashedPostState::default()); + assert_eq!(ready.len(), 1); + + let ready = sequencer.add_proof(2, proof3, HashedPostState::default()); + assert_eq!(ready.len(), 0); + assert!(sequencer.has_pending()); + } + + #[test] + fn test_add_proof_duplicate_sequence() { + let mut sequencer = ProofSequencer::new(); + let proof1 = MultiProof::default(); + let proof2 = MultiProof::default(); + + let ready = sequencer.add_proof(0, proof1, HashedPostState::default()); + assert_eq!(ready.len(), 1); + + let ready = sequencer.add_proof(0, proof2, 
HashedPostState::default()); + assert_eq!(ready.len(), 0); + assert!(!sequencer.has_pending()); + } + + #[test] + fn test_add_proof_batch_processing() { + let mut sequencer = ProofSequencer::new(); + let proofs: Vec<_> = (0..5).map(|_| MultiProof::default()).collect(); + sequencer.next_sequence = 5; + + sequencer.add_proof(4, proofs[4].clone(), HashedPostState::default()); + sequencer.add_proof(2, proofs[2].clone(), HashedPostState::default()); + sequencer.add_proof(1, proofs[1].clone(), HashedPostState::default()); + sequencer.add_proof(3, proofs[3].clone(), HashedPostState::default()); + + let ready = sequencer.add_proof(0, proofs[0].clone(), HashedPostState::default()); + assert_eq!(ready.len(), 5); + assert!(!sequencer.has_pending()); } } diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index 8467bba3fd2c..b68efc017f62 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -18,7 +18,7 @@ use reth_evm::{ ConfigureEvm, }; use reth_payload_validator::ExecutionPayloadValidator; -use reth_primitives::{proofs, Block, BlockBody, Receipt, Receipts}; +use reth_primitives::{proofs, Block, BlockBody, BlockExt, Receipt, Receipts}; use reth_provider::{BlockReader, ExecutionOutcome, ProviderError, StateProviderFactory}; use reth_revm::{ db::{states::bundle_state::BundleRetention, State}, @@ -27,9 +27,7 @@ use reth_revm::{ use reth_rpc_types_compat::engine::payload::block_to_payload; use reth_scroll_execution::FinalizeExecution; use reth_trie::HashedPostState; -use revm_primitives::{ - calc_excess_blob_gas, BlockEnv, CfgEnvWithHandlerCfg, EVMError, EnvWithHandlerCfg, -}; +use revm_primitives::{calc_excess_blob_gas, EVMError, EnvWithHandlerCfg}; use std::{ collections::VecDeque, future::Future, @@ -109,7 +107,7 @@ impl Stream for EngineReorg>, Engine: EngineTypes, - Provider: BlockReader + StateProviderFactory, + Provider: BlockReader + StateProviderFactory, Evm: ConfigureEvm
, Spec: EthereumHardforks, { @@ -256,7 +254,7 @@ fn create_reorg_head( next_sidecar: ExecutionPayloadSidecar, ) -> RethResult<(ExecutionPayload, ExecutionPayloadSidecar)> where - Provider: BlockReader + StateProviderFactory, + Provider: BlockReader + StateProviderFactory, Evm: ConfigureEvm
, Spec: EthereumHardforks, { @@ -299,9 +297,7 @@ where let mut state = State::builder().with_database(&mut db).with_bundle_update().build(); // Configure environments - let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); - let mut block_env = BlockEnv::default(); - evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, &reorg_target.header, U256::MAX); + let (cfg, block_env) = evm_config.cfg_and_block_env(&reorg_target.header, U256::MAX); let env = EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()); let mut evm = evm_config.evm_with_env(&mut state, env); diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index ffabe5b1952c..2c260c4a7d1c 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -118,8 +118,8 @@ impl HeaderVa for EthBeaconConsensus { fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> { - validate_header_gas(header)?; - validate_header_base_fee(header, &self.chain_spec)?; + validate_header_gas(header.header())?; + validate_header_base_fee(header.header(), &self.chain_spec)?; // EIP-4895: Beacon chain push withdrawals as operations if self.chain_spec.is_shanghai_active_at_timestamp(header.timestamp) && diff --git a/crates/ethereum/engine-primitives/Cargo.toml b/crates/ethereum/engine-primitives/Cargo.toml index e9bcd4256865..f019f6e5f2a6 100644 --- a/crates/ethereum/engine-primitives/Cargo.toml +++ b/crates/ethereum/engine-primitives/Cargo.toml @@ -16,6 +16,7 @@ reth-chainspec.workspace = true reth-primitives.workspace = true reth-engine-primitives.workspace = true reth-payload-primitives.workspace = true +reth-payload-validator.workspace = true reth-rpc-types-compat.workspace = true alloy-rlp.workspace = true reth-chain-state.workspace = true diff --git a/crates/ethereum/engine-primitives/src/lib.rs b/crates/ethereum/engine-primitives/src/lib.rs index 5addf2a18c51..beefd54ca05b 100644 --- 
a/crates/ethereum/engine-primitives/src/lib.rs +++ b/crates/ethereum/engine-primitives/src/lib.rs @@ -11,6 +11,7 @@ mod payload; use std::sync::Arc; +use alloy_rpc_types_engine::{ExecutionPayload, ExecutionPayloadSidecar, PayloadError}; pub use alloy_rpc_types_engine::{ ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, ExecutionPayloadEnvelopeV4, ExecutionPayloadV1, PayloadAttributes as EthPayloadAttributes, @@ -22,6 +23,8 @@ use reth_payload_primitives::{ validate_version_specific_fields, EngineApiMessageVersion, EngineObjectValidationError, PayloadOrAttributes, PayloadTypes, }; +use reth_payload_validator::ExecutionPayloadValidator; +use reth_primitives::{Block, SealedBlock}; /// The types used in the default mainnet ethereum beacon consensus engine. #[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)] @@ -63,13 +66,19 @@ impl PayloadTypes for EthPayloadTypes { /// Validator for the ethereum engine API. #[derive(Debug, Clone)] pub struct EthereumEngineValidator { - chain_spec: Arc, + inner: ExecutionPayloadValidator, } impl EthereumEngineValidator { /// Instantiates a new validator. pub const fn new(chain_spec: Arc) -> Self { - Self { chain_spec } + Self { inner: ExecutionPayloadValidator::new(chain_spec) } + } + + /// Returns the chain spec used by the validator. 
+ #[inline] + fn chain_spec(&self) -> &ChainSpec { + self.inner.chain_spec() } } @@ -77,12 +86,14 @@ impl EngineValidator for EthereumEngineValidator where Types: EngineTypes, { + type Block = Block; + fn validate_version_specific_fields( &self, version: EngineApiMessageVersion, payload_or_attrs: PayloadOrAttributes<'_, EthPayloadAttributes>, ) -> Result<(), EngineObjectValidationError> { - validate_version_specific_fields(&self.chain_spec, version, payload_or_attrs) + validate_version_specific_fields(self.chain_spec(), version, payload_or_attrs) } fn ensure_well_formed_attributes( @@ -90,6 +101,14 @@ where version: EngineApiMessageVersion, attributes: &EthPayloadAttributes, ) -> Result<(), EngineObjectValidationError> { - validate_version_specific_fields(&self.chain_spec, version, attributes.into()) + validate_version_specific_fields(self.chain_spec(), version, attributes.into()) + } + + fn ensure_well_formed_payload( + &self, + payload: ExecutionPayload, + sidecar: ExecutionPayloadSidecar, + ) -> Result { + self.inner.ensure_well_formed_payload(payload, sidecar) } } diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index fe2f51380114..e22cf55e5c23 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -25,7 +25,7 @@ use reth_revm::db::{BundleState, State}; use reth_scroll_execution::FinalizeExecution; use revm_primitives::{ db::{Database, DatabaseCommit}, - BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, U256, + EnvWithHandlerCfg, ResultAndState, U256, }; /// Factory for [`EthExecutionStrategy`]. 
@@ -121,10 +121,7 @@ where header: &alloy_consensus::Header, total_difficulty: U256, ) -> EnvWithHandlerCfg { - let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); - let mut block_env = BlockEnv::default(); - self.evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, header, total_difficulty); - + let (cfg, block_env) = self.evm_config.cfg_and_block_env(header, total_difficulty); EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) } } @@ -324,7 +321,9 @@ mod tests { BasicBlockExecutorProvider, BatchExecutor, BlockExecutorProvider, Executor, }; use reth_execution_types::BlockExecutionOutput; - use reth_primitives::{public_key_to_address, Account, Block, BlockBody, Transaction}; + use reth_primitives::{ + public_key_to_address, Account, Block, BlockBody, BlockExt, Transaction, + }; use reth_revm::{ database::StateProviderDatabase, test_utils::StateProviderTest, TransitionState, }; diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index 206230cd00ef..8042562357f4 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -207,17 +207,11 @@ mod tests { primitives::{BlockEnv, CfgEnv, SpecId}, JournaledState, }; - use revm_primitives::{CfgEnvWithHandlerCfg, EnvWithHandlerCfg, HandlerCfg}; + use revm_primitives::{EnvWithHandlerCfg, HandlerCfg}; use std::collections::HashSet; #[test] fn test_fill_cfg_and_block_env() { - // Create a new configuration environment - let mut cfg_env = CfgEnvWithHandlerCfg::new_with_spec_id(CfgEnv::default(), SpecId::LATEST); - - // Create a default block environment - let mut block_env = BlockEnv::default(); - // Create a default header let header = Header::default(); @@ -236,12 +230,8 @@ mod tests { // Use the `EthEvmConfig` to fill the `cfg_env` and `block_env` based on the ChainSpec, // Header, and total difficulty - EthEvmConfig::new(Arc::new(chain_spec.clone())).fill_cfg_and_block_env( - &mut cfg_env, - &mut block_env, - &header, - 
total_difficulty, - ); + let (cfg_env, _) = EthEvmConfig::new(Arc::new(chain_spec.clone())) + .cfg_and_block_env(&header, total_difficulty); // Assert that the chain ID in the `cfg_env` is correctly set to the chain ID of the // ChainSpec diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 830c648f9b96..3058a6b05642 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -41,26 +41,32 @@ revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } eyre.workspace = true [dev-dependencies] -reth.workspace = true reth-chainspec.workspace = true reth-db.workspace = true reth-exex.workspace = true reth-node-api.workspace = true +reth-node-core.workspace = true +reth-payload-primitives.workspace = true reth-e2e-test-utils.workspace = true +reth-rpc-eth-api.workspace = true reth-tasks.workspace = true -futures.workspace = true + alloy-primitives.workspace = true -alloy-genesis.workspace = true -tokio.workspace = true -serde_json.workspace = true alloy-consensus.workspace = true alloy-provider.workspace = true -rand.workspace = true +alloy-genesis.workspace = true alloy-signer.workspace = true alloy-eips.workspace = true alloy-sol-types.workspace = true alloy-contract.workspace = true alloy-rpc-types-beacon.workspace = true +alloy-rpc-types-engine.workspace = true +alloy-rpc-types-eth.workspace = true + +futures.workspace = true +tokio.workspace = true +serde_json.workspace = true +rand.workspace = true [features] default = [] diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index a2ae2374b966..a536b9dff907 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -53,7 +53,7 @@ impl EthereumNode { EthereumConsensusBuilder, > where - Node: FullNodeTypes>, + Node: FullNodeTypes>, ::Engine: PayloadTypes< BuiltPayload = EthBuiltPayload, PayloadAttributes = EthPayloadAttributes, @@ -164,7 +164,7 @@ pub struct EthereumPoolBuilder { impl 
PoolBuilder for EthereumPoolBuilder where - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine, Node: FullNodeTypes, { type Pool = EthTransactionPool; @@ -240,7 +240,7 @@ impl EthereumPayloadBuilder { pool: Pool, ) -> eyre::Result> where - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine, Node: FullNodeTypes, Evm: ConfigureEvm
, Pool: TransactionPool + Unpin + 'static, @@ -278,7 +278,7 @@ impl EthereumPayloadBuilder { impl PayloadServiceBuilder for EthereumPayloadBuilder where - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine, Node: FullNodeTypes, Pool: TransactionPool + Unpin + 'static, Types::Engine: PayloadTypes< @@ -304,7 +304,7 @@ pub struct EthereumNetworkBuilder { impl NetworkBuilder for EthereumNetworkBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes>, Pool: TransactionPool + Unpin + 'static, { async fn build_network( diff --git a/crates/ethereum/node/tests/e2e/blobs.rs b/crates/ethereum/node/tests/e2e/blobs.rs index 976727bc8158..111810514504 100644 --- a/crates/ethereum/node/tests/e2e/blobs.rs +++ b/crates/ethereum/node/tests/e2e/blobs.rs @@ -1,21 +1,17 @@ -use std::sync::Arc; - +use crate::utils::eth_payload_attributes; use alloy_consensus::constants::MAINNET_GENESIS_HASH; use alloy_genesis::Genesis; -use reth::{ - args::RpcServerArgs, - builder::{NodeBuilder, NodeConfig, NodeHandle}, - rpc::types::engine::PayloadStatusEnum, - tasks::TaskManager, -}; +use alloy_rpc_types_engine::PayloadStatusEnum; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_e2e_test_utils::{ node::NodeTestContext, transaction::TransactionTestContext, wallet::Wallet, }; +use reth_node_builder::{NodeBuilder, NodeHandle}; +use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; use reth_node_ethereum::EthereumNode; +use reth_tasks::TaskManager; use reth_transaction_pool::TransactionPool; - -use crate::utils::eth_payload_attributes; +use std::sync::Arc; #[tokio::test] async fn can_handle_blobs() -> eyre::Result<()> { diff --git a/crates/ethereum/node/tests/e2e/dev.rs b/crates/ethereum/node/tests/e2e/dev.rs index b6d0ffcfaaaf..325575998c26 100644 --- a/crates/ethereum/node/tests/e2e/dev.rs +++ b/crates/ethereum/node/tests/e2e/dev.rs @@ -1,17 +1,18 @@ -use std::sync::Arc; - +use alloy_eips::eip2718::Encodable2718; use alloy_genesis::Genesis; use alloy_primitives::{b256, 
hex}; use futures::StreamExt; -use reth::{args::DevArgs, rpc::api::eth::helpers::EthTransactions}; use reth_chainspec::ChainSpec; -use reth_node_api::FullNodeComponents; +use reth_node_api::{FullNodeComponents, FullNodePrimitives, NodeTypes}; use reth_node_builder::{ rpc::RethRpcAddOns, EngineNodeLauncher, FullNode, NodeBuilder, NodeConfig, NodeHandle, }; +use reth_node_core::args::DevArgs; use reth_node_ethereum::{node::EthereumAddOns, EthereumNode}; use reth_provider::{providers::BlockchainProvider2, CanonStateSubscriptions}; +use reth_rpc_eth_api::helpers::EthTransactions; use reth_tasks::TaskManager; +use std::sync::Arc; #[tokio::test] async fn can_run_dev_node() -> eyre::Result<()> { @@ -46,6 +47,7 @@ async fn assert_chain_advances(node: FullNode) where N: FullNodeComponents, AddOns: RethRpcAddOns, + N::Types: NodeTypes, { let mut notifications = node.provider.canonical_state_stream(); @@ -63,8 +65,8 @@ where let head = notifications.next().await.unwrap(); - let tx = head.tip().transactions().next().unwrap(); - assert_eq!(tx.hash(), hash); + let tx = &head.tip().transactions()[0]; + assert_eq!(tx.trie_hash(), hash); println!("mined transaction: {hash}"); } diff --git a/crates/ethereum/node/tests/e2e/eth.rs b/crates/ethereum/node/tests/e2e/eth.rs index cb7517c0c932..a91ccf6e391b 100644 --- a/crates/ethereum/node/tests/e2e/eth.rs +++ b/crates/ethereum/node/tests/e2e/eth.rs @@ -1,15 +1,13 @@ use crate::utils::eth_payload_attributes; use alloy_genesis::Genesis; -use reth::{ - args::RpcServerArgs, - builder::{NodeBuilder, NodeConfig, NodeHandle}, - tasks::TaskManager, -}; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_e2e_test_utils::{ node::NodeTestContext, setup, transaction::TransactionTestContext, wallet::Wallet, }; +use reth_node_builder::{NodeBuilder, NodeHandle}; +use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; use reth_node_ethereum::EthereumNode; +use reth_tasks::TaskManager; use std::sync::Arc; #[tokio::test] diff --git 
a/crates/ethereum/node/tests/e2e/p2p.rs b/crates/ethereum/node/tests/e2e/p2p.rs index 5b2a6654fbbd..f8680f47ae3e 100644 --- a/crates/ethereum/node/tests/e2e/p2p.rs +++ b/crates/ethereum/node/tests/e2e/p2p.rs @@ -7,9 +7,9 @@ use alloy_provider::{ }, Provider, ProviderBuilder, SendableTx, }; +use alloy_rpc_types_eth::TransactionRequest; use alloy_signer::SignerSync; use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng}; -use reth::rpc::types::TransactionRequest; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_e2e_test_utils::{setup, setup_engine, transaction::TransactionTestContext}; use reth_node_ethereum::EthereumNode; diff --git a/crates/ethereum/node/tests/e2e/rpc.rs b/crates/ethereum/node/tests/e2e/rpc.rs index b1a11b1b5eb6..54bfbc8205e5 100644 --- a/crates/ethereum/node/tests/e2e/rpc.rs +++ b/crates/ethereum/node/tests/e2e/rpc.rs @@ -6,17 +6,14 @@ use alloy_rpc_types_beacon::relay::{ BidTrace, BuilderBlockValidationRequestV3, BuilderBlockValidationRequestV4, SignedBidSubmissionV3, SignedBidSubmissionV4, }; +use alloy_rpc_types_engine::BlobsBundleV1; +use alloy_rpc_types_eth::TransactionRequest; use rand::{rngs::StdRng, Rng, SeedableRng}; -use reth::{ - payload::BuiltPayload, - rpc::{ - compat::engine::payload::block_to_payload_v3, - types::{engine::BlobsBundleV1, TransactionRequest}, - }, -}; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_e2e_test_utils::setup_engine; +use reth_node_core::rpc::compat::engine::payload::block_to_payload_v3; use reth_node_ethereum::EthereumNode; +use reth_payload_primitives::BuiltPayload; use std::sync::Arc; alloy_sol_types::sol! 
{ diff --git a/crates/ethereum/node/tests/e2e/utils.rs b/crates/ethereum/node/tests/e2e/utils.rs index 6e534f5dc0ed..c3743de185f5 100644 --- a/crates/ethereum/node/tests/e2e/utils.rs +++ b/crates/ethereum/node/tests/e2e/utils.rs @@ -1,5 +1,5 @@ use alloy_primitives::{Address, B256}; -use reth::rpc::types::engine::PayloadAttributes; +use alloy_rpc_types_engine::PayloadAttributes; use reth_payload_builder::EthPayloadBuilderAttributes; /// Helper function to create a new eth payload attributes diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index ee99057f3d53..76a1a783c878 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -30,13 +30,13 @@ use reth_payload_builder_primitives::PayloadBuilderError; use reth_payload_primitives::PayloadBuilderAttributes; use reth_primitives::{ proofs::{self}, - Block, BlockBody, EthereumHardforks, Receipt, + Block, BlockBody, BlockExt, EthereumHardforks, InvalidTransactionError, Receipt, }; use reth_provider::{ChainSpecProvider, StateProviderFactory}; use reth_revm::database::StateProviderDatabase; use reth_transaction_pool::{ - noop::NoopTransactionPool, BestTransactions, BestTransactionsAttributes, TransactionPool, - ValidPoolTransaction, + error::InvalidPoolTransactionError, noop::NoopTransactionPool, BestTransactions, + BestTransactionsAttributes, TransactionPool, ValidPoolTransaction, }; use reth_trie::HashedPostState; use revm::{ @@ -231,7 +231,10 @@ where // we can't fit this transaction into the block, so we need to mark it as invalid // which also removes all dependent transaction from the iterator before we can // continue - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid( + &pool_tx, + InvalidPoolTransactionError::ExceedsGasLimit(pool_tx.gas_limit(), block_gas_limit), + ); continue } @@ -253,7 +256,13 @@ where // the iterator. This is similar to the gas limit condition // for regular transactions above. 
trace!(target: "payload_builder", tx=?tx.hash, ?sum_blob_gas_used, ?tx_blob_gas, "skipping blob transaction because it would exceed the max data gas per block"); - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid( + &pool_tx, + InvalidPoolTransactionError::ExceedsGasLimit( + tx_blob_gas, + MAX_DATA_GAS_PER_BLOCK, + ), + ); continue } } @@ -273,7 +282,12 @@ where // if the transaction is invalid, we can skip it and all of its // descendants trace!(target: "payload_builder", %err, ?tx, "skipping invalid transaction and its descendants"); - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid( + &pool_tx, + InvalidPoolTransactionError::Consensus( + InvalidTransactionError::TxTypeNotSupported, + ), + ); } continue diff --git a/crates/evm/execution-types/Cargo.toml b/crates/evm/execution-types/Cargo.toml index 2a1725642fee..9b5841f0d682 100644 --- a/crates/evm/execution-types/Cargo.toml +++ b/crates/evm/execution-types/Cargo.toml @@ -12,13 +12,15 @@ workspace = true [dependencies] reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-execution-errors.workspace = true +reth-trie-common = { workspace = true, optional = true } reth-trie.workspace = true -reth-primitives-traits.workspace = true revm.workspace = true # alloy +alloy-consensus.workspace = true alloy-primitives.workspace = true alloy-eips.workspace = true @@ -36,19 +38,23 @@ default = ["std"] optimism = ["reth-primitives/optimism", "revm/optimism"] serde = [ "dep:serde", - "reth-trie/serde", + "rand/serde", "revm/serde", "alloy-eips/serde", "alloy-primitives/serde", - "rand/serde", "reth-primitives-traits/serde", + "alloy-consensus/serde", + "reth-trie/serde", + "reth-trie-common?/serde" ] serde-bincode-compat = [ + "serde", + "reth-trie-common/serde-bincode-compat", "reth-primitives/serde-bincode-compat", "reth-primitives-traits/serde-bincode-compat", - "reth-trie/serde-bincode-compat", "serde_with", "alloy-eips/serde-bincode-compat", + 
"alloy-consensus/serde-bincode-compat", ] std = [ "reth-primitives/std", @@ -57,6 +63,7 @@ std = [ "revm/std", "serde?/std", "reth-primitives-traits/std", + "alloy-consensus/std", ] scroll = [ "revm/scroll", diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs index 2d1f815aacad..143f8c312847 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -2,15 +2,16 @@ use crate::ExecutionOutcome; use alloc::{borrow::Cow, collections::BTreeMap}; -use alloy_eips::{eip1898::ForkBlock, BlockNumHash}; +use alloy_consensus::BlockHeader; +use alloy_eips::{eip1898::ForkBlock, eip2718::Encodable2718, BlockNumHash}; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash}; use core::{fmt, ops::RangeInclusive}; use reth_execution_errors::{BlockExecutionError, InternalBlockExecutionError}; use reth_primitives::{ - SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionSigned, - TransactionSignedEcRecovered, + transaction::SignedTransactionIntoRecoveredExt, SealedBlockFor, SealedBlockWithSenders, + SealedHeader, TransactionSignedEcRecovered, }; -use reth_primitives_traits::NodePrimitives; +use reth_primitives_traits::{Block, BlockBody, NodePrimitives, SignedTransaction}; use reth_trie::updates::TrieUpdates; use revm::db::BundleState; @@ -28,7 +29,7 @@ use revm::db::BundleState; #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct Chain { /// All blocks in this chain. - blocks: BTreeMap, + blocks: BTreeMap>, /// The outcome of block execution for this chain. /// /// This field contains the state of all accounts after the execution of all blocks in this @@ -49,11 +50,11 @@ impl Chain { /// /// A chain of blocks should not be empty. 
pub fn new( - blocks: impl IntoIterator, + blocks: impl IntoIterator>, execution_outcome: ExecutionOutcome, trie_updates: Option, ) -> Self { - let blocks = blocks.into_iter().map(|b| (b.number, b)).collect::>(); + let blocks = blocks.into_iter().map(|b| (b.number(), b)).collect::>(); debug_assert!(!blocks.is_empty(), "Chain should have at least one block"); Self { blocks, execution_outcome, trie_updates } @@ -61,7 +62,7 @@ impl Chain { /// Create new Chain from a single block and its state. pub fn from_block( - block: SealedBlockWithSenders, + block: SealedBlockWithSenders, execution_outcome: ExecutionOutcome, trie_updates: Option, ) -> Self { @@ -69,17 +70,17 @@ impl Chain { } /// Get the blocks in this chain. - pub const fn blocks(&self) -> &BTreeMap { + pub const fn blocks(&self) -> &BTreeMap> { &self.blocks } /// Consumes the type and only returns the blocks in this chain. - pub fn into_blocks(self) -> BTreeMap { + pub fn into_blocks(self) -> BTreeMap> { self.blocks } /// Returns an iterator over all headers in the block with increasing block numbers. - pub fn headers(&self) -> impl Iterator + '_ { + pub fn headers(&self) -> impl Iterator> + '_ { self.blocks.values().map(|block| block.header.clone()) } @@ -120,12 +121,15 @@ impl Chain { } /// Returns the block with matching hash. - pub fn block(&self, block_hash: BlockHash) -> Option<&SealedBlock> { + pub fn block(&self, block_hash: BlockHash) -> Option<&SealedBlockFor> { self.block_with_senders(block_hash).map(|block| &block.block) } /// Returns the block with matching hash. 
- pub fn block_with_senders(&self, block_hash: BlockHash) -> Option<&SealedBlockWithSenders> { + pub fn block_with_senders( + &self, + block_hash: BlockHash, + ) -> Option<&SealedBlockWithSenders> { self.blocks.iter().find_map(|(_num, block)| (block.hash() == block_hash).then_some(block)) } @@ -134,7 +138,7 @@ impl Chain { &self, block_number: BlockNumber, ) -> Option> { - if self.tip().number == block_number { + if self.tip().number() == block_number { return Some(self.execution_outcome.clone()) } @@ -152,14 +156,14 @@ impl Chain { /// 3. The optional trie updates. pub fn into_inner( self, - ) -> (ChainBlocks<'static>, ExecutionOutcome, Option) { + ) -> (ChainBlocks<'static, N::Block>, ExecutionOutcome, Option) { (ChainBlocks { blocks: Cow::Owned(self.blocks) }, self.execution_outcome, self.trie_updates) } /// Destructure the chain into its inner components: /// 1. A reference to the blocks contained in the chain. /// 2. A reference to the execution outcome representing the final state. - pub const fn inner(&self) -> (ChainBlocks<'_>, &ExecutionOutcome) { + pub const fn inner(&self) -> (ChainBlocks<'_, N::Block>, &ExecutionOutcome) { (ChainBlocks { blocks: Cow::Borrowed(&self.blocks) }, &self.execution_outcome) } @@ -169,14 +173,15 @@ impl Chain { } /// Returns an iterator over all blocks in the chain with increasing block number. - pub fn blocks_iter(&self) -> impl Iterator + '_ { + pub fn blocks_iter(&self) -> impl Iterator> + '_ { self.blocks().iter().map(|block| block.1) } /// Returns an iterator over all blocks and their receipts in the chain. 
pub fn blocks_and_receipts( &self, - ) -> impl Iterator>)> + '_ { + ) -> impl Iterator, &Vec>)> + '_ + { self.blocks_iter().zip(self.block_receipts_iter()) } @@ -184,7 +189,7 @@ impl Chain { #[track_caller] pub fn fork_block(&self) -> ForkBlock { let first = self.first(); - ForkBlock { number: first.number.saturating_sub(1), hash: first.parent_hash } + ForkBlock { number: first.number().saturating_sub(1), hash: first.parent_hash() } } /// Get the first block in this chain. @@ -193,7 +198,7 @@ impl Chain { /// /// If chain doesn't have any blocks. #[track_caller] - pub fn first(&self) -> &SealedBlockWithSenders { + pub fn first(&self) -> &SealedBlockWithSenders { self.blocks.first_key_value().expect("Chain should have at least one block").1 } @@ -203,7 +208,7 @@ impl Chain { /// /// If chain doesn't have any blocks. #[track_caller] - pub fn tip(&self) -> &SealedBlockWithSenders { + pub fn tip(&self) -> &SealedBlockWithSenders { self.blocks.last_key_value().expect("Chain should have at least one block").1 } @@ -218,7 +223,7 @@ impl Chain { /// /// If chain doesn't have any blocks. pub fn range(&self) -> RangeInclusive { - self.first().number..=self.tip().number + self.first().number()..=self.tip().number() } /// Get all receipts for the given block. @@ -230,15 +235,18 @@ impl Chain { /// Get all receipts with attachment. /// /// Attachment includes block number, block hash, transaction hash and transaction index. 
- pub fn receipts_with_attachment(&self) -> Vec> { + pub fn receipts_with_attachment(&self) -> Vec> + where + N::SignedTx: Encodable2718, + { let mut receipt_attach = Vec::with_capacity(self.blocks().len()); for ((block_num, block), receipts) in self.blocks().iter().zip(self.execution_outcome.receipts().iter()) { let mut tx_receipts = Vec::with_capacity(receipts.len()); - for (tx, receipt) in block.body.transactions().zip(receipts.iter()) { + for (tx, receipt) in block.body.transactions().iter().zip(receipts.iter()) { tx_receipts.push(( - tx.hash(), + tx.trie_hash(), receipt.as_ref().expect("receipts have not been pruned").clone(), )); } @@ -252,10 +260,10 @@ impl Chain { /// This method assumes that blocks attachment to the chain has already been validated. pub fn append_block( &mut self, - block: SealedBlockWithSenders, + block: SealedBlockWithSenders, execution_outcome: ExecutionOutcome, ) { - self.blocks.insert(block.number, block); + self.blocks.insert(block.number(), block); self.execution_outcome.extend(execution_outcome); self.trie_updates.take(); // reset } @@ -375,22 +383,22 @@ impl fmt::Display for DisplayBlocksChain<'_> { /// All blocks in the chain #[derive(Clone, Debug, Default, PartialEq, Eq)] -pub struct ChainBlocks<'a> { - blocks: Cow<'a, BTreeMap>, +pub struct ChainBlocks<'a, B: Block> { + blocks: Cow<'a, BTreeMap>>, } -impl ChainBlocks<'_> { +impl>> ChainBlocks<'_, B> { /// Creates a consuming iterator over all blocks in the chain with increasing block number. /// /// Note: this always yields at least one block. #[inline] - pub fn into_blocks(self) -> impl Iterator { + pub fn into_blocks(self) -> impl Iterator> { self.blocks.into_owned().into_values() } /// Creates an iterator over all blocks in the chain with increasing block number. #[inline] - pub fn iter(&self) -> impl Iterator { + pub fn iter(&self) -> impl Iterator)> { self.blocks.iter() } @@ -400,7 +408,7 @@ impl ChainBlocks<'_> { /// /// Chains always have at least one block. 
#[inline] - pub fn tip(&self) -> &SealedBlockWithSenders { + pub fn tip(&self) -> &SealedBlockWithSenders { self.blocks.last_key_value().expect("Chain should have at least one block").1 } @@ -410,21 +418,21 @@ impl ChainBlocks<'_> { /// /// Chains always have at least one block. #[inline] - pub fn first(&self) -> &SealedBlockWithSenders { + pub fn first(&self) -> &SealedBlockWithSenders { self.blocks.first_key_value().expect("Chain should have at least one block").1 } /// Returns an iterator over all transactions in the chain. #[inline] - pub fn transactions(&self) -> impl Iterator + '_ { - self.blocks.values().flat_map(|block| block.body.transactions()) + pub fn transactions(&self) -> impl Iterator::Transaction> + '_ { + self.blocks.values().flat_map(|block| block.body.transactions().iter()) } /// Returns an iterator over all transactions and their senders. #[inline] pub fn transactions_with_sender( &self, - ) -> impl Iterator + '_ { + ) -> impl Iterator::Transaction)> + '_ { self.blocks.values().flat_map(|block| block.transactions_with_sender()) } @@ -434,20 +442,21 @@ impl ChainBlocks<'_> { #[inline] pub fn transactions_ecrecovered( &self, - ) -> impl Iterator + '_ { + ) -> impl Iterator::Transaction>> + '_ + { self.transactions_with_sender().map(|(signer, tx)| tx.clone().with_signer(*signer)) } /// Returns an iterator over all transaction hashes in the block #[inline] pub fn transaction_hashes(&self) -> impl Iterator + '_ { - self.blocks.values().flat_map(|block| block.transactions().map(|tx| tx.hash())) + self.blocks.values().flat_map(|block| block.transactions().iter().map(|tx| tx.trie_hash())) } } -impl IntoIterator for ChainBlocks<'_> { - type Item = (BlockNumber, SealedBlockWithSenders); - type IntoIter = std::collections::btree_map::IntoIter; +impl IntoIterator for ChainBlocks<'_, B> { + type Item = (BlockNumber, SealedBlockWithSenders); + type IntoIter = std::collections::btree_map::IntoIter>; fn into_iter(self) -> Self::IntoIter { 
#[allow(clippy::unnecessary_to_owned)] @@ -511,18 +520,16 @@ pub enum ChainSplit { } /// Bincode-compatible [`Chain`] serde implementation. -#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] +#[cfg(feature = "serde-bincode-compat")] pub(super) mod serde_bincode_compat { - use std::collections::BTreeMap; - + use crate::ExecutionOutcome; use alloc::borrow::Cow; use alloy_primitives::BlockNumber; use reth_primitives::serde_bincode_compat::SealedBlockWithSenders; - use reth_trie::serde_bincode_compat::updates::TrieUpdates; + use reth_trie_common::serde_bincode_compat::updates::TrieUpdates; use serde::{ser::SerializeMap, Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; - - use crate::ExecutionOutcome; + use std::collections::BTreeMap; /// Bincode-compatible [`super::Chain`] serde implementation. /// diff --git a/crates/evm/execution-types/src/lib.rs b/crates/evm/execution-types/src/lib.rs index f98ebfe73a5f..fb872cd596e4 100644 --- a/crates/evm/execution-types/src/lib.rs +++ b/crates/evm/execution-types/src/lib.rs @@ -26,7 +26,7 @@ pub use execution_outcome::*; /// all fields are serialized. 
/// /// Read more: -#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] +#[cfg(feature = "serde-bincode-compat")] pub mod serde_bincode_compat { pub use super::chain::serde_bincode_compat::*; } diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index f01701d5989d..ae884bdd5f86 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -17,6 +17,7 @@ extern crate alloc; +use crate::builder::RethEvmBuilder; use alloy_consensus::BlockHeader as _; use alloy_primitives::{Address, Bytes, B256, U256}; use reth_primitives::TransactionSigned; @@ -24,8 +25,6 @@ use reth_primitives_traits::BlockHeader; use revm::{Database, Evm, GetInspector}; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, Env, EnvWithHandlerCfg, SpecId, TxEnv}; -use crate::builder::RethEvmBuilder; - pub mod builder; pub mod either; pub mod execute; @@ -139,9 +138,16 @@ pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { data: Bytes, ); + /// Returns a [`CfgEnvWithHandlerCfg`] for the given header. + fn cfg_env(&self, header: &Self::Header, total_difficulty: U256) -> CfgEnvWithHandlerCfg { + let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); + self.fill_cfg_env(&mut cfg, header, total_difficulty); + cfg + } + /// Fill [`CfgEnvWithHandlerCfg`] fields according to the chain spec and given header. /// - /// This must set the corresponding spec id in the handler cfg, based on timestamp or total + /// This __must__ set the corresponding spec id in the handler cfg, based on timestamp or total /// difficulty fn fill_cfg_env( &self, @@ -171,6 +177,18 @@ pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { } } + /// Creates a new [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for the given header. 
+ fn cfg_and_block_env( + &self, + header: &Self::Header, + total_difficulty: U256, + ) -> (CfgEnvWithHandlerCfg, BlockEnv) { + let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); + let mut block_env = BlockEnv::default(); + self.fill_cfg_and_block_env(&mut cfg, &mut block_env, header, total_difficulty); + (cfg, block_env) + } + /// Convenience function to call both [`fill_cfg_env`](ConfigureEvmEnv::fill_cfg_env) and /// [`ConfigureEvmEnv::fill_block_env`]. /// diff --git a/crates/exex/exex/Cargo.toml b/crates/exex/exex/Cargo.toml index be783543f950..705f216c12aa 100644 --- a/crates/exex/exex/Cargo.toml +++ b/crates/exex/exex/Cargo.toml @@ -38,6 +38,7 @@ reth-tracing.workspace = true reth-scroll-storage = { workspace = true, optional = true } # alloy +alloy-consensus.workspace = true alloy-primitives.workspace = true alloy-eips.workspace = true diff --git a/crates/exex/exex/src/backfill/job.rs b/crates/exex/exex/src/backfill/job.rs index a4754ac607f8..554336e2dfb2 100644 --- a/crates/exex/exex/src/backfill/job.rs +++ b/crates/exex/exex/src/backfill/job.rs @@ -4,12 +4,14 @@ use std::{ time::{Duration, Instant}, }; +use alloy_consensus::BlockHeader; use alloy_primitives::BlockNumber; use reth_evm::execute::{ BatchExecutor, BlockExecutionError, BlockExecutionOutput, BlockExecutorProvider, Executor, }; -use reth_primitives::{Block, BlockWithSenders, Receipt}; -use reth_primitives_traits::format_gas_throughput; +use reth_node_api::{Block as _, BlockBody as _}; +use reth_primitives::{BlockExt, BlockWithSenders, Receipt}; +use reth_primitives_traits::{format_gas_throughput, SignedTransaction}; use reth_provider::{ BlockReader, Chain, HeaderProvider, ProviderError, StateProviderFactory, TransactionVariant, }; @@ -37,7 +39,9 @@ pub struct BackfillJob { impl Iterator for BackfillJob where E: BlockExecutorProvider, - P: HeaderProvider + BlockReader + StateProviderFactory, + P: HeaderProvider + + BlockReader + + StateProviderFactory, { type Item = 
BackfillJobResult; @@ -53,7 +57,9 @@ where impl BackfillJob where E: BlockExecutorProvider, - P: BlockReader + HeaderProvider + StateProviderFactory, + P: BlockReader + + HeaderProvider + + StateProviderFactory, { /// Converts the backfill job into a single block backfill job. pub fn into_single_blocks(self) -> SingleBlockBackfillJob { @@ -106,10 +112,10 @@ where fetch_block_duration += fetch_block_start.elapsed(); - cumulative_gas += block.gas_used; + cumulative_gas += block.gas_used(); // Configure the executor to use the current state. - trace!(target: "exex::backfill", number = block_number, txs = block.body.transactions.len(), "Executing block"); + trace!(target: "exex::backfill", number = block_number, txs = block.body.transactions().len(), "Executing block"); // Execute the block let execute_start = Instant::now(); @@ -117,8 +123,7 @@ where // Unseal the block for execution let (block, senders) = block.into_components(); let (unsealed_header, hash) = block.header.split(); - let block = - Block { header: unsealed_header, body: block.body }.with_senders_unchecked(senders); + let block = P::Block::new(unsealed_header, block.body).with_senders_unchecked(senders); executor.execute_and_verify_one((&block, td).into())?; execution_duration += execute_start.elapsed(); @@ -140,7 +145,7 @@ where } } - let last_block_number = blocks.last().expect("blocks should not be empty").number; + let last_block_number = blocks.last().expect("blocks should not be empty").number(); debug!( target: "exex::backfill", range = ?*self.range.start()..=last_block_number, @@ -171,7 +176,7 @@ pub struct SingleBlockBackfillJob { impl Iterator for SingleBlockBackfillJob where E: BlockExecutorProvider, - P: HeaderProvider + BlockReader + StateProviderFactory, + P: HeaderProvider + BlockReader + StateProviderFactory, { type Item = BackfillJobResult<(BlockWithSenders, BlockExecutionOutput)>; @@ -183,7 +188,7 @@ where impl SingleBlockBackfillJob where E: BlockExecutorProvider, - P: HeaderProvider 
+ BlockReader + StateProviderFactory, + P: HeaderProvider + BlockReader + StateProviderFactory, { /// Converts the single block backfill job into a stream. pub fn into_stream( @@ -195,7 +200,7 @@ where pub(crate) fn execute_block( &self, block_number: u64, - ) -> BackfillJobResult<(BlockWithSenders, BlockExecutionOutput)> { + ) -> BackfillJobResult<(BlockWithSenders, BlockExecutionOutput)> { let td = self .provider .header_td_by_number(block_number)? @@ -212,7 +217,7 @@ where self.provider.history_by_block_number(block_number.saturating_sub(1))?, )); - trace!(target: "exex::backfill", number = block_number, txs = block_with_senders.block.body.transactions.len(), "Executing block"); + trace!(target: "exex::backfill", number = block_number, txs = block_with_senders.block.body().transactions().len(), "Executing block"); let block_execution_output = executor.execute((&block_with_senders, td).into())?; diff --git a/crates/exex/exex/src/backfill/stream.rs b/crates/exex/exex/src/backfill/stream.rs index c55b8651daf1..46177ceda122 100644 --- a/crates/exex/exex/src/backfill/stream.rs +++ b/crates/exex/exex/src/backfill/stream.rs @@ -103,7 +103,13 @@ where impl Stream for StreamBackfillJob where E: BlockExecutorProvider + Clone + Send + 'static, - P: HeaderProvider + BlockReader + StateProviderFactory + Clone + Send + Unpin + 'static, + P: HeaderProvider + + BlockReader + + StateProviderFactory + + Clone + + Send + + Unpin + + 'static, { type Item = BackfillJobResult; @@ -136,7 +142,13 @@ where impl Stream for StreamBackfillJob where E: BlockExecutorProvider + Clone + Send + 'static, - P: HeaderProvider + BlockReader + StateProviderFactory + Clone + Send + Unpin + 'static, + P: HeaderProvider + + BlockReader + + StateProviderFactory + + Clone + + Send + + Unpin + + 'static, { type Item = BackfillJobResult; diff --git a/crates/exex/exex/src/backfill/test_utils.rs b/crates/exex/exex/src/backfill/test_utils.rs index 169d2d758de7..6d93314e22bd 100644 --- 
a/crates/exex/exex/src/backfill/test_utils.rs +++ b/crates/exex/exex/src/backfill/test_utils.rs @@ -11,7 +11,7 @@ use reth_evm::execute::{ use reth_evm_ethereum::execute::EthExecutorProvider; use reth_node_api::FullNodePrimitives; use reth_primitives::{ - Block, BlockBody, BlockWithSenders, Receipt, SealedBlockWithSenders, Transaction, + Block, BlockBody, BlockExt, BlockWithSenders, Receipt, SealedBlockWithSenders, Transaction, }; use reth_provider::{ providers::ProviderNodeTypes, BlockWriter as _, ExecutionOutcome, LatestStateProviderRef, @@ -58,7 +58,13 @@ pub(crate) fn execute_block_and_commit_to_database( block: &BlockWithSenders, ) -> eyre::Result> where - N: ProviderNodeTypes>, + N: ProviderNodeTypes< + Primitives: FullNodePrimitives< + Block = reth_primitives::Block, + BlockBody = reth_primitives::BlockBody, + Receipt = reth_primitives::Receipt, + >, + >, { let provider = provider_factory.provider()?; @@ -162,7 +168,13 @@ pub(crate) fn blocks_and_execution_outputs( key_pair: Keypair, ) -> eyre::Result)>> where - N: ProviderNodeTypes>, + N: ProviderNodeTypes< + Primitives: FullNodePrimitives< + Block = reth_primitives::Block, + BlockBody = reth_primitives::BlockBody, + Receipt = reth_primitives::Receipt, + >, + >, { let (block1, block2) = blocks(chain_spec.clone(), key_pair)?; @@ -184,7 +196,8 @@ pub(crate) fn blocks_and_execution_outcome( ) -> eyre::Result<(Vec, ExecutionOutcome)> where N: ProviderNodeTypes, - N::Primitives: FullNodePrimitives, + N::Primitives: + FullNodePrimitives, { let (block1, block2) = blocks(chain_spec.clone(), key_pair)?; diff --git a/crates/exex/exex/src/context.rs b/crates/exex/exex/src/context.rs index 4e0d9f5956c7..3d303c9bbac0 100644 --- a/crates/exex/exex/src/context.rs +++ b/crates/exex/exex/src/context.rs @@ -3,6 +3,7 @@ use reth_exex_types::ExExHead; use reth_node_api::{FullNodeComponents, NodeTypes}; use reth_node_core::node_config::NodeConfig; use reth_primitives::Head; +use reth_provider::BlockReader; use 
reth_tasks::TaskExecutor; use std::fmt::Debug; use tokio::sync::mpsc::UnboundedSender; @@ -56,7 +57,7 @@ where impl ExExContext where Node: FullNodeComponents, - Node::Provider: Debug, + Node::Provider: Debug + BlockReader, Node::Executor: Debug, { /// Returns dynamic version of the context @@ -106,13 +107,19 @@ where /// Sets notifications stream to [`crate::ExExNotificationsWithoutHead`], a stream of /// notifications without a head. - pub fn set_notifications_without_head(&mut self) { + pub fn set_notifications_without_head(&mut self) + where + Node::Provider: BlockReader, + { self.notifications.set_without_head(); } /// Sets notifications stream to [`crate::ExExNotificationsWithHead`], a stream of notifications /// with the provided head. - pub fn set_notifications_with_head(&mut self, head: ExExHead) { + pub fn set_notifications_with_head(&mut self, head: ExExHead) + where + Node::Provider: BlockReader, + { self.notifications.set_with_head(head); } } @@ -121,6 +128,7 @@ where mod tests { use reth_exex_types::ExExHead; use reth_node_api::FullNodeComponents; + use reth_provider::BlockReader; use crate::ExExContext; @@ -132,7 +140,10 @@ mod tests { ctx: ExExContext, } - impl ExEx { + impl ExEx + where + Node::Provider: BlockReader, + { async fn _test_bounds(mut self) -> eyre::Result<()> { self.ctx.pool(); self.ctx.block_executor(); diff --git a/crates/exex/exex/src/dyn_context.rs b/crates/exex/exex/src/dyn_context.rs index b48a6ebc951f..3ce0f488f40c 100644 --- a/crates/exex/exex/src/dyn_context.rs +++ b/crates/exex/exex/src/dyn_context.rs @@ -6,6 +6,7 @@ use std::fmt::Debug; use reth_chainspec::{EthChainSpec, Head}; use reth_node_api::FullNodeComponents; use reth_node_core::node_config::NodeConfig; +use reth_provider::BlockReader; use tokio::sync::mpsc; use crate::{ExExContext, ExExEvent, ExExNotificationsStream}; @@ -51,7 +52,7 @@ impl Debug for ExExContextDyn { impl From> for ExExContextDyn where Node: FullNodeComponents, - Node::Provider: Debug, + 
Node::Provider: Debug + BlockReader, Node::Executor: Debug, { fn from(ctx: ExExContext) -> Self { diff --git a/crates/exex/exex/src/manager.rs b/crates/exex/exex/src/manager.rs index e3d3a3c06901..ea5ddf2e8c62 100644 --- a/crates/exex/exex/src/manager.rs +++ b/crates/exex/exex/src/manager.rs @@ -1235,7 +1235,7 @@ mod tests { genesis_block.number + 1, BlockParams { parent: Some(genesis_hash), ..Default::default() }, ) - .seal_with_senders() + .seal_with_senders::() .unwrap(); let provider_rw = provider_factory.database_provider_rw().unwrap(); provider_rw.insert_block(block.clone(), StorageLocation::Database).unwrap(); diff --git a/crates/exex/exex/src/notifications.rs b/crates/exex/exex/src/notifications.rs index baf504166d19..954a057fc09c 100644 --- a/crates/exex/exex/src/notifications.rs +++ b/crates/exex/exex/src/notifications.rs @@ -90,7 +90,12 @@ impl ExExNotifications { impl ExExNotificationsStream for ExExNotifications where - P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, + P: BlockReader + + HeaderProvider + + StateProviderFactory + + Clone + + Unpin + + 'static, E: BlockExecutorProvider + Clone + Unpin + 'static, { fn set_without_head(&mut self) { @@ -139,7 +144,12 @@ where impl Stream for ExExNotifications where - P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, + P: BlockReader + + HeaderProvider + + StateProviderFactory + + Clone + + Unpin + + 'static, E: BlockExecutorProvider + Clone + Unpin + 'static, { type Item = eyre::Result; @@ -262,7 +272,12 @@ impl ExExNotificationsWithHead { impl ExExNotificationsWithHead where - P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, + P: BlockReader + + HeaderProvider + + StateProviderFactory + + Clone + + Unpin + + 'static, E: BlockExecutorProvider + Clone + Unpin + 'static, { /// Checks if the ExEx head is on the canonical chain. 
@@ -339,7 +354,12 @@ where impl Stream for ExExNotificationsWithHead where - P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, + P: BlockReader + + HeaderProvider + + StateProviderFactory + + Clone + + Unpin + + 'static, E: BlockExecutorProvider + Clone + Unpin + 'static, { type Item = eyre::Result; @@ -400,7 +420,7 @@ mod tests { use futures::StreamExt; use reth_db_common::init::init_genesis; use reth_evm_ethereum::execute::EthExecutorProvider; - use reth_primitives::Block; + use reth_primitives::{Block, BlockExt}; use reth_provider::{ providers::BlockchainProvider2, test_utils::create_test_provider_factory, BlockWriter, Chain, DatabaseProviderFactory, StorageLocation, @@ -567,7 +587,7 @@ mod tests { genesis_block.number + 1, BlockParams { parent: Some(genesis_hash), tx_count: Some(0), ..Default::default() }, ) - .seal_with_senders() + .seal_with_senders::() .ok_or_eyre("failed to recover senders")?; let node_head = Head { number: node_head_block.number, diff --git a/crates/exex/exex/src/wal/mod.rs b/crates/exex/exex/src/wal/mod.rs index 41a7829a70f3..066fbe1b58c1 100644 --- a/crates/exex/exex/src/wal/mod.rs +++ b/crates/exex/exex/src/wal/mod.rs @@ -268,21 +268,25 @@ mod tests { // Create 4 canonical blocks and one reorged block with number 2 let blocks = random_block_range(&mut rng, 0..=3, BlockRangeParams::default()) .into_iter() - .map(|block| block.seal_with_senders().ok_or_eyre("failed to recover senders")) + .map(|block| { + block + .seal_with_senders::() + .ok_or_eyre("failed to recover senders") + }) .collect::>>()?; let block_1_reorged = random_block( &mut rng, 1, BlockParams { parent: Some(blocks[0].hash()), ..Default::default() }, ) - .seal_with_senders() + .seal_with_senders::() .ok_or_eyre("failed to recover senders")?; let block_2_reorged = random_block( &mut rng, 2, BlockParams { parent: Some(blocks[1].hash()), ..Default::default() }, ) - .seal_with_senders() + .seal_with_senders::() .ok_or_eyre("failed to recover 
senders")?; // Create notifications for the above blocks. diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index 4e1491c8aa0e..ddd363712e04 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -48,7 +48,7 @@ use reth_node_ethereum::{ EthEngineTypes, EthEvmConfig, }; use reth_payload_builder::noop::NoopPayloadBuilderService; -use reth_primitives::{EthPrimitives, Head, SealedBlockWithSenders}; +use reth_primitives::{BlockExt, EthPrimitives, Head, SealedBlockWithSenders}; use reth_provider::{ providers::{BlockchainProvider, StaticFileProvider}, BlockReader, EthStorage, ProviderFactory, @@ -268,7 +268,7 @@ pub async fn test_exex_context_with_chain_spec( let (static_dir, _) = create_test_static_files_dir(); let db = create_test_rw_db(); - let provider_factory = ProviderFactory::new( + let provider_factory = ProviderFactory::>::new( db, chain_spec.clone(), StaticFileProvider::read_write(static_dir.into_path()).expect("static file provider"), @@ -292,7 +292,7 @@ pub async fn test_exex_context_with_chain_spec( let (_, payload_builder) = NoopPayloadBuilderService::::new(); - let components = NodeAdapter::, _>, _> { + let components = NodeAdapter::, _> { components: Components { transaction_pool, evm_config, @@ -309,7 +309,7 @@ pub async fn test_exex_context_with_chain_spec( .block_by_hash(genesis_hash)? .ok_or_else(|| eyre::eyre!("genesis block not found"))? .seal_slow() - .seal_with_senders() + .seal_with_senders::() .ok_or_else(|| eyre::eyre!("failed to recover senders"))?; let head = Head { diff --git a/crates/fs-util/src/lib.rs b/crates/fs-util/src/lib.rs index d242ecc98e2d..c1aa4900e03f 100644 --- a/crates/fs-util/src/lib.rs +++ b/crates/fs-util/src/lib.rs @@ -210,6 +210,12 @@ impl FsPathError { } } +/// Wrapper for [`File::open`]. 
+pub fn open(path: impl AsRef) -> Result { + let path = path.as_ref(); + File::open(path).map_err(|err| FsPathError::open(err, path)) +} + /// Wrapper for `std::fs::read_to_string` pub fn read_to_string(path: impl AsRef) -> Result { let path = path.as_ref(); diff --git a/crates/net/eth-wire-types/src/broadcast.rs b/crates/net/eth-wire-types/src/broadcast.rs index 7d74085d355a..25ce7f3b3504 100644 --- a/crates/net/eth-wire-types/src/broadcast.rs +++ b/crates/net/eth-wire-types/src/broadcast.rs @@ -1,15 +1,14 @@ //! Types for broadcasting new data. use crate::{EthMessage, EthVersion, NetworkPrimitives}; +use alloy_primitives::{Bytes, TxHash, B256, U128}; use alloy_rlp::{ Decodable, Encodable, RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper, }; - -use alloy_primitives::{Bytes, TxHash, B256, U128}; use derive_more::{Constructor, Deref, DerefMut, From, IntoIterator}; use reth_codecs_derive::{add_arbitrary_tests, generate_tests}; -use reth_primitives::{PooledTransactionsElement, TransactionSigned}; - +use reth_primitives::TransactionSigned; +use reth_primitives_traits::SignedTransaction; use std::{ collections::{HashMap, HashSet}, mem, @@ -555,7 +554,7 @@ pub trait HandleVersionedMempoolData { fn msg_version(&self) -> EthVersion; } -impl HandleMempoolData for Vec { +impl HandleMempoolData for Vec { fn is_empty(&self) -> bool { self.is_empty() } @@ -565,7 +564,7 @@ impl HandleMempoolData for Vec { } fn retain_by_hash(&mut self, mut f: impl FnMut(&TxHash) -> bool) { - self.retain(|tx| f(tx.hash())) + self.retain(|tx| f(tx.tx_hash())) } } diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index e54000895a79..a7d8a98fae6d 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -147,7 +147,11 @@ where impl NetworkConfig where - C: BlockReader + HeaderProvider + Clone + Unpin + 'static, + C: BlockReader + + HeaderProvider + + Clone + + Unpin + + 'static, { /// Starts the networking stack given a 
[`NetworkConfig`] and returns a handle to the network. pub async fn start_network(self) -> Result { diff --git a/crates/net/network/src/eth_requests.rs b/crates/net/network/src/eth_requests.rs index 0f9348a42ce4..bb45507bdbdb 100644 --- a/crates/net/network/src/eth_requests.rs +++ b/crates/net/network/src/eth_requests.rs @@ -15,7 +15,7 @@ use reth_eth_wire::{ use reth_network_api::test_utils::PeersHandle; use reth_network_p2p::error::RequestResult; use reth_network_peers::PeerId; -use reth_primitives::BlockBody; +use reth_primitives_traits::Block; use reth_storage_api::{BlockReader, HeaderProvider, ReceiptProvider}; use std::{ future::Future, @@ -80,7 +80,7 @@ impl EthRequestHandler { impl EthRequestHandler where - C: BlockReader + HeaderProvider + ReceiptProvider, + C: BlockReader + HeaderProvider + ReceiptProvider, { /// Returns the list of requested headers fn get_headers_response(&self, request: GetBlockHeaders) -> Vec
{ @@ -157,7 +157,9 @@ where &self, _peer_id: PeerId, request: GetBlockBodies, - response: oneshot::Sender>>, + response: oneshot::Sender< + RequestResult::Body>>, + >, ) { self.metrics.eth_bodies_requests_received_total.increment(1); let mut bodies = Vec::new(); @@ -166,8 +168,7 @@ where for hash in request.0 { if let Some(block) = self.client.block_by_hash(hash).unwrap_or_default() { - let body: BlockBody = block.into(); - + let (_, body) = block.split(); total_bytes += body.length(); bodies.push(body); @@ -223,7 +224,9 @@ where /// This should be spawned or used as part of `tokio::select!`. impl Future for EthRequestHandler where - C: BlockReader + HeaderProvider + Unpin, + C: BlockReader + + HeaderProvider + + Unpin, { type Output = (); diff --git a/crates/net/network/src/peers.rs b/crates/net/network/src/peers.rs index d4b762e3e12c..f8d18e159946 100644 --- a/crates/net/network/src/peers.rs +++ b/crates/net/network/src/peers.rs @@ -375,7 +375,7 @@ impl PeersManager { if peer.is_trusted() || peer.is_static() { // For misbehaving trusted or static peers, we provide a bit more leeway when // penalizing them. - ban_duration = self.backoff_durations.medium; + ban_duration = self.backoff_durations.low / 2; } } diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index 473c76c260f0..5d7c0a9f6541 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -385,10 +385,7 @@ impl NetworkState { } /// Handle the outcome of processed response, for example directly queue another request. 
- fn on_block_response_outcome( - &mut self, - outcome: BlockResponseOutcome, - ) -> Option> { + fn on_block_response_outcome(&mut self, outcome: BlockResponseOutcome) { match outcome { BlockResponseOutcome::Request(peer, request) => { self.handle_block_request(peer, request); @@ -397,7 +394,6 @@ impl NetworkState { self.peers_manager.apply_reputation_change(&peer, reputation_change); } } - None } /// Invoked when received a response from a connected peer. @@ -405,21 +401,19 @@ impl NetworkState { /// Delegates the response result to the fetcher which may return an outcome specific /// instruction that needs to be handled in [`Self::on_block_response_outcome`]. This could be /// a follow-up request or an instruction to slash the peer's reputation. - fn on_eth_response( - &mut self, - peer: PeerId, - resp: PeerResponseResult, - ) -> Option> { - match resp { + fn on_eth_response(&mut self, peer: PeerId, resp: PeerResponseResult) { + let outcome = match resp { PeerResponseResult::BlockHeaders(res) => { - let outcome = self.state_fetcher.on_block_headers_response(peer, res)?; - self.on_block_response_outcome(outcome) + self.state_fetcher.on_block_headers_response(peer, res) } PeerResponseResult::BlockBodies(res) => { - let outcome = self.state_fetcher.on_block_bodies_response(peer, res)?; - self.on_block_response_outcome(outcome) + self.state_fetcher.on_block_bodies_response(peer, res) } _ => None, + }; + + if let Some(outcome) = outcome { + self.on_block_response_outcome(outcome); } } @@ -443,13 +437,14 @@ impl NetworkState { } } - // need to buffer results here to make borrow checker happy - let mut closed_sessions = Vec::new(); - let mut received_responses = Vec::new(); + loop { + // need to buffer results here to make borrow checker happy + let mut closed_sessions = Vec::new(); + let mut received_responses = Vec::new(); - // poll all connected peers for responses - for (id, peer) in &mut self.active_peers { - if let Some(mut response) = peer.pending_response.take() 
{ + // poll all connected peers for responses + for (id, peer) in &mut self.active_peers { + let Some(mut response) = peer.pending_response.take() else { continue }; match response.poll(cx) { Poll::Ready(res) => { // check if the error is due to a closed channel to the session @@ -460,7 +455,8 @@ "Request canceled, response channel from session closed." ); // if the channel is closed, this means the peer session is also - // closed, in which case we can invoke the [Self::on_closed_session] + // closed, in which case we can invoke the + // [Self::on_closed_session] // immediately, preventing followup requests and propagate the // connection dropped error closed_sessions.push(*id); @@ -474,15 +470,17 @@ } }; } - } - for peer in closed_sessions { - self.on_session_closed(peer) - } + for peer in closed_sessions { + self.on_session_closed(peer) + } + + if received_responses.is_empty() { + break; + } - for (peer_id, resp) in received_responses { - if let Some(action) = self.on_eth_response(peer_id, resp) { - self.queued_messages.push_back(action); + for (peer_id, resp) in received_responses { + self.on_eth_response(peer_id, resp); } } @@ -491,6 +489,8 @@ self.on_peer_action(action); } + // We need to poll again in case we have received any responses because they may have + // triggered follow-up requests. 
if self.queued_messages.is_empty() { return Poll::Pending } diff --git a/crates/net/network/src/test_utils/testnet.rs b/crates/net/network/src/test_utils/testnet.rs index a64084f2cf9b..9801ecf9293a 100644 --- a/crates/net/network/src/test_utils/testnet.rs +++ b/crates/net/network/src/test_utils/testnet.rs @@ -194,7 +194,11 @@ where impl Testnet where - C: BlockReader + HeaderProvider + Clone + Unpin + 'static, + C: BlockReader + + HeaderProvider + + Clone + + Unpin + + 'static, Pool: TransactionPool + Unpin + 'static, { /// Spawns the testnet to a separate task @@ -253,7 +257,10 @@ impl fmt::Debug for Testnet { impl Future for Testnet where - C: BlockReader + HeaderProvider + Unpin + 'static, + C: BlockReader + + HeaderProvider + + Unpin + + 'static, Pool: TransactionPool + Unpin + 'static, { type Output = (); @@ -448,7 +455,10 @@ where impl Future for Peer where - C: BlockReader + HeaderProvider + Unpin + 'static, + C: BlockReader + + HeaderProvider + + Unpin + + 'static, Pool: TransactionPool + Unpin + 'static, { type Output = (); diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index 0833f677409d..180a619fff9e 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -45,6 +45,7 @@ use reth_eth_wire::{ DedupPayload, EthVersion, GetPooledTransactions, HandleMempoolData, HandleVersionedMempoolData, PartiallyValidData, RequestTxHashes, ValidAnnouncementData, }; +use reth_eth_wire_types::{EthNetworkPrimitives, NetworkPrimitives}; use reth_network_api::PeerRequest; use reth_network_p2p::error::{RequestError, RequestResult}; use reth_network_peers::PeerId; @@ -68,7 +69,7 @@ use validation::FilterOutcome; /// new requests on announced hashes. #[derive(Debug)] #[pin_project] -pub struct TransactionFetcher { +pub struct TransactionFetcher { /// All peers with to which a [`GetPooledTransactions`] request is inflight. 
pub active_peers: LruMap, /// All currently active [`GetPooledTransactions`] requests. @@ -77,7 +78,7 @@ pub struct TransactionFetcher { /// It's disjoint from the set of hashes which are awaiting an idle fallback peer in order to /// be fetched. #[pin] - pub inflight_requests: FuturesUnordered, + pub inflight_requests: FuturesUnordered>, /// Hashes that are awaiting an idle fallback peer so they can be fetched. /// /// This is a subset of all hashes in the fetcher, and is disjoint from the set of hashes for @@ -93,9 +94,7 @@ pub struct TransactionFetcher { metrics: TransactionFetcherMetrics, } -// === impl TransactionFetcher === - -impl TransactionFetcher { +impl TransactionFetcher { /// Removes the peer from the active set. pub(crate) fn remove_peer(&mut self, peer_id: &PeerId) { self.active_peers.remove(peer_id); @@ -429,7 +428,7 @@ impl TransactionFetcher { /// the request by checking the transactions seen by the peer against the buffer. pub fn on_fetch_pending_hashes( &mut self, - peers: &HashMap, + peers: &HashMap>, has_capacity_wrt_pending_pool_imports: impl Fn(usize) -> bool, ) { let init_capacity_req = approx_capacity_get_pooled_transactions_req_eth68(&self.info); @@ -632,7 +631,7 @@ impl TransactionFetcher { pub fn request_transactions_from_peer( &mut self, new_announced_hashes: RequestTxHashes, - peer: &PeerMetadata, + peer: &PeerMetadata, ) -> Option { let peer_id: PeerId = peer.request_tx.peer_id; let conn_eth_version = peer.version; @@ -896,7 +895,9 @@ impl TransactionFetcher { approx_capacity_get_pooled_transactions_req_eth66() } } +} +impl TransactionFetcher { /// Processes a resolved [`GetPooledTransactions`] request. Queues the outcome as a /// [`FetchEvent`], which will then be streamed by /// [`TransactionsManager`](super::TransactionsManager). 
@@ -1044,7 +1045,7 @@ impl Stream for TransactionFetcher { } } -impl Default for TransactionFetcher { +impl Default for TransactionFetcher { fn default() -> Self { Self { active_peers: LruMap::new(DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS), @@ -1091,13 +1092,13 @@ impl TxFetchMetadata { /// Represents possible events from fetching transactions. #[derive(Debug)] -pub enum FetchEvent { +pub enum FetchEvent { /// Triggered when transactions are successfully fetched. TransactionsFetched { /// The ID of the peer from which transactions were fetched. peer_id: PeerId, /// The transactions that were fetched, if available. - transactions: PooledTransactions, + transactions: PooledTransactions, }, /// Triggered when there is an error in fetching transactions. FetchError { @@ -1115,22 +1116,22 @@ pub enum FetchEvent { /// An inflight request for [`PooledTransactions`] from a peer. #[derive(Debug)] -pub struct GetPooledTxRequest { +pub struct GetPooledTxRequest { peer_id: PeerId, /// Transaction hashes that were requested, for cleanup purposes requested_hashes: RequestTxHashes, - response: oneshot::Receiver>, + response: oneshot::Receiver>>, } /// Upon reception of a response, a [`GetPooledTxRequest`] is deconstructed to form a /// [`GetPooledTxResponse`]. #[derive(Debug)] -pub struct GetPooledTxResponse { +pub struct GetPooledTxResponse { peer_id: PeerId, /// Transaction hashes that were requested, for cleanup purposes, since peer may only return a /// subset of requested hashes. 
requested_hashes: RequestTxHashes, - result: Result, RecvError>, + result: Result>, RecvError>, } /// Stores the response receiver made by sending a [`GetPooledTransactions`] request to a peer's @@ -1138,24 +1139,24 @@ pub struct GetPooledTxResponse { #[must_use = "futures do nothing unless polled"] #[pin_project::pin_project] #[derive(Debug)] -pub struct GetPooledTxRequestFut { +pub struct GetPooledTxRequestFut { #[pin] - inner: Option, + inner: Option>, } -impl GetPooledTxRequestFut { +impl GetPooledTxRequestFut { #[inline] const fn new( peer_id: PeerId, requested_hashes: RequestTxHashes, - response: oneshot::Receiver>, + response: oneshot::Receiver>>, ) -> Self { Self { inner: Some(GetPooledTxRequest { peer_id, requested_hashes, response }) } } } -impl Future for GetPooledTxRequestFut { - type Output = GetPooledTxResponse; +impl Future for GetPooledTxRequestFut { + type Output = GetPooledTxResponse; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let mut req = self.as_mut().project().inner.take().expect("polled after completion"); @@ -1372,7 +1373,7 @@ mod test { // RIG TEST - let tx_fetcher = &mut TransactionFetcher::default(); + let tx_fetcher = &mut TransactionFetcher::::default(); let eth68_hashes = [ B256::from_slice(&[1; 32]), diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 9628dbb4f1ba..d533aee102b3 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -212,7 +212,7 @@ pub struct TransactionsManager>>, /// Transaction fetcher to handle inflight and missing transaction requests. - transaction_fetcher: TransactionFetcher, + transaction_fetcher: TransactionFetcher, /// All currently pending transactions grouped by peers. /// /// This way we can track incoming transactions and prevent multiple pool imports for the same @@ -235,7 +235,7 @@ pub struct TransactionsManager, /// All the connected peers. 
- peers: HashMap, + peers: HashMap>, /// Send half for the command channel. /// /// This is kept so that a new [`TransactionsHandle`] can be created at any time. @@ -681,9 +681,13 @@ where impl TransactionsManager where Pool: TransactionPool, - N: NetworkPrimitives, + N: NetworkPrimitives< + BroadcastedTransaction: SignedTransaction, + PooledTransaction: SignedTransaction, + >, <::Transaction as PoolTransaction>::Consensus: Into, + <::Transaction as PoolTransaction>::Pooled: Into, { /// Invoked when transactions in the local mempool are considered __pending__. /// @@ -955,43 +959,45 @@ where // notify pool so events get fired self.pool.on_propagated(propagated); } -} -impl TransactionsManager -where - Pool: TransactionPool + 'static, - <::Transaction as PoolTransaction>::Consensus: Into, -{ /// Request handler for an incoming request for transactions fn on_get_pooled_transactions( &mut self, peer_id: PeerId, request: GetPooledTransactions, - response: oneshot::Sender>, + response: oneshot::Sender>>, ) { if let Some(peer) = self.peers.get_mut(&peer_id) { if self.network.tx_gossip_disabled() { let _ = response.send(Ok(PooledTransactions::default())); return } - let transactions = self.pool.get_pooled_transaction_elements( + let transactions = self.pool.get_pooled_transactions_as::( request.0, GetPooledTransactionLimit::ResponseSizeSoftLimit( self.transaction_fetcher.info.soft_limit_byte_size_pooled_transactions_response, ), ); - trace!(target: "net::tx::propagation", sent_txs=?transactions.iter().map(|tx| *tx.hash()), "Sending requested transactions to peer"); + trace!(target: "net::tx::propagation", sent_txs=?transactions.iter().map(|tx| tx.tx_hash()), "Sending requested transactions to peer"); // we sent a response at which point we assume that the peer is aware of the // transactions - peer.seen_transactions.extend(transactions.iter().map(|tx| *tx.hash())); + peer.seen_transactions.extend(transactions.iter().map(|tx| *tx.tx_hash())); let resp = 
PooledTransactions(transactions); let _ = response.send(Ok(resp)); } } +} +impl TransactionsManager +where + Pool: TransactionPool + 'static, + <::Transaction as PoolTransaction>::Consensus: Into, + <::Transaction as PoolTransaction>::Pooled: + Into, +{ /// Handles dedicated transaction events related to the `eth` protocol. fn on_network_tx_event(&mut self, event: NetworkTransactionEvent) { match event { @@ -1291,6 +1297,8 @@ impl Future for TransactionsManager where Pool: TransactionPool + Unpin + 'static, <::Transaction as PoolTransaction>::Consensus: Into, + <::Transaction as PoolTransaction>::Pooled: + Into, { type Output = (); @@ -1723,23 +1731,23 @@ impl TransactionSource { /// Tracks a single peer in the context of [`TransactionsManager`]. #[derive(Debug)] -pub struct PeerMetadata { +pub struct PeerMetadata { /// Optimistically keeps track of transactions that we know the peer has seen. Optimistic, in /// the sense that transactions are preemptively marked as seen by peer when they are sent to /// the peer. seen_transactions: LruCache, /// A communication channel directly to the peer's session task. - request_tx: PeerRequestSender, + request_tx: PeerRequestSender>, /// negotiated version of the session. version: EthVersion, /// The peer's client version. client_version: Arc, } -impl PeerMetadata { +impl PeerMetadata { /// Returns a new instance of [`PeerMetadata`]. 
fn new( - request_tx: PeerRequestSender, + request_tx: PeerRequestSender>, version: EthVersion, client_version: Arc, max_transactions_seen_by_peer: u32, @@ -2178,7 +2186,7 @@ mod tests { .await; assert!(!pool.is_empty()); - assert!(pool.get(signed_tx.hash_ref()).is_some()); + assert!(pool.get(signed_tx.tx_hash()).is_some()); handle.terminate().await; } diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 65ae704fe831..06d5294d800a 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -10,6 +10,7 @@ use crate::{ DefaultNodeLauncher, LaunchNode, Node, NodeHandle, }; use futures::Future; +use reth_blockchain_tree::externals::NodeTypesForTree; use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks}; use reth_cli_util::get_secret_key; use reth_db_api::{ @@ -33,7 +34,7 @@ use reth_node_core::{ }; use reth_provider::{ providers::{BlockchainProvider, NodeTypesForProvider}, - ChainSpecProvider, FullProvider, + BlockReader, ChainSpecProvider, FullProvider, }; use reth_tasks::TaskExecutor; use reth_transaction_pool::{PoolConfig, TransactionPool}; @@ -243,7 +244,7 @@ where /// Configures the types of the node. pub fn with_types(self) -> NodeBuilderWithTypes> where - T: NodeTypesWithEngine + NodeTypesForProvider, + T: NodeTypesWithEngine + NodeTypesForTree, { self.with_types_and_provider() } @@ -267,7 +268,7 @@ where node: N, ) -> NodeBuilderWithComponents, N::ComponentsBuilder, N::AddOns> where - N: Node, ChainSpec = ChainSpec> + NodeTypesForProvider, + N: Node, ChainSpec = ChainSpec> + NodeTypesForTree, { self.with_types().with_components(node.components_builder()).with_add_ons(node.add_ons()) } @@ -304,7 +305,7 @@ where /// Configures the types of the node. 
pub fn with_types(self) -> WithLaunchContext>> where - T: NodeTypesWithEngine + NodeTypesForProvider, + T: NodeTypesWithEngine + NodeTypesForTree, { WithLaunchContext { builder: self.builder.with_types(), task_executor: self.task_executor } } @@ -335,7 +336,7 @@ where NodeBuilderWithComponents, N::ComponentsBuilder, N::AddOns>, > where - N: Node, ChainSpec = ChainSpec> + NodeTypesForProvider, + N: Node, ChainSpec = ChainSpec> + NodeTypesForTree, { self.with_types().with_components(node.components_builder()).with_add_ons(node.add_ons()) } @@ -358,7 +359,7 @@ where >, > where - N: Node, ChainSpec = ChainSpec> + NodeTypesForProvider, + N: Node, ChainSpec = ChainSpec> + NodeTypesForTree, N::AddOns: RethRpcAddOns< NodeAdapter< RethFullAdapter, @@ -553,10 +554,9 @@ where impl WithLaunchContext, CB, AO>> where DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, - T: NodeTypesWithEngine + NodeTypesForProvider, + T: NodeTypesWithEngine + NodeTypesForTree, CB: NodeComponentsBuilder>, AO: RethRpcAddOns, CB::Components>>, - T::Primitives: FullNodePrimitives, { /// Launches the node with the [`DefaultNodeLauncher`] that sets up engine API consensus and rpc pub async fn launch( @@ -651,6 +651,8 @@ impl BuilderContext { pub fn start_network(&self, builder: NetworkBuilder<(), ()>, pool: Pool) -> NetworkHandle where Pool: TransactionPool + Unpin + 'static, + Node::Provider: + BlockReader, { self.start_network_with(builder, pool, Default::default()) } @@ -669,6 +671,8 @@ impl BuilderContext { ) -> NetworkHandle where Pool: TransactionPool + Unpin + 'static, + Node::Provider: + BlockReader, { let (handle, network, txpool, eth) = builder .transactions(pool, tx_config) diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 47ec68ff0d7d..830909c8cc4c 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -11,10 +11,6 @@ use alloy_primitives::{BlockNumber, B256}; use 
eyre::{Context, OptionExt}; use rayon::ThreadPoolBuilder; use reth_beacon_consensus::EthBeaconConsensus; -use reth_blockchain_tree::{ - externals::TreeNodeTypes, BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, - TreeExternals, -}; use reth_chainspec::{Chain, EthChainSpec, EthereumHardforks}; use reth_config::{config::EtlConfig, PruneConfig}; use reth_consensus::Consensus; @@ -46,10 +42,9 @@ use reth_node_metrics::{ }; use reth_primitives::Head; use reth_provider::{ - providers::{BlockchainProvider, BlockchainProvider2, ProviderNodeTypes, StaticFileProvider}, - BlockHashReader, BlockNumReader, CanonStateNotificationSender, ChainSpecProvider, - ProviderError, ProviderFactory, ProviderResult, StageCheckpointReader, StateProviderFactory, - StaticFileProviderFactory, TreeViewer, + providers::{ProviderNodeTypes, StaticFileProvider}, + BlockHashReader, BlockNumReader, ChainSpecProvider, ProviderError, ProviderFactory, + ProviderResult, StageCheckpointReader, StateProviderFactory, StaticFileProviderFactory, }; use reth_prune::{PruneModes, PrunerBuilder}; use reth_rpc_api::clients::EthApiClient; @@ -65,27 +60,6 @@ use tokio::sync::{ oneshot, watch, }; -/// Allows to set a tree viewer for a configured blockchain provider. -// TODO: remove this helper trait once the engine revamp is done, the new -// blockchain provider won't require a TreeViewer. -// https://github.com/paradigmxyz/reth/issues/8742 -pub trait WithTree { - /// Setter for tree viewer. - fn set_tree(self, tree: Arc) -> Self; -} - -impl WithTree for BlockchainProvider { - fn set_tree(self, tree: Arc) -> Self { - self.with_tree(tree) - } -} - -impl WithTree for BlockchainProvider2 { - fn set_tree(self, _tree: Arc) -> Self { - self - } -} - /// Reusable setup for launching a node. /// /// This provides commonly used boilerplate for launching a node. 
@@ -408,7 +382,11 @@ where pub async fn create_provider_factory(&self) -> eyre::Result> where N: ProviderNodeTypes, - N::Primitives: FullNodePrimitives, + N::Primitives: FullNodePrimitives< + Block = reth_primitives::Block, + BlockBody = reth_primitives::BlockBody, + Receipt = reth_primitives::Receipt, + >, { let factory = ProviderFactory::new( self.right().clone(), @@ -475,7 +453,11 @@ where ) -> eyre::Result, ProviderFactory>>> where N: ProviderNodeTypes, - N::Primitives: FullNodePrimitives, + N::Primitives: FullNodePrimitives< + Block = reth_primitives::Block, + BlockBody = reth_primitives::BlockBody, + Receipt = reth_primitives::Receipt, + >, { let factory = self.create_provider_factory().await?; let ctx = LaunchContextWith { @@ -610,8 +592,6 @@ where pub fn with_blockchain_db( self, create_blockchain_provider: F, - tree_config: BlockchainTreeConfig, - canon_state_notification_sender: CanonStateNotificationSender, ) -> eyre::Result, WithMeteredProviders>>> where T: FullNodeTypes, @@ -625,8 +605,6 @@ where metrics_sender: self.sync_metrics_tx(), }, blockchain_db, - tree_config, - canon_state_notification_sender, }; let ctx = LaunchContextWith { @@ -643,7 +621,7 @@ impl Attached::ChainSpec>, WithMeteredProviders>, > where - T: FullNodeTypes, + T: FullNodeTypes, { /// Returns access to the underlying database. pub const fn database(&self) -> &::DB { @@ -674,16 +652,6 @@ where &self.right().blockchain_db } - /// Returns a reference to the `BlockchainTreeConfig`. - pub const fn tree_config(&self) -> &BlockchainTreeConfig { - &self.right().tree_config - } - - /// Returns the `CanonStateNotificationSender`. - pub fn canon_state_notification_sender(&self) -> CanonStateNotificationSender { - self.right().canon_state_notification_sender.clone() - } - /// Creates a `NodeAdapter` and attaches it to the launch context. 
pub async fn with_components( self, @@ -712,31 +680,13 @@ where debug!(target: "reth::cli", "creating components"); let components = components_builder.build_components(&builder_ctx).await?; - let consensus: Arc = Arc::new(components.consensus().clone()); - - let tree_externals = TreeExternals::new( - self.provider_factory().clone().with_prune_modes(self.prune_modes()), - consensus.clone(), - components.block_executor().clone(), - ); - let tree = BlockchainTree::new(tree_externals, *self.tree_config())? - .with_sync_metrics_tx(self.sync_metrics_tx()) - // Note: This is required because we need to ensure that both the components and the - // tree are using the same channel for canon state notifications. This will be removed - // once the Blockchain provider no longer depends on an instance of the tree - .with_canon_state_notification_sender(self.canon_state_notification_sender()); - - let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); - - // Replace the tree component with the actual tree - let blockchain_db = self.blockchain_db().clone().set_tree(blockchain_tree); - - debug!(target: "reth::cli", "configured blockchain tree"); + let blockchain_db = self.blockchain_db().clone(); + let consensus = Arc::new(components.consensus().clone()); let node_adapter = NodeAdapter { components, task_executor: self.task_executor().clone(), - provider: blockchain_db.clone(), + provider: blockchain_db, }; debug!(target: "reth::cli", "calling on_component_initialized hook"); @@ -747,8 +697,6 @@ where provider_factory: self.provider_factory().clone(), metrics_sender: self.sync_metrics_tx(), }, - blockchain_db, - tree_config: self.right().tree_config, node_adapter, head, consensus, @@ -768,7 +716,7 @@ impl Attached::ChainSpec>, WithComponents>, > where - T: FullNodeTypes, + T: FullNodeTypes, CB: NodeComponentsBuilder, { /// Returns the configured `ProviderFactory`. 
@@ -805,9 +753,14 @@ where &self.right().node_adapter } + /// Returns mutable reference to the configured `NodeAdapter`. + pub fn node_adapter_mut(&mut self) -> &mut NodeAdapter { + &mut self.right_mut().node_adapter + } + /// Returns a reference to the blockchain provider. pub const fn blockchain_db(&self) -> &T::Provider { - &self.right().blockchain_db + &self.node_adapter().provider } /// Returns the initial backfill to sync to at launch. @@ -912,11 +865,6 @@ where self.right().db_provider_container.metrics_sender.clone() } - /// Returns a reference to the `BlockchainTreeConfig`. - pub const fn tree_config(&self) -> &BlockchainTreeConfig { - &self.right().tree_config - } - /// Returns the node adapter components. pub const fn components(&self) -> &CB::Components { &self.node_adapter().components @@ -928,10 +876,7 @@ impl Attached::ChainSpec>, WithComponents>, > where - T: FullNodeTypes< - Provider: WithTree + StateProviderFactory + ChainSpecProvider, - Types: ProviderNodeTypes, - >, + T: FullNodeTypes, CB: NodeComponentsBuilder, { /// Returns the [`InvalidBlockHook`] to use for the node. @@ -1063,7 +1008,7 @@ pub struct WithMeteredProvider { metrics_sender: UnboundedSender, } -/// Helper container to bundle the [`ProviderFactory`], [`BlockchainProvider`] +/// Helper container to bundle the [`ProviderFactory`], [`FullNodeTypes::Provider`] /// and a metrics sender. #[allow(missing_debug_implementations)] pub struct WithMeteredProviders @@ -1072,8 +1017,6 @@ where { db_provider_container: WithMeteredProvider, blockchain_db: T::Provider, - canon_state_notification_sender: CanonStateNotificationSender, - tree_config: BlockchainTreeConfig, } /// Helper container to bundle the metered providers container and [`NodeAdapter`]. 
@@ -1084,8 +1027,6 @@ where CB: NodeComponentsBuilder, { db_provider_container: WithMeteredProvider, - tree_config: BlockchainTreeConfig, - blockchain_db: T::Provider, node_adapter: NodeAdapter, head: Head, consensus: Arc, diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index ef1edc899ebe..b1141314d106 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -5,13 +5,13 @@ use reth_beacon_consensus::{ hooks::{EngineHooks, StaticFileHook}, BeaconConsensusEngineHandle, }; -use reth_blockchain_tree::BlockchainTreeConfig; use reth_chainspec::EthChainSpec; use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider}; use reth_engine_local::{LocalEngineService, LocalPayloadAttributesBuilder}; use reth_engine_service::service::{ChainEvent, EngineService}; use reth_engine_tree::{ engine::{EngineApiRequest, EngineRequestHandler}, + persistence::PersistenceNodeTypes, tree::TreeConfig, }; use reth_engine_util::EngineMessageStreamExt; @@ -19,8 +19,8 @@ use reth_exex::ExExManagerHandle; use reth_network::{NetworkSyncUpdater, SyncState}; use reth_network_api::BlockDownloaderProvider; use reth_node_api::{ - BuiltPayload, FullNodePrimitives, FullNodeTypes, NodeTypesWithEngine, PayloadAttributesBuilder, - PayloadBuilder, PayloadTypes, + BlockTy, BuiltPayload, EngineValidator, FullNodeTypes, NodeTypesWithEngine, + PayloadAttributesBuilder, PayloadBuilder, PayloadTypes, }; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, @@ -28,7 +28,7 @@ use reth_node_core::{ primitives::Head, }; use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; -use reth_primitives::EthereumHardforks; +use reth_primitives::{EthPrimitives, EthereumHardforks}; use reth_provider::providers::{BlockchainProvider2, ProviderNodeTypes}; use reth_tasks::TaskExecutor; use reth_tokio_util::EventSender; @@ -40,7 +40,7 @@ use tokio_stream::wrappers::UnboundedReceiverStream; use crate::{ 
common::{Attached, LaunchContextWith, WithConfigs}, hooks::NodeHooks, - rpc::{RethRpcAddOns, RpcHandle}, + rpc::{EngineValidatorAddOn, RethRpcAddOns, RpcHandle}, setup::build_networked_pipeline, AddOns, AddOnsContext, ExExLauncher, FullNode, LaunchContext, LaunchNode, NodeAdapter, NodeBuilderWithComponents, NodeComponents, NodeComponentsBuilder, NodeHandle, NodeTypesAdapter, @@ -70,14 +70,22 @@ impl EngineNodeLauncher { impl LaunchNode> for EngineNodeLauncher where - Types: ProviderNodeTypes + NodeTypesWithEngine, + Types: + ProviderNodeTypes + NodeTypesWithEngine + PersistenceNodeTypes, T: FullNodeTypes>, CB: NodeComponentsBuilder, - AO: RethRpcAddOns>, + AO: RethRpcAddOns> + + EngineValidatorAddOn< + NodeAdapter, + Validator: EngineValidator< + ::Engine, + Block = BlockTy, + >, + >, + LocalPayloadAttributesBuilder: PayloadAttributesBuilder< <::Engine as PayloadTypes>::PayloadAttributes, >, - Types::Primitives: FullNodePrimitives, { type Node = NodeHandle, AO>; @@ -94,15 +102,6 @@ where } = target; let NodeHooks { on_component_initialized, on_node_started, .. } = hooks; - // TODO: move tree_config and canon_state_notification_sender - // initialization to with_blockchain_db once the engine revamp is done - // https://github.com/paradigmxyz/reth/issues/8742 - let tree_config = BlockchainTreeConfig::default(); - - // NOTE: This is a temporary workaround to provide the canon state notification sender to the components builder because there's a cyclic dependency between the blockchain provider and the tree component. This will be removed once the Blockchain provider no longer depends on an instance of the tree: - let (canon_state_notification_sender, _receiver) = - tokio::sync::broadcast::channel(tree_config.max_reorg_depth() as usize * 2); - // setup the launch context let ctx = ctx .with_configured_globals() @@ -132,7 +131,7 @@ where // later the components. .with_blockchain_db::(move |provider_factory| { Ok(BlockchainProvider2::new(provider_factory)?) 
- }, tree_config, canon_state_notification_sender)? + })? .with_components(components_builder, on_component_initialized).await?; // spawn exexs @@ -204,10 +203,24 @@ where pruner_builder.finished_exex_height(exex_manager_handle.finished_height()); } let pruner = pruner_builder.build_with_provider_factory(ctx.provider_factory().clone()); - let pruner_events = pruner.events(); info!(target: "reth::cli", prune_config=?ctx.prune_config().unwrap_or_default(), "Pruner initialized"); + let event_sender = EventSender::default(); + let beacon_engine_handle = + BeaconConsensusEngineHandle::new(consensus_engine_tx.clone(), event_sender.clone()); + + // extract the jwt secret from the args if possible + let jwt_secret = ctx.auth_jwt_secret()?; + + let add_ons_ctx = AddOnsContext { + node: ctx.node_adapter().clone(), + config: ctx.node_config(), + beacon_engine_handle: beacon_engine_handle.clone(), + jwt_secret, + }; + let engine_payload_validator = add_ons.engine_validator(&add_ons_ctx).await?; + let mut engine_service = if ctx.is_dev() { let eth_service = LocalEngineService::new( ctx.consensus(), @@ -216,6 +229,7 @@ where ctx.blockchain_db().clone(), pruner, ctx.components().payload_builder().clone(), + engine_payload_validator, engine_tree_config, ctx.invalid_block_hook()?, ctx.sync_metrics_tx(), @@ -239,6 +253,7 @@ where ctx.blockchain_db().clone(), pruner, ctx.components().payload_builder().clone(), + engine_payload_validator, engine_tree_config, ctx.invalid_block_hook()?, ctx.sync_metrics_tx(), @@ -247,11 +262,6 @@ where Either::Right(eth_service) }; - let event_sender = EventSender::default(); - - let beacon_engine_handle = - BeaconConsensusEngineHandle::new(consensus_engine_tx, event_sender.clone()); - info!(target: "reth::cli", "Consensus engine initialized"); let events = stream_select!( @@ -277,16 +287,6 @@ where ), ); - // extract the jwt secret from the args if possible - let jwt_secret = ctx.auth_jwt_secret()?; - - let add_ons_ctx = AddOnsContext { - node: 
ctx.node_adapter().clone(), - config: ctx.node_config(), - beacon_engine_handle, - jwt_secret, - }; - let RpcHandle { rpc_server_handles, rpc_registry } = add_ons.launch_add_ons(add_ons_ctx).await?; diff --git a/crates/node/builder/src/launch/exex.rs b/crates/node/builder/src/launch/exex.rs index a3640690c1dc..0eef0d005763 100644 --- a/crates/node/builder/src/launch/exex.rs +++ b/crates/node/builder/src/launch/exex.rs @@ -10,7 +10,7 @@ use reth_exex::{ DEFAULT_EXEX_MANAGER_CAPACITY, }; use reth_node_api::{FullNodeComponents, NodeTypes}; -use reth_primitives::Head; +use reth_primitives::{EthPrimitives, Head}; use reth_provider::CanonStateSubscriptions; use reth_tracing::tracing::{debug, info}; use tracing::Instrument; @@ -25,7 +25,9 @@ pub struct ExExLauncher { config_container: WithConfigs<::ChainSpec>, } -impl ExExLauncher { +impl> + Clone> + ExExLauncher +{ /// Create a new `ExExLauncher` with the given extensions. pub const fn new( head: Head, diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index a1819948ee48..9f2c027f76b5 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -16,13 +16,16 @@ use reth_beacon_consensus::{ hooks::{EngineHooks, PruneHook, StaticFileHook}, BeaconConsensusEngine, }; -use reth_blockchain_tree::{noop::NoopBlockchainTree, BlockchainTreeConfig}; +use reth_blockchain_tree::{ + externals::TreeNodeTypes, noop::NoopBlockchainTree, BlockchainTree, BlockchainTreeConfig, + ShareableBlockchainTree, TreeExternals, +}; use reth_chainspec::EthChainSpec; use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider, RpcBlockProvider}; use reth_engine_util::EngineMessageStreamExt; use reth_exex::ExExManagerHandle; use reth_network::BlockDownloaderProvider; -use reth_node_api::{AddOnsContext, FullNodePrimitives, FullNodeTypes, NodeTypesWithEngine}; +use reth_node_api::{AddOnsContext, FullNodeTypes, NodeTypesWithEngine}; use reth_node_core::{ 
dirs::{ChainPath, DataDirPath}, exit::NodeExitFuture, @@ -67,7 +70,7 @@ pub trait LaunchNode { type Node; /// Create and return a new node asynchronously. - fn launch_node(self, target: Target) -> impl Future> + Send; + fn launch_node(self, target: Target) -> impl Future>; } impl LaunchNode for F @@ -77,7 +80,7 @@ where { type Node = Node; - fn launch_node(self, target: Target) -> impl Future> + Send { + fn launch_node(self, target: Target) -> impl Future> { self(target) } } @@ -98,11 +101,10 @@ impl DefaultNodeLauncher { impl LaunchNode> for DefaultNodeLauncher where - Types: ProviderNodeTypes + NodeTypesWithEngine, + Types: ProviderNodeTypes + NodeTypesWithEngine + TreeNodeTypes, T: FullNodeTypes, Types = Types>, CB: NodeComponentsBuilder, AO: RethRpcAddOns>, - Types::Primitives: FullNodePrimitives, { type Node = NodeHandle, AO>; @@ -133,7 +135,7 @@ where )); // setup the launch context - let ctx = ctx + let mut ctx = ctx .with_configured_globals() // load the toml config .with_loaded_toml_config(config)? @@ -161,9 +163,29 @@ where // later the components. .with_blockchain_db::(move |provider_factory| { Ok(BlockchainProvider::new(provider_factory, tree)?) - }, tree_config, canon_state_notification_sender)? + })? .with_components(components_builder, on_component_initialized).await?; + let consensus = Arc::new(ctx.components().consensus().clone()); + + let tree_externals = TreeExternals::new( + ctx.provider_factory().clone(), + consensus.clone(), + ctx.components().block_executor().clone(), + ); + let tree = BlockchainTree::new(tree_externals, tree_config)? + .with_sync_metrics_tx(ctx.sync_metrics_tx()) + // Note: This is required because we need to ensure that both the components and the + // tree are using the same channel for canon state notifications. 
This will be removed + // once the Blockchain provider no longer depends on an instance of the tree + .with_canon_state_notification_sender(canon_state_notification_sender); + + let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); + + ctx.node_adapter_mut().provider = ctx.blockchain_db().clone().with_tree(blockchain_tree); + + debug!(target: "reth::cli", "configured blockchain tree"); + // spawn exexs let exex_manager_handle = ExExLauncher::new( ctx.head(), diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index fda8b66f8d79..55313f3e9898 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -18,6 +18,7 @@ use reth_node_core::{ version::{CARGO_PKG_VERSION, CLIENT_CODE, NAME_CLIENT, VERGEN_GIT_SHA}, }; use reth_payload_builder::PayloadStore; +use reth_primitives::EthPrimitives; use reth_provider::providers::ProviderNodeTypes; use reth_rpc::{ eth::{EthApiTypes, FullEthApiServer}, @@ -402,7 +403,7 @@ where impl RpcAddOns where N: FullNodeComponents< - Types: ProviderNodeTypes, + Types: ProviderNodeTypes, PayloadBuilder: PayloadBuilder::Engine>, >, EthApi: EthApiTypes + FullEthApiServer + AddDevSigners + Unpin + 'static, @@ -524,7 +525,7 @@ where impl NodeAddOns for RpcAddOns where N: FullNodeComponents< - Types: ProviderNodeTypes, + Types: ProviderNodeTypes, PayloadBuilder: PayloadBuilder::Engine>, >, EthApi: EthApiTypes + FullEthApiServer + AddDevSigners + Unpin + 'static, @@ -566,19 +567,43 @@ pub trait EthApiBuilder: 'static { fn build(ctx: &EthApiBuilderCtx) -> Self; } -impl EthApiBuilder for EthApi { +impl>> EthApiBuilder + for EthApi +{ fn build(ctx: &EthApiBuilderCtx) -> Self { Self::with_spawner(ctx) } } +/// Helper trait that provides the validator for the engine API +pub trait EngineValidatorAddOn: Send { + /// The Validator type to use for the engine API. + type Validator: EngineValidator<::Engine>; + + /// Creates the engine validator for an engine API based node. 
+ fn engine_validator( + &self, + ctx: &AddOnsContext<'_, Node>, + ) -> impl Future>; +} + +impl EngineValidatorAddOn for RpcAddOns +where + N: FullNodeComponents, + EthApi: EthApiTypes, + EV: EngineValidatorBuilder, +{ + type Validator = EV::Validator; + + async fn engine_validator(&self, ctx: &AddOnsContext<'_, N>) -> eyre::Result { + self.engine_validator_builder.clone().build(ctx).await + } +} + /// A type that knows how to build the engine validator. -pub trait EngineValidatorBuilder: Send { +pub trait EngineValidatorBuilder: Send + Sync + Clone { /// The consensus implementation to build. - type Validator: EngineValidator<::Engine> - + Clone - + Unpin - + 'static; + type Validator: EngineValidator<::Engine>; /// Creates the engine validator. fn build( @@ -592,7 +617,7 @@ where Node: FullNodeComponents, Validator: EngineValidator<::Engine> + Clone + Unpin + 'static, - F: FnOnce(&AddOnsContext<'_, Node>) -> Fut + Send, + F: FnOnce(&AddOnsContext<'_, Node>) -> Fut + Send + Sync + Clone, Fut: Future> + Send, { type Validator = Validator; diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index 3258ba8fe544..092c1fdf6518 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -14,7 +14,7 @@ use reth_exex::ExExManagerHandle; use reth_network_p2p::{ bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader, EthBlockClient, }; -use reth_node_api::{FullNodePrimitives, NodePrimitives}; +use reth_node_api::{BodyTy, FullNodePrimitives}; use reth_provider::{providers::ProviderNodeTypes, ProviderFactory}; use reth_stages::{prelude::DefaultStages, stages::ExecutionStage, Pipeline, StageSet}; use reth_static_file::StaticFileProducer; @@ -41,7 +41,11 @@ where N: ProviderNodeTypes, Client: EthBlockClient + 'static, Executor: BlockExecutorProvider, - N::Primitives: FullNodePrimitives, + N::Primitives: FullNodePrimitives< + Block = reth_primitives::Block, + BlockBody = reth_primitives::BlockBody, + 
Receipt = reth_primitives::Receipt, + >, { // building network downloaders using the fetch client let header_downloader = ReverseHeadersDownloaderBuilder::new(config.headers) @@ -87,11 +91,13 @@ pub fn build_pipeline( where N: ProviderNodeTypes, H: HeaderDownloader
+ 'static, - B: BodyDownloader< - Body = <::Block as reth_node_api::Block>::Body, - > + 'static, + B: BodyDownloader> + 'static, Executor: BlockExecutorProvider, - N::Primitives: FullNodePrimitives, + N::Primitives: FullNodePrimitives< + Block = reth_primitives::Block, + BlockBody = reth_primitives::BlockBody, + Receipt = reth_primitives::Receipt, + >, { let mut builder = Pipeline::::builder(); diff --git a/crates/node/types/src/lib.rs b/crates/node/types/src/lib.rs index a23b9bfe4146..c0d266e57755 100644 --- a/crates/node/types/src/lib.rs +++ b/crates/node/types/src/lib.rs @@ -233,6 +233,9 @@ where type Engine = E; } +/// Helper adapter type for accessing [`NodePrimitives::Block`] on [`NodeTypes`]. +pub type BlockTy = <::Primitives as NodePrimitives>::Block; + /// Helper adapter type for accessing [`NodePrimitives::BlockHeader`] on [`NodeTypes`]. pub type HeaderTy = <::Primitives as NodePrimitives>::BlockHeader; @@ -241,3 +244,6 @@ pub type BodyTy = <::Primitives as NodePrimitives>::BlockBody /// Helper adapter type for accessing [`NodePrimitives::SignedTx`] on [`NodeTypes`]. pub type TxTy = <::Primitives as NodePrimitives>::SignedTx; + +/// Helper adapter type for accessing [`NodePrimitives::Receipt`] on [`NodeTypes`]. 
+pub type ReceiptTy = <::Primitives as NodePrimitives>::Receipt; diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index 5eb0f0e99e03..b13eac3b52ea 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -26,6 +26,7 @@ reth-execution-types.workspace = true reth-node-core.workspace = true reth-optimism-node.workspace = true reth-primitives.workspace = true +reth-fs-util.workspace = true # so jemalloc metrics can be included reth-node-metrics.workspace = true diff --git a/crates/optimism/cli/src/commands/import_receipts.rs b/crates/optimism/cli/src/commands/import_receipts.rs index 049e160ae23a..a5c12a48cfbd 100644 --- a/crates/optimism/cli/src/commands/import_receipts.rs +++ b/crates/optimism/cli/src/commands/import_receipts.rs @@ -15,11 +15,11 @@ use reth_execution_types::ExecutionOutcome; use reth_node_core::version::SHORT_VERSION; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_primitives::bedrock::is_dup_tx; -use reth_primitives::Receipts; +use reth_primitives::{NodePrimitives, Receipts}; use reth_provider::{ providers::ProviderNodeTypes, writer::UnifiedStorageWriter, DatabaseProviderFactory, OriginalValuesKnown, ProviderFactory, StageCheckpointReader, StageCheckpointWriter, - StateWriter, StaticFileProviderFactory, StaticFileWriter, StatsReader, + StateWriter, StaticFileProviderFactory, StatsReader, StorageLocation, }; use reth_stages::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; @@ -85,7 +85,10 @@ pub async fn import_receipts_from_file( filter: F, ) -> eyre::Result<()> where - N: ProviderNodeTypes, + N: ProviderNodeTypes< + ChainSpec = OpChainSpec, + Primitives: NodePrimitives, + >, P: AsRef, F: FnMut(u64, &mut Receipts) -> usize, { @@ -123,7 +126,7 @@ pub async fn import_receipts_from_reader( mut filter: F, ) -> eyre::Result where - N: ProviderNodeTypes, + N: ProviderNodeTypes>, F: FnMut(u64, &mut Receipts) -> usize, { let static_file_provider = 
provider_factory.static_file_provider(); @@ -219,11 +222,11 @@ where ExecutionOutcome::new(Default::default(), receipts, first_block, Default::default()); // finally, write the receipts - let mut storage_writer = UnifiedStorageWriter::from( - &provider, - static_file_provider.latest_writer(StaticFileSegment::Receipts)?, - ); - storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::Yes)?; + provider.write_state( + execution_outcome, + OriginalValuesKnown::Yes, + StorageLocation::StaticFiles, + )?; } // Only commit if we have imported as many receipts as the number of transactions. diff --git a/crates/optimism/cli/src/commands/init_state.rs b/crates/optimism/cli/src/commands/init_state.rs index 6a36f492c503..7bbfc3bb820f 100644 --- a/crates/optimism/cli/src/commands/init_state.rs +++ b/crates/optimism/cli/src/commands/init_state.rs @@ -11,7 +11,7 @@ use reth_provider::{ BlockNumReader, ChainSpecProvider, DatabaseProviderFactory, StaticFileProviderFactory, StaticFileWriter, }; -use std::{fs::File, io::BufReader}; +use std::io::BufReader; use tracing::info; /// Initializes the database with the genesis block. 
@@ -70,7 +70,7 @@ impl> InitStateCommandOp { info!(target: "reth::cli", "Initiating state dump"); - let reader = BufReader::new(File::open(self.init_state.state)?); + let reader = BufReader::new(reth_fs_util::open(self.init_state.state)?); let hash = init_from_state_dump(reader, &provider_rw, config.stages.etl)?; provider_rw.commit()?; diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index f0468abaef5a..35c8d6a9d8fd 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -93,8 +93,8 @@ impl Consensus for OpBeaconConsensus { impl HeaderValidator for OpBeaconConsensus { fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> { - validate_header_gas(header)?; - validate_header_base_fee(header, &self.chain_spec) + validate_header_gas(header.header())?; + validate_header_base_fee(header.header(), &self.chain_spec) } fn validate_header_against_parent( diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 24ed78806513..8546b9867b3a 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -24,9 +24,7 @@ use reth_primitives::{BlockWithSenders, Receipt, TxType}; use reth_revm::{Database, State}; use reth_scroll_execution::FinalizeExecution; use revm::db::BundleState; -use revm_primitives::{ - db::DatabaseCommit, BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, U256, -}; +use revm_primitives::{db::DatabaseCommit, EnvWithHandlerCfg, ResultAndState, U256}; use tracing::trace; /// Factory for [`OpExecutionStrategy`]. @@ -111,10 +109,7 @@ where /// /// Caution: this does not initialize the tx environment. 
fn evm_env_for_block(&self, header: &Header, total_difficulty: U256) -> EnvWithHandlerCfg { - let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); - let mut block_env = BlockEnv::default(); - self.evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, header, total_difficulty); - + let (cfg, block_env) = self.evm_config.cfg_and_block_env(header, total_difficulty); EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) } } diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index 661696aedd17..df56087e492c 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -214,14 +214,13 @@ mod tests { use reth_optimism_chainspec::BASE_MAINNET; use reth_optimism_primitives::OpPrimitives; use reth_primitives::{Account, Log, Receipt, Receipts, SealedBlockWithSenders, TxType}; - use reth_revm::{ db::{BundleState, CacheDB, EmptyDBTyped}, inspectors::NoOpInspector, primitives::{AccountInfo, BlockEnv, CfgEnv, SpecId}, JournaledState, }; - use revm_primitives::{CfgEnvWithHandlerCfg, EnvWithHandlerCfg, HandlerCfg}; + use revm_primitives::{EnvWithHandlerCfg, HandlerCfg}; use std::{ collections::{HashMap, HashSet}, sync::Arc, @@ -233,12 +232,6 @@ mod tests { #[test] fn test_fill_cfg_and_block_env() { - // Create a new configuration environment - let mut cfg_env = CfgEnvWithHandlerCfg::new_with_spec_id(CfgEnv::default(), SpecId::LATEST); - - // Create a default block environment - let mut block_env = BlockEnv::default(); - // Create a default header let header = Header::default(); @@ -255,10 +248,10 @@ mod tests { // Define the total difficulty as zero (default) let total_difficulty = U256::ZERO; - // Use the `OpEvmConfig` to fill the `cfg_env` and `block_env` based on the ChainSpec, + // Use the `OpEvmConfig` to create the `cfg_env` and `block_env` based on the ChainSpec, // Header, and total difficulty - OpEvmConfig::new(Arc::new(OpChainSpec { inner: chain_spec.clone() })) - 
.fill_cfg_and_block_env(&mut cfg_env, &mut block_env, &header, total_difficulty); + let (cfg_env, _) = OpEvmConfig::new(Arc::new(OpChainSpec { inner: chain_spec.clone() })) + .cfg_and_block_env(&header, total_difficulty); // Assert that the chain ID in the `cfg_env` is correctly set to the chain ID of the // ChainSpec diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 303787559735..4c080d7d0e85 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -18,6 +18,7 @@ reth-engine-local.workspace = true reth-primitives.workspace = true reth-payload-builder.workspace = true reth-payload-util.workspace = true +reth-payload-validator.workspace = true reth-basic-payload-builder.workspace = true reth-consensus.workspace = true reth-node-api.workspace = true @@ -31,6 +32,7 @@ reth-revm = { workspace = true, features = ["std"] } reth-beacon-consensus.workspace = true reth-trie-db.workspace = true reth-rpc-server-types.workspace = true +reth-tasks = { workspace = true, optional = true } # op-reth reth-optimism-payload-builder.workspace = true @@ -61,7 +63,6 @@ parking_lot.workspace = true serde_json.workspace = true # test-utils dependencies -reth = { workspace = true, optional = true } reth-e2e-test-utils = { workspace = true, optional = true } alloy-genesis = { workspace = true, optional = true } tokio = { workspace = true, optional = true } @@ -69,9 +70,12 @@ tokio = { workspace = true, optional = true } [dev-dependencies] reth-optimism-node = { workspace = true, features = ["test-utils"] } reth-db.workspace = true +reth-node-core.workspace = true reth-node-builder = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-revm = { workspace = true, features = ["test-utils"] } +reth-tasks.workspace = true + alloy-primitives.workspace = true op-alloy-consensus.workspace = true alloy-signer-local.workspace = true @@ -81,27 +85,28 @@ futures.workspace = 
true [features] optimism = [ - "reth-primitives/optimism", - "reth-provider/optimism", - "reth-optimism-evm/optimism", - "reth-optimism-payload-builder/optimism", - "reth-beacon-consensus/optimism", - "revm/optimism", - "reth-optimism-rpc/optimism", - "reth-engine-local/optimism", - "reth-optimism-consensus/optimism", - "reth-db/optimism", - "reth-optimism-node/optimism", + "reth-primitives/optimism", + "reth-provider/optimism", + "reth-optimism-evm/optimism", + "reth-optimism-payload-builder/optimism", + "reth-beacon-consensus/optimism", + "revm/optimism", + "reth-optimism-rpc/optimism", + "reth-engine-local/optimism", + "reth-optimism-consensus/optimism", + "reth-db/optimism", + "reth-optimism-node/optimism", + "reth-node-core/optimism" ] asm-keccak = [ - "reth-primitives/asm-keccak", - "reth/asm-keccak", - "alloy-primitives/asm-keccak", - "revm/asm-keccak", - "reth-optimism-node/asm-keccak", + "reth-primitives/asm-keccak", + "alloy-primitives/asm-keccak", + "revm/asm-keccak", + "reth-optimism-node/asm-keccak", + "reth-node-core/asm-keccak" ] test-utils = [ - "reth", + "reth-tasks", "reth-e2e-test-utils", "alloy-genesis", "tokio", diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index dd4d0c13f24a..57b76b904bd3 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -1,6 +1,7 @@ -use std::sync::Arc; - -use alloy_rpc_types_engine::{ExecutionPayloadEnvelopeV2, ExecutionPayloadV1}; +use alloy_rpc_types_engine::{ + ExecutionPayload, ExecutionPayloadEnvelopeV2, ExecutionPayloadSidecar, ExecutionPayloadV1, + PayloadError, +}; use op_alloy_rpc_types_engine::{ OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4, OpPayloadAttributes, }; @@ -16,6 +17,9 @@ use reth_node_api::{ use reth_optimism_chainspec::OpChainSpec; use reth_optimism_forks::{OpHardfork, OpHardforks}; use reth_optimism_payload_builder::{OpBuiltPayload, OpPayloadBuilderAttributes}; +use 
reth_payload_validator::ExecutionPayloadValidator; +use reth_primitives::{Block, SealedBlockFor}; +use std::sync::Arc; /// The types used in the optimism beacon consensus engine. #[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)] @@ -57,76 +61,42 @@ impl PayloadTypes for OpPayloadTypes { /// Validator for Optimism engine API. #[derive(Debug, Clone)] pub struct OpEngineValidator { - chain_spec: Arc, + inner: ExecutionPayloadValidator, } impl OpEngineValidator { /// Instantiates a new validator. pub const fn new(chain_spec: Arc) -> Self { - Self { chain_spec } + Self { inner: ExecutionPayloadValidator::new(chain_spec) } } -} - -/// Validates the presence of the `withdrawals` field according to the payload timestamp. -/// -/// After Canyon, withdrawals field must be [Some]. -/// Before Canyon, withdrawals field must be [None]; -/// -/// Canyon activates the Shanghai EIPs, see the Canyon specs for more details: -/// -pub fn validate_withdrawals_presence( - chain_spec: &ChainSpec, - version: EngineApiMessageVersion, - message_validation_kind: MessageValidationKind, - timestamp: u64, - has_withdrawals: bool, -) -> Result<(), EngineObjectValidationError> { - let is_shanghai = chain_spec.fork(OpHardfork::Canyon).active_at_timestamp(timestamp); - - match version { - EngineApiMessageVersion::V1 => { - if has_withdrawals { - return Err(message_validation_kind - .to_error(VersionSpecificValidationError::WithdrawalsNotSupportedInV1)) - } - if is_shanghai { - return Err(message_validation_kind - .to_error(VersionSpecificValidationError::NoWithdrawalsPostShanghai)) - } - } - EngineApiMessageVersion::V2 | EngineApiMessageVersion::V3 | EngineApiMessageVersion::V4 => { - if is_shanghai && !has_withdrawals { - return Err(message_validation_kind - .to_error(VersionSpecificValidationError::NoWithdrawalsPostShanghai)) - } - if !is_shanghai && has_withdrawals { - return Err(message_validation_kind - .to_error(VersionSpecificValidationError::HasWithdrawalsPreShanghai)) 
- } - } - }; - Ok(()) + /// Returns the chain spec used by the validator. + #[inline] + fn chain_spec(&self) -> &OpChainSpec { + self.inner.chain_spec() + } } impl EngineValidator for OpEngineValidator where Types: EngineTypes, { + type Block = Block; + fn validate_version_specific_fields( &self, version: EngineApiMessageVersion, payload_or_attrs: PayloadOrAttributes<'_, OpPayloadAttributes>, ) -> Result<(), EngineObjectValidationError> { validate_withdrawals_presence( - &self.chain_spec, + self.chain_spec(), version, payload_or_attrs.message_validation_kind(), payload_or_attrs.timestamp(), payload_or_attrs.withdrawals().is_some(), )?; validate_parent_beacon_block_root_presence( - &self.chain_spec, + self.chain_spec(), version, payload_or_attrs.message_validation_kind(), payload_or_attrs.timestamp(), @@ -139,7 +109,7 @@ where version: EngineApiMessageVersion, attributes: &OpPayloadAttributes, ) -> Result<(), EngineObjectValidationError> { - validate_version_specific_fields(&self.chain_spec, version, attributes.into())?; + validate_version_specific_fields(self.chain_spec(), version, attributes.into())?; if attributes.gas_limit.is_none() { return Err(EngineObjectValidationError::InvalidParams( @@ -147,7 +117,9 @@ where )) } - if self.chain_spec.is_holocene_active_at_timestamp(attributes.payload_attributes.timestamp) + if self + .chain_spec() + .is_holocene_active_at_timestamp(attributes.payload_attributes.timestamp) { let (elasticity, denominator) = attributes.decode_eip_1559_params().ok_or_else(|| { @@ -164,6 +136,56 @@ where Ok(()) } + + fn ensure_well_formed_payload( + &self, + payload: ExecutionPayload, + sidecar: ExecutionPayloadSidecar, + ) -> Result, PayloadError> { + self.inner.ensure_well_formed_payload(payload, sidecar) + } +} + +/// Validates the presence of the `withdrawals` field according to the payload timestamp. +/// +/// After Canyon, withdrawals field must be [Some]. 
+/// Before Canyon, withdrawals field must be [None]; +/// +/// Canyon activates the Shanghai EIPs, see the Canyon specs for more details: +/// +pub fn validate_withdrawals_presence( + chain_spec: &ChainSpec, + version: EngineApiMessageVersion, + message_validation_kind: MessageValidationKind, + timestamp: u64, + has_withdrawals: bool, +) -> Result<(), EngineObjectValidationError> { + let is_shanghai = chain_spec.fork(OpHardfork::Canyon).active_at_timestamp(timestamp); + + match version { + EngineApiMessageVersion::V1 => { + if has_withdrawals { + return Err(message_validation_kind + .to_error(VersionSpecificValidationError::WithdrawalsNotSupportedInV1)) + } + if is_shanghai { + return Err(message_validation_kind + .to_error(VersionSpecificValidationError::NoWithdrawalsPostShanghai)) + } + } + EngineApiMessageVersion::V2 | EngineApiMessageVersion::V3 | EngineApiMessageVersion::V4 => { + if is_shanghai && !has_withdrawals { + return Err(message_validation_kind + .to_error(VersionSpecificValidationError::NoWithdrawalsPostShanghai)) + } + if !is_shanghai && has_withdrawals { + return Err(message_validation_kind + .to_error(VersionSpecificValidationError::HasWithdrawalsPreShanghai)) + } + } + }; + + Ok(()) } #[cfg(test)] diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 82b2ce2ebc2b..d6cd47cf2af1 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -1,10 +1,14 @@ //! Optimism Node types config. 
-use std::sync::Arc; - +use crate::{ + args::RollupArgs, + engine::OpEngineValidator, + txpool::{OpTransactionPool, OpTransactionValidator}, + OpEngineTypes, +}; use alloy_consensus::Header; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; -use reth_chainspec::{EthChainSpec, Hardforks}; +use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks}; use reth_db::transaction::{DbTx, DbTxMut}; use reth_evm::{execute::BasicBlockExecutorProvider, ConfigureEvm}; use reth_network::{NetworkConfig, NetworkHandle, NetworkManager, PeersInfo}; @@ -17,7 +21,7 @@ use reth_node_builder::{ PayloadServiceBuilder, PoolBuilder, PoolBuilderConfigOverrides, }, node::{FullNodeTypes, NodeTypes, NodeTypesWithEngine}, - rpc::{EngineValidatorBuilder, RethRpcAddOns, RpcAddOns, RpcHandle}, + rpc::{EngineValidatorAddOn, EngineValidatorBuilder, RethRpcAddOns, RpcAddOns, RpcHandle}, BuilderContext, Node, NodeAdapter, NodeComponentsBuilder, PayloadBuilderConfig, }; use reth_optimism_chainspec::OpChainSpec; @@ -32,8 +36,8 @@ use reth_optimism_rpc::{ use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_primitives::BlockBody; use reth_provider::{ - providers::ChainStorage, BlockBodyWriter, CanonStateSubscriptions, DBProvider, EthStorage, - ProviderResult, + providers::ChainStorage, BlockBodyReader, BlockBodyWriter, CanonStateSubscriptions, + ChainSpecProvider, DBProvider, EthStorage, ProviderResult, ReadBodyInput, }; use reth_rpc_server_types::RethRpcModule; use reth_tracing::tracing::{debug, info}; @@ -42,13 +46,7 @@ use reth_transaction_pool::{ TransactionValidationTaskExecutor, }; use reth_trie_db::MerklePatriciaTrie; - -use crate::{ - args::RollupArgs, - engine::OpEngineValidator, - txpool::{OpTransactionPool, OpTransactionValidator}, - OpEngineTypes, -}; +use std::sync::Arc; /// Storage implementation for Optimism. 
#[derive(Debug, Default, Clone)] @@ -72,7 +70,31 @@ impl> BlockBodyWriter for } } +impl> + BlockBodyReader for OpStorage +{ + type Block = reth_primitives::Block; + + fn read_block_bodies( + &self, + provider: &Provider, + inputs: Vec>, + ) -> ProviderResult> { + self.0.read_block_bodies(provider, inputs) + } +} + impl ChainStorage for OpStorage { + fn reader( + &self, + ) -> impl reth_provider::ChainStorageReader, OpPrimitives> + where + TX: DbTx + 'static, + Types: reth_provider::providers::NodeTypesForProvider, + { + self + } + fn writer( &self, ) -> impl reth_provider::ChainStorageWriter, OpPrimitives> @@ -83,6 +105,7 @@ impl ChainStorage for OpStorage { self } } + /// Type configuration for a regular Optimism node. #[derive(Debug, Default, Clone)] #[non_exhaustive] @@ -110,7 +133,11 @@ impl OpNode { > where Node: FullNodeTypes< - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine< + Engine = OpEngineTypes, + ChainSpec = OpChainSpec, + Primitives = OpPrimitives, + >, >, { let RollupArgs { disable_txpool_gossip, compute_pending_block, discovery_v4, .. } = args; @@ -175,13 +202,13 @@ impl NodeTypesWithEngine for OpNode { #[derive(Debug)] pub struct OpAddOns(pub RpcAddOns, OpEngineValidatorBuilder>); -impl Default for OpAddOns { +impl>> Default for OpAddOns { fn default() -> Self { Self::new(None) } } -impl OpAddOns { +impl>> OpAddOns { /// Create a new instance with the given `sequencer_http` URL. pub fn new(sequencer_http: Option) -> Self { Self(RpcAddOns::new(move |ctx| OpEthApi::new(ctx, sequencer_http), Default::default())) @@ -231,6 +258,18 @@ where } } +impl EngineValidatorAddOn for OpAddOns +where + N: FullNodeComponents>, + OpEngineValidator: EngineValidator<::Engine>, +{ + type Validator = OpEngineValidator; + + async fn engine_validator(&self, ctx: &AddOnsContext<'_, N>) -> eyre::Result { + OpEngineValidatorBuilder::default().build(ctx).await + } +} + /// A regular optimism evm and executor builder. 
#[derive(Debug, Default, Clone, Copy)] #[non_exhaustive] @@ -268,7 +307,7 @@ pub struct OpPoolBuilder { impl PoolBuilder for OpPoolBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes>, { type Pool = OpTransactionPool; @@ -389,7 +428,11 @@ where ) -> eyre::Result> where Node: FullNodeTypes< - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine< + Engine = OpEngineTypes, + ChainSpec = OpChainSpec, + Primitives = OpPrimitives, + >, >, Pool: TransactionPool + Unpin + 'static, Evm: ConfigureEvm
, @@ -424,8 +467,13 @@ where impl PayloadServiceBuilder for OpPayloadBuilder where - Node: - FullNodeTypes>, + Node: FullNodeTypes< + Types: NodeTypesWithEngine< + Engine = OpEngineTypes, + ChainSpec = OpChainSpec, + Primitives = OpPrimitives, + >, + >, Pool: TransactionPool + Unpin + 'static, Txs: OpPayloadTransactions, { @@ -497,7 +545,7 @@ impl OpNetworkBuilder { impl NetworkBuilder for OpNetworkBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes>, Pool: TransactionPool + Unpin + 'static, { async fn build_network( diff --git a/crates/optimism/node/src/txpool.rs b/crates/optimism/node/src/txpool.rs index a5616569c86f..6db5d69568b6 100644 --- a/crates/optimism/node/src/txpool.rs +++ b/crates/optimism/node/src/txpool.rs @@ -69,7 +69,7 @@ impl OpTransactionValidator { impl OpTransactionValidator where - Client: StateProviderFactory + BlockReaderIdExt, + Client: StateProviderFactory + BlockReaderIdExt, Tx: EthPoolTransaction, { /// Create a new [`OpTransactionValidator`]. @@ -195,7 +195,7 @@ where impl TransactionValidator for OpTransactionValidator where - Client: StateProviderFactory + BlockReaderIdExt, + Client: StateProviderFactory + BlockReaderIdExt, Tx: EthPoolTransaction, { type Transaction = Tx; diff --git a/crates/optimism/node/src/utils.rs b/crates/optimism/node/src/utils.rs index b54015fef0cc..e70e35031982 100644 --- a/crates/optimism/node/src/utils.rs +++ b/crates/optimism/node/src/utils.rs @@ -1,12 +1,13 @@ use crate::{node::OpAddOns, OpBuiltPayload, OpNode as OtherOpNode, OpPayloadBuilderAttributes}; use alloy_genesis::Genesis; use alloy_primitives::{Address, B256}; -use reth::{rpc::types::engine::PayloadAttributes, tasks::TaskManager}; +use alloy_rpc_types_engine::PayloadAttributes; use reth_e2e_test_utils::{ transaction::TransactionTestContext, wallet::Wallet, Adapter, NodeHelperType, }; use reth_optimism_chainspec::OpChainSpecBuilder; use reth_payload_builder::EthPayloadBuilderAttributes; +use reth_tasks::TaskManager; use std::sync::Arc; use 
tokio::sync::Mutex; diff --git a/crates/optimism/node/tests/e2e/p2p.rs b/crates/optimism/node/tests/e2e/p2p.rs index 3db4cfab8698..90623d9e65d3 100644 --- a/crates/optimism/node/tests/e2e/p2p.rs +++ b/crates/optimism/node/tests/e2e/p2p.rs @@ -1,6 +1,5 @@ use alloy_rpc_types_engine::PayloadStatusEnum; use futures::StreamExt; -use reth::blockchain_tree::error::BlockchainTreeError; use reth_optimism_node::utils::{advance_chain, setup}; use std::sync::Arc; use tokio::sync::Mutex; @@ -90,10 +89,10 @@ async fn can_sync() -> eyre::Result<()> { canonical_payload_chain[tip_index - reorg_depth + 1].0.clone(), canonical_payload_chain[tip_index - reorg_depth + 1].1.clone(), PayloadStatusEnum::Invalid { - validation_error: BlockchainTreeError::PendingBlockIsFinalized { - last_finalized: (tip - reorg_depth) as u64 + 1, - } - .to_string(), + validation_error: format!( + "block number is lower than the last finalized block number {}", + (tip - reorg_depth) as u64 + 1 + ), }, ) .await; diff --git a/crates/optimism/node/tests/it/priority.rs b/crates/optimism/node/tests/it/priority.rs index c1df9180ce39..35be3dfd3ee1 100644 --- a/crates/optimism/node/tests/it/priority.rs +++ b/crates/optimism/node/tests/it/priority.rs @@ -4,7 +4,6 @@ use alloy_consensus::TxEip1559; use alloy_genesis::Genesis; use alloy_network::TxSignerSync; use alloy_primitives::{Address, ChainId, TxKind}; -use reth::{args::DatadirArgs, tasks::TaskManager}; use reth_chainspec::EthChainSpec; use reth_db::test_utils::create_test_rw_db_with_path; use reth_e2e_test_utils::{ @@ -14,6 +13,7 @@ use reth_node_api::{FullNodeTypes, NodeTypesWithEngine}; use reth_node_builder::{ components::ComponentsBuilder, EngineNodeLauncher, NodeBuilder, NodeConfig, }; +use reth_node_core::args::DatadirArgs; use reth_optimism_chainspec::{OpChainSpec, OpChainSpecBuilder}; use reth_optimism_node::{ args::RollupArgs, @@ -25,9 +25,11 @@ use reth_optimism_node::{ OpEngineTypes, OpNode, }; use 
reth_optimism_payload_builder::builder::OpPayloadTransactions; +use reth_optimism_primitives::OpPrimitives; use reth_payload_util::{PayloadTransactions, PayloadTransactionsChain, PayloadTransactionsFixed}; use reth_primitives::{SealedBlock, Transaction, TransactionSigned, TransactionSignedEcRecovered}; use reth_provider::providers::BlockchainProvider2; +use reth_tasks::TaskManager; use reth_transaction_pool::pool::BestPayloadTransactions; use std::sync::Arc; use tokio::sync::Mutex; @@ -90,8 +92,13 @@ fn build_components( OpConsensusBuilder, > where - Node: - FullNodeTypes>, + Node: FullNodeTypes< + Types: NodeTypesWithEngine< + Engine = OpEngineTypes, + ChainSpec = OpChainSpec, + Primitives = OpPrimitives, + >, + >, { let RollupArgs { disable_txpool_gossip, compute_pending_block, discovery_v4, .. } = RollupArgs::default(); diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 132c26492721..fbf99c78d9e7 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -18,7 +18,9 @@ use reth_optimism_forks::OpHardforks; use reth_payload_builder_primitives::PayloadBuilderError; use reth_payload_primitives::PayloadBuilderAttributes; use reth_payload_util::PayloadTransactions; -use reth_primitives::{proofs, Block, BlockBody, Receipt, SealedHeader, TransactionSigned, TxType}; +use reth_primitives::{ + proofs, Block, BlockBody, BlockExt, Receipt, SealedHeader, TransactionSigned, TxType, +}; use reth_provider::{ProviderError, StateProofProvider, StateProviderFactory, StateRootProvider}; use reth_revm::database::StateProviderDatabase; use reth_transaction_pool::{ diff --git a/crates/optimism/primitives/Cargo.toml b/crates/optimism/primitives/Cargo.toml index e7200c40ed80..abd27300fa59 100644 --- a/crates/optimism/primitives/Cargo.toml +++ b/crates/optimism/primitives/Cargo.toml @@ -13,7 +13,6 @@ workspace = true [dependencies] # reth -reth-node-types.workspace = true reth-primitives.workspace = 
true reth-primitives-traits.workspace = true reth-codecs = { workspace = true, optional = true, features = ["optimism"] } @@ -47,7 +46,6 @@ default = ["std", "reth-codec"] std = [ "reth-primitives-traits/std", "reth-primitives/std", - "reth-node-types/std", "reth-codecs/std", "alloy-consensus/std", "alloy-eips/std", diff --git a/crates/optimism/primitives/src/lib.rs b/crates/optimism/primitives/src/lib.rs index 0f4608a8ebe8..796f5cb06138 100644 --- a/crates/optimism/primitives/src/lib.rs +++ b/crates/optimism/primitives/src/lib.rs @@ -6,26 +6,29 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(not(feature = "std"), no_std)] pub mod bedrock; pub mod transaction; +use reth_primitives::EthPrimitives; pub use transaction::{tx_type::OpTxType, OpTransaction}; -use alloy_consensus::Header; -use reth_node_types::NodePrimitives; -use reth_primitives::{Block, BlockBody, Receipt, TransactionSigned}; - /// Optimism primitive types. -#[derive(Debug, Default, Clone, PartialEq, Eq)] -pub struct OpPrimitives; +pub type OpPrimitives = EthPrimitives; -impl NodePrimitives for OpPrimitives { - type Block = Block; - type BlockHeader = Header; - type BlockBody = BlockBody; - type SignedTx = TransactionSigned; - type TxType = OpTxType; - type Receipt = Receipt; -} +// TODO: once we are ready for separating primitive types, introduce a separate `NodePrimitives` +// implementation used exclusively by legacy engine. 
+// +// #[derive(Debug, Default, Clone, PartialEq, Eq)] +// pub struct OpPrimitives; +// +// impl NodePrimitives for OpPrimitives { +// type Block = Block; +// type BlockHeader = Header; +// type BlockBody = BlockBody; +// type SignedTx = TransactionSigned; +// type TxType = OpTxType; +// type Receipt = Receipt; +// } diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index dfa5ddb0a785..09a0b81170d7 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -33,6 +33,7 @@ reth-optimism-chainspec.workspace = true reth-optimism-consensus.workspace = true reth-optimism-evm.workspace = true reth-optimism-payload-builder.workspace = true +reth-optimism-primitives.workspace = true reth-optimism-forks.workspace = true # ethereum diff --git a/crates/optimism/rpc/src/eth/block.rs b/crates/optimism/rpc/src/eth/block.rs index 22d26e824b3b..64a55496993d 100644 --- a/crates/optimism/rpc/src/eth/block.rs +++ b/crates/optimism/rpc/src/eth/block.rs @@ -35,7 +35,6 @@ where let block_hash = block.hash(); let excess_blob_gas = block.excess_blob_gas; let timestamp = block.timestamp; - let block = block.unseal(); let l1_block_info = reth_optimism_evm::extract_l1_info(&block.body).map_err(OpEthApiError::from)?; diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index 60af6542e282..6b909f012c55 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -8,6 +8,7 @@ mod call; mod pending_block; pub use receipt::{OpReceiptBuilder, OpReceiptFieldsBuilder}; +use reth_optimism_primitives::OpPrimitives; use std::{fmt, sync::Arc}; @@ -20,8 +21,8 @@ use reth_evm::ConfigureEvm; use reth_network_api::NetworkInfo; use reth_node_builder::EthApiBuilderCtx; use reth_provider::{ - BlockNumReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, EvmEnvProvider, - StageCheckpointReader, StateProviderFactory, + BlockNumReader, BlockReader, BlockReaderIdExt, 
CanonStateSubscriptions, ChainSpecProvider, + EvmEnvProvider, StageCheckpointReader, StateProviderFactory, }; use reth_rpc::eth::{core::EthApiInner, DevSigner}; use reth_rpc_eth_api::{ @@ -71,7 +72,11 @@ pub struct OpEthApi { impl OpEthApi where N: RpcNodeCore< - Provider: BlockReaderIdExt + ChainSpecProvider + CanonStateSubscriptions + Clone + 'static, + Provider: BlockReaderIdExt + + ChainSpecProvider + + CanonStateSubscriptions + + Clone + + 'static, >, { /// Creates a new instance for given context. @@ -249,7 +254,7 @@ where impl Trace for OpEthApi where - Self: LoadState>, + Self: RpcNodeCore + LoadState>, N: RpcNodeCore, { } diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index 782f78dd4aa9..98ea65778d8d 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -24,8 +24,10 @@ impl LoadPendingBlock for OpEthApi where Self: SpawnBlocking, N: RpcNodeCore< - Provider: BlockReaderIdExt - + EvmEnvProvider + Provider: BlockReaderIdExt< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + > + EvmEnvProvider + ChainSpecProvider + StateProviderFactory, Pool: TransactionPool, diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index 5064c9ed5cfa..e803ea210197 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -11,7 +11,7 @@ use reth_optimism_chainspec::OpChainSpec; use reth_optimism_evm::RethL1BlockInfo; use reth_optimism_forks::OpHardforks; use reth_primitives::{Receipt, TransactionMeta, TransactionSigned, TxType}; -use reth_provider::{ChainSpecProvider, TransactionsProvider}; +use reth_provider::{ChainSpecProvider, ReceiptProvider, TransactionsProvider}; use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcReceipt}; use reth_rpc_eth_types::{receipt::build_receipt, EthApiError}; @@ -21,7 +21,8 @@ impl LoadReceipt for OpEthApi where Self: 
Send + Sync, N: FullNodeComponents>, - Self::Provider: TransactionsProvider, + Self::Provider: + TransactionsProvider + ReceiptProvider, { async fn build_transaction_receipt( &self, diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 19bcd31daccb..3202dc46ad1b 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -74,7 +74,7 @@ where impl TransactionCompat for OpEthApi where - N: FullNodeComponents, + N: FullNodeComponents>, { type Transaction = Transaction; type Error = OpEthApiError; diff --git a/crates/payload/builder/src/lib.rs b/crates/payload/builder/src/lib.rs index 0887a5ca74ac..b6191ea7fd11 100644 --- a/crates/payload/builder/src/lib.rs +++ b/crates/payload/builder/src/lib.rs @@ -31,7 +31,7 @@ //! use alloy_consensus::Header; //! use alloy_primitives::U256; //! use reth_payload_builder::{EthBuiltPayload, PayloadBuilderError, KeepPayloadJobAlive, EthPayloadBuilderAttributes, PayloadJob, PayloadJobGenerator, PayloadKind}; -//! use reth_primitives::Block; +//! use reth_primitives::{Block, BlockExt}; //! //! /// The generator type that creates new jobs that builds empty blocks. //! 
pub struct EmptyBlockPayloadJobGenerator; diff --git a/crates/payload/builder/src/test_utils.rs b/crates/payload/builder/src/test_utils.rs index 5025a12ed718..4690ca14f0d8 100644 --- a/crates/payload/builder/src/test_utils.rs +++ b/crates/payload/builder/src/test_utils.rs @@ -9,7 +9,7 @@ use alloy_primitives::U256; use reth_chain_state::{CanonStateNotification, ExecutedBlock}; use reth_payload_builder_primitives::PayloadBuilderError; use reth_payload_primitives::{PayloadKind, PayloadTypes}; -use reth_primitives::Block; +use reth_primitives::{Block, BlockExt}; use std::{ future::Future, pin::Pin, diff --git a/crates/payload/primitives/src/error.rs b/crates/payload/primitives/src/error.rs index d2e57da57911..ffe4e027e966 100644 --- a/crates/payload/primitives/src/error.rs +++ b/crates/payload/primitives/src/error.rs @@ -1,6 +1,7 @@ //! Error types emitted by types or implementations of this crate. use alloy_primitives::B256; +use alloy_rpc_types_engine::ForkchoiceUpdateError; use reth_errors::{ProviderError, RethError}; use revm_primitives::EVMError; use tokio::sync::oneshot; @@ -53,7 +54,7 @@ impl From for PayloadBuilderError { } } -/// Thrown when the payload or attributes are known to be invalid before processing. +/// Thrown when the payload or attributes are known to be invalid __before__ processing. /// /// This is used mainly for /// [`validate_version_specific_fields`](crate::validate_version_specific_fields), which validates @@ -115,3 +116,20 @@ impl EngineObjectValidationError { Self::InvalidParams(Box::new(error)) } } + +/// Thrown when validating the correctness of a payloadattributes object. +#[derive(thiserror::Error, Debug)] +pub enum InvalidPayloadAttributesError { + /// Thrown if the timestamp of the payload attributes is invalid according to the engine specs. + #[error("parent beacon block root not supported before V3")] + InvalidTimestamp, + /// Another type of error that is not covered by the above variants. 
+ #[error("Invalid params: {0}")] + InvalidParams(#[from] Box), +} + +impl From for ForkchoiceUpdateError { + fn from(_: InvalidPayloadAttributesError) -> Self { + Self::UpdatedInvalidPayloadAttributes + } +} diff --git a/crates/payload/primitives/src/lib.rs b/crates/payload/primitives/src/lib.rs index 0ff4810b8647..523e6fb057a6 100644 --- a/crates/payload/primitives/src/lib.rs +++ b/crates/payload/primitives/src/lib.rs @@ -9,7 +9,10 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod error; -pub use error::{EngineObjectValidationError, PayloadBuilderError, VersionSpecificValidationError}; +pub use error::{ + EngineObjectValidationError, InvalidPayloadAttributesError, PayloadBuilderError, + VersionSpecificValidationError, +}; /// Contains traits to abstract over payload attributes types and default implementations of the /// [`PayloadAttributes`] trait for ethereum mainnet and optimism types. diff --git a/crates/payload/validator/src/lib.rs b/crates/payload/validator/src/lib.rs index e74b5f48d40f..0a872a68ddfa 100644 --- a/crates/payload/validator/src/lib.rs +++ b/crates/payload/validator/src/lib.rs @@ -12,7 +12,7 @@ use alloy_rpc_types::engine::{ ExecutionPayload, ExecutionPayloadSidecar, MaybeCancunPayloadFields, PayloadError, }; use reth_chainspec::EthereumHardforks; -use reth_primitives::SealedBlock; +use reth_primitives::{BlockExt, SealedBlock}; use reth_rpc_types_compat::engine::payload::try_into_block; use std::sync::Arc; diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index fd7f7f1c631f..76bf916add9b 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -1,10 +1,10 @@ //! Block body abstraction. 
-use alloc::fmt; +use alloc::{fmt, vec::Vec}; -use alloy_consensus::Transaction; +use alloy_eips::eip4895::Withdrawals; -use crate::{FullSignedTx, InMemorySize, MaybeArbitrary, MaybeSerde}; +use crate::{FullSignedTx, InMemorySize, MaybeArbitrary, MaybeSerde, SignedTransaction}; /// Helper trait that unifies all behaviour required by transaction to support full node operations. pub trait FullBlockBody: BlockBody {} @@ -12,7 +12,6 @@ pub trait FullBlockBody: BlockBody {} impl FullBlockBody for T where T: BlockBody {} /// Abstraction for block's body. -#[auto_impl::auto_impl(&, Arc)] pub trait BlockBody: Send + Sync @@ -29,8 +28,20 @@ pub trait BlockBody: + MaybeArbitrary { /// Ordered list of signed transactions as committed in block. - type Transaction: Transaction; + type Transaction: SignedTransaction; + + /// Ommer header type. + type OmmerHeader; /// Returns reference to transactions in block. fn transactions(&self) -> &[Self::Transaction]; + + /// Consume the block body and return a [`Vec`] of transactions. + fn into_transactions(self) -> Vec; + + /// Returns block withdrawals if any. + fn withdrawals(&self) -> Option<&Withdrawals>; + + /// Returns block ommers if any. + fn ommers(&self) -> Option<&[Self::OmmerHeader]>; } diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index c0f5a1ffc63c..5b22ff590be5 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -6,7 +6,8 @@ pub mod header; use alloc::fmt; use crate::{ - BlockHeader, FullBlockBody, FullBlockHeader, InMemorySize, MaybeArbitrary, MaybeSerde, + BlockBody, BlockHeader, FullBlockBody, FullBlockHeader, InMemorySize, MaybeArbitrary, + MaybeSerde, }; /// Helper trait that unifies all behaviour required by block to support full node operations. 
@@ -26,7 +27,6 @@ impl FullBlock for T where // todo: make sealable super-trait, depends on // todo: make with senders extension trait, so block can be impl by block type already containing // senders -#[auto_impl::auto_impl(&, Arc)] pub trait Block: Send + Sync @@ -44,11 +44,17 @@ pub trait Block: type Header: BlockHeader + 'static; /// The block's body contains the transactions in the block. - type Body: Send + Sync + Unpin + 'static; + type Body: BlockBody + Send + Sync + Unpin + 'static; + + /// Create new block instance. + fn new(header: Self::Header, body: Self::Body) -> Self; /// Returns reference to block header. fn header(&self) -> &Self::Header; /// Returns reference to block body. fn body(&self) -> &Self::Body; + + /// Splits the block into its header and body. + fn split(self) -> (Self::Header, Self::Body); } diff --git a/crates/primitives-traits/src/encoded.rs b/crates/primitives-traits/src/encoded.rs index b162fc93343a..885031af1b63 100644 --- a/crates/primitives-traits/src/encoded.rs +++ b/crates/primitives-traits/src/encoded.rs @@ -1,3 +1,4 @@ +use alloy_eips::eip2718::Encodable2718; use alloy_primitives::Bytes; /// Generic wrapper with encoded Bytes, such as transaction data. @@ -17,8 +18,8 @@ impl WithEncoded { } /// Get the encoded bytes - pub fn encoded_bytes(&self) -> Bytes { - self.0.clone() + pub const fn encoded_bytes(&self) -> &Bytes { + &self.0 } /// Get the underlying value @@ -47,6 +48,13 @@ impl WithEncoded { } } +impl WithEncoded { + /// Wraps the value with the [`Encodable2718::encoded_2718`] bytes. + pub fn from_2718_encodable(value: T) -> Self { + Self(value.encoded_2718().into(), value) + } +} + impl WithEncoded> { /// returns `None` if the inner value is `None`, otherwise returns `Some(WithEncoded)`. 
pub fn transpose(self) -> Option> { diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 4d068b2ff4db..338f8f621e1a 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -78,7 +78,7 @@ pub use size::InMemorySize; /// Node traits pub mod node; -pub use node::{FullNodePrimitives, NodePrimitives, ReceiptTy}; +pub use node::{BodyTy, FullNodePrimitives, HeaderTy, NodePrimitives, ReceiptTy}; /// Helper trait that requires arbitrary implementation if the feature is enabled. #[cfg(any(feature = "test-utils", feature = "arbitrary"))] diff --git a/crates/primitives-traits/src/node.rs b/crates/primitives-traits/src/node.rs index 904ed7d12f1d..e610c094ba2d 100644 --- a/crates/primitives-traits/src/node.rs +++ b/crates/primitives-traits/src/node.rs @@ -1,7 +1,8 @@ use core::fmt; use crate::{ - FullBlock, FullBlockBody, FullBlockHeader, FullReceipt, FullSignedTx, FullTxType, MaybeSerde, + Block, BlockBody, BlockHeader, FullBlock, FullBlockBody, FullBlockHeader, FullReceipt, + FullSignedTx, FullTxType, MaybeArbitrary, MaybeSerde, Receipt, }; /// Configures all the primitive types of the node. @@ -9,44 +10,24 @@ pub trait NodePrimitives: Send + Sync + Unpin + Clone + Default + fmt::Debug + PartialEq + Eq + 'static { /// Block primitive. - type Block: Send - + Sync - + Unpin - + Clone - + Default - + fmt::Debug - + PartialEq - + Eq - + MaybeSerde - + 'static; + type Block: Block
; /// Block header primitive. - type BlockHeader: Send - + Sync - + Unpin - + Clone - + Default - + fmt::Debug - + PartialEq - + Eq - + MaybeSerde - + 'static; + type BlockHeader: BlockHeader; /// Block body primitive. - type BlockBody: Send + type BlockBody: BlockBody; + /// Signed version of the transaction type. + type SignedTx: Send + Sync + Unpin + Clone - + Default + fmt::Debug + PartialEq + Eq + MaybeSerde + + MaybeArbitrary + 'static; - /// Signed version of the transaction type. - type SignedTx: Send + Sync + Unpin + Clone + fmt::Debug + PartialEq + Eq + MaybeSerde + 'static; /// Transaction envelope type ID. - type TxType: Send + Sync + Unpin + Clone + Default + fmt::Debug + PartialEq + Eq + 'static; - /// A receipt. - type Receipt: Send + type TxType: Send + Sync + Unpin + Clone @@ -54,19 +35,11 @@ pub trait NodePrimitives: + fmt::Debug + PartialEq + Eq - + MaybeSerde + + MaybeArbitrary + 'static; + /// A receipt. + type Receipt: Receipt; } - -impl NodePrimitives for () { - type Block = (); - type BlockHeader = (); - type BlockBody = (); - type SignedTx = (); - type TxType = (); - type Receipt = (); -} - /// Helper trait that sets trait bounds on [`NodePrimitives`]. pub trait FullNodePrimitives where @@ -109,5 +82,11 @@ impl FullNodePrimitives for T where { } -/// Helper adapter type for accessing [`NodePrimitives`] receipt type. +/// Helper adapter type for accessing [`NodePrimitives`] block header types. +pub type HeaderTy = ::BlockHeader; + +/// Helper adapter type for accessing [`NodePrimitives`] block body types. +pub type BodyTy = ::BlockBody; + +/// Helper adapter type for accessing [`NodePrimitives`] receipt types. 
pub type ReceiptTy = ::Receipt; diff --git a/crates/primitives-traits/src/transaction/mod.rs b/crates/primitives-traits/src/transaction/mod.rs index b67e51024bf6..3a0871c99a43 100644 --- a/crates/primitives-traits/src/transaction/mod.rs +++ b/crates/primitives-traits/src/transaction/mod.rs @@ -7,6 +7,11 @@ pub mod tx_type; use crate::{InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde}; use core::{fmt, hash::Hash}; +use alloy_consensus::constants::{ + EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, + LEGACY_TX_TYPE_ID, +}; + /// Helper trait that unifies all behaviour required by transaction to support full node operations. pub trait FullTransaction: Transaction + MaybeCompact {} @@ -27,6 +32,35 @@ pub trait Transaction: + MaybeSerde + MaybeArbitrary { + /// Returns true if the transaction is a legacy transaction. + #[inline] + fn is_legacy(&self) -> bool { + self.ty() == LEGACY_TX_TYPE_ID + } + + /// Returns true if the transaction is an EIP-2930 transaction. + #[inline] + fn is_eip2930(&self) -> bool { + self.ty() == EIP2930_TX_TYPE_ID + } + + /// Returns true if the transaction is an EIP-1559 transaction. + #[inline] + fn is_eip1559(&self) -> bool { + self.ty() == EIP1559_TX_TYPE_ID + } + + /// Returns true if the transaction is an EIP-4844 transaction. + #[inline] + fn is_eip4844(&self) -> bool { + self.ty() == EIP4844_TX_TYPE_ID + } + + /// Returns true if the transaction is an EIP-7702 transaction. 
+ #[inline] + fn is_eip7702(&self) -> bool { + self.ty() == EIP7702_TX_TYPE_ID + } } impl Transaction for T where diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 84e5a5c5f6e3..721daf865dee 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -163,11 +163,12 @@ test-utils = [ "revm-primitives/test-utils" ] serde-bincode-compat = [ + "serde_with", + "alloy-eips/serde-bincode-compat", "alloy-consensus/serde-bincode-compat", "op-alloy-consensus?/serde-bincode-compat", "reth-primitives-traits/serde-bincode-compat", - "serde_with", - "alloy-eips/serde-bincode-compat", + "reth-trie-common/serde-bincode-compat", ] scroll = [ "reth-trie-common/scroll", diff --git a/crates/primitives/benches/recover_ecdsa_crit.rs b/crates/primitives/benches/recover_ecdsa_crit.rs index 8e8e279b2a4a..9273d71f6f56 100644 --- a/crates/primitives/benches/recover_ecdsa_crit.rs +++ b/crates/primitives/benches/recover_ecdsa_crit.rs @@ -4,6 +4,7 @@ use alloy_rlp::Decodable; use criterion::{criterion_group, criterion_main, Criterion}; use pprof::criterion::{Output, PProfProfiler}; use reth_primitives::TransactionSigned; +use reth_primitives_traits::SignedTransaction; /// Benchmarks the recovery of the public key from the ECDSA message using criterion. 
pub fn criterion_benchmark(c: &mut Criterion) { diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index a93b1cf538a7..5618d81bd8fc 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -1,13 +1,16 @@ -use crate::{GotExpected, SealedHeader, TransactionSigned, TransactionSignedEcRecovered}; +use crate::{ + traits::BlockExt, transaction::SignedTransactionIntoRecoveredExt, BlockBodyTxExt, GotExpected, + SealedHeader, TransactionSigned, TransactionSignedEcRecovered, +}; use alloc::vec::Vec; use alloy_consensus::Header; use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawals}; -use alloy_primitives::{Address, Bytes, Sealable, B256}; +use alloy_primitives::{Address, Bytes, B256}; use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable}; use derive_more::{Deref, DerefMut}; #[cfg(any(test, feature = "arbitrary"))] pub use reth_primitives_traits::test_utils::{generate_valid_header, valid_header_strategy}; -use reth_primitives_traits::InMemorySize; +use reth_primitives_traits::{BlockBody as _, InMemorySize, SignedTransaction}; use serde::{Deserialize, Serialize}; /// Ethereum full block. @@ -23,73 +26,14 @@ pub struct Block { pub body: BlockBody, } -impl Block { - /// Calculate the header hash and seal the block so that it can't be changed. - pub fn seal_slow(self) -> SealedBlock { - SealedBlock { header: SealedHeader::seal(self.header), body: self.body } - } - - /// Seal the block with a known hash. - /// - /// WARNING: This method does not perform validation whether the hash is correct. - pub fn seal(self, hash: B256) -> SealedBlock { - SealedBlock { header: SealedHeader::new(self.header, hash), body: self.body } - } - - /// Expensive operation that recovers transaction signer. See [`SealedBlockWithSenders`]. - pub fn senders(&self) -> Option> { - self.body.recover_signers() - } - - /// Transform into a [`BlockWithSenders`]. 
- /// - /// # Panics - /// - /// If the number of senders does not match the number of transactions in the block - /// and the signer recovery for one of the transactions fails. - /// - /// Note: this is expected to be called with blocks read from disk. - #[track_caller] - pub fn with_senders_unchecked(self, senders: Vec
) -> BlockWithSenders { - self.try_with_senders_unchecked(senders).expect("stored block is valid") - } - - /// Transform into a [`BlockWithSenders`] using the given senders. - /// - /// If the number of senders does not match the number of transactions in the block, this falls - /// back to manually recovery, but _without ensuring that the signature has a low `s` value_. - /// See also [`TransactionSigned::recover_signer_unchecked`] - /// - /// Returns an error if a signature is invalid. - #[track_caller] - pub fn try_with_senders_unchecked( - self, - senders: Vec
, - ) -> Result { - let senders = if self.body.transactions.len() == senders.len() { - senders - } else { - let Some(senders) = self.body.recover_signers_unchecked() else { return Err(self) }; - senders - }; - - Ok(BlockWithSenders::new_unchecked(self, senders)) - } - - /// **Expensive**. Transform into a [`BlockWithSenders`] by recovering senders in the contained - /// transactions. - /// - /// Returns `None` if a transaction is invalid. - pub fn with_recovered_senders(self) -> Option { - let senders = self.senders()?; - Some(BlockWithSenders::new_unchecked(self, senders)) - } -} - impl reth_primitives_traits::Block for Block { type Header = Header; type Body = BlockBody; + fn new(header: Self::Header, body: Self::Body) -> Self { + Self { header, body } + } + fn header(&self) -> &Self::Header { &self.header } @@ -97,6 +41,10 @@ impl reth_primitives_traits::Block for Block { fn body(&self) -> &Self::Body { &self.body } + + fn split(self) -> (Self::Header, Self::Body) { + (self.header, self.body) + } } impl InMemorySize for Block { @@ -204,44 +152,44 @@ impl<'a> arbitrary::Arbitrary<'a> for Block { /// Sealed block with senders recovered from transactions. #[derive(Debug, Clone, PartialEq, Eq, Default, Deref, DerefMut)] -pub struct BlockWithSenders { +pub struct BlockWithSenders { /// Block #[deref] #[deref_mut] - pub block: Block, + pub block: B, /// List of senders that match the transactions in the block pub senders: Vec
, } -impl BlockWithSenders { +impl BlockWithSenders { /// New block with senders - pub const fn new_unchecked(block: Block, senders: Vec
) -> Self { + pub const fn new_unchecked(block: B, senders: Vec
) -> Self { Self { block, senders } } /// New block with senders. Return none if len of tx and senders does not match - pub fn new(block: Block, senders: Vec
) -> Option { - (block.body.transactions.len() == senders.len()).then_some(Self { block, senders }) + pub fn new(block: B, senders: Vec
) -> Option { + (block.body().transactions().len() == senders.len()).then_some(Self { block, senders }) } /// Seal the block with a known hash. /// /// WARNING: This method does not perform validation whether the hash is correct. #[inline] - pub fn seal(self, hash: B256) -> SealedBlockWithSenders { + pub fn seal(self, hash: B256) -> SealedBlockWithSenders { let Self { block, senders } = self; - SealedBlockWithSenders { block: block.seal(hash), senders } + SealedBlockWithSenders:: { block: block.seal(hash), senders } } /// Calculate the header hash and seal the block with senders so that it can't be changed. #[inline] - pub fn seal_slow(self) -> SealedBlockWithSenders { + pub fn seal_slow(self) -> SealedBlockWithSenders { SealedBlockWithSenders { block: self.block.seal_slow(), senders: self.senders } } /// Split Structure to its components #[inline] - pub fn into_components(self) -> (Block, Vec
) { + pub fn into_components(self) -> (B, Vec
) { (self.block, self.senders) } @@ -249,18 +197,27 @@ impl BlockWithSenders { #[inline] pub fn transactions_with_sender( &self, - ) -> impl Iterator + '_ { - self.senders.iter().zip(self.block.body.transactions()) + ) -> impl Iterator::Transaction)> + + '_ { + self.senders.iter().zip(self.block.body().transactions()) } /// Returns an iterator over all transactions in the chain. #[inline] pub fn into_transactions_ecrecovered( self, - ) -> impl Iterator { + ) -> impl Iterator< + Item = TransactionSignedEcRecovered< + ::Transaction, + >, + > + where + ::Transaction: SignedTransaction, + { self.block - .body - .transactions + .split() + .1 + .into_transactions() .into_iter() .zip(self.senders) .map(|(tx, sender)| tx.with_signer(sender)) @@ -268,8 +225,10 @@ impl BlockWithSenders { /// Consumes the block and returns the transactions of the block. #[inline] - pub fn into_transactions(self) -> Vec { - self.block.body.transactions + pub fn into_transactions( + self, + ) -> Vec<::Transaction> { + self.block.split().1.into_transactions() } } @@ -308,18 +267,29 @@ impl SealedBlock { } impl SealedBlock { - /// Splits the sealed block into underlying components - #[inline] - pub fn split(self) -> (SealedHeader, Vec, Vec
) { - (self.header, self.body.transactions, self.body.ommers) - } - /// Returns an iterator over all blob transactions of the block #[inline] pub fn blob_transactions_iter(&self) -> impl Iterator + '_ { self.body.blob_transactions_iter() } + /// Calculates the total gas used by blob transactions in the sealed block. + pub fn blob_gas_used(&self) -> u64 { + self.blob_transactions().iter().filter_map(|tx| tx.blob_gas_used()).sum() + } + + /// Returns whether or not the block contains any blob transactions. + #[inline] + pub fn has_blob_transactions(&self) -> bool { + self.body.has_blob_transactions() + } + + /// Returns whether or not the block contains any eip-7702 transactions. + #[inline] + pub fn has_eip7702_transactions(&self) -> bool { + self.body.has_eip7702_transactions() + } + /// Returns only the blob transactions, if any, from the block body. #[inline] pub fn blob_transactions(&self) -> Vec<&TransactionSigned> { @@ -333,25 +303,42 @@ impl SealedBlock { .filter_map(|tx| tx.as_eip4844().map(|blob_tx| &blob_tx.blob_versioned_hashes)) .flatten() } +} - /// Returns all blob versioned hashes from the block body. +impl SealedBlock +where + H: reth_primitives_traits::BlockHeader, + B: reth_primitives_traits::BlockBody, +{ + /// Splits the sealed block into underlying components #[inline] - pub fn blob_versioned_hashes(&self) -> Vec<&B256> { - self.blob_versioned_hashes_iter().collect() + pub fn split(self) -> (SealedHeader, B) { + (self.header, self.body) } /// Expensive operation that recovers transaction signer. See [`SealedBlockWithSenders`]. - pub fn senders(&self) -> Option> { + pub fn senders(&self) -> Option> + where + B::Transaction: SignedTransaction, + { self.body.recover_signers() } /// Seal sealed block with recovered transaction senders. - pub fn seal_with_senders(self) -> Option { + pub fn seal_with_senders(self) -> Option> + where + B::Transaction: SignedTransaction, + T: reth_primitives_traits::Block
, + { self.try_seal_with_senders().ok() } /// Seal sealed block with recovered transaction senders. - pub fn try_seal_with_senders(self) -> Result { + pub fn try_seal_with_senders(self) -> Result, Self> + where + B::Transaction: SignedTransaction, + T: reth_primitives_traits::Block
, + { match self.senders() { Some(senders) => Ok(SealedBlockWithSenders { block: self, senders }), None => Err(self), @@ -365,7 +352,11 @@ impl SealedBlock { /// If the number of senders does not match the number of transactions in the block /// and the signer recovery for one of the transactions fails. #[track_caller] - pub fn with_senders_unchecked(self, senders: Vec
) -> SealedBlockWithSenders { + pub fn with_senders_unchecked(self, senders: Vec
) -> SealedBlockWithSenders + where + B::Transaction: SignedTransaction, + T: reth_primitives_traits::Block
, + { self.try_with_senders_unchecked(senders).expect("stored block is valid") } @@ -377,11 +368,15 @@ impl SealedBlock { /// /// Returns an error if a signature is invalid. #[track_caller] - pub fn try_with_senders_unchecked( + pub fn try_with_senders_unchecked( self, senders: Vec
, - ) -> Result { - let senders = if self.body.transactions.len() == senders.len() { + ) -> Result, Self> + where + B::Transaction: SignedTransaction, + T: reth_primitives_traits::Block
, + { + let senders = if self.body.transactions().len() == senders.len() { senders } else { let Some(senders) = self.body.recover_signers_unchecked() else { return Err(self) }; @@ -392,25 +387,11 @@ impl SealedBlock { } /// Unseal the block - pub fn unseal(self) -> Block { - Block { header: self.header.unseal(), body: self.body } - } - - /// Calculates the total gas used by blob transactions in the sealed block. - pub fn blob_gas_used(&self) -> u64 { - self.blob_transactions().iter().filter_map(|tx| tx.blob_gas_used()).sum() - } - - /// Returns whether or not the block contains any blob transactions. - #[inline] - pub fn has_blob_transactions(&self) -> bool { - self.body.has_blob_transactions() - } - - /// Returns whether or not the block contains any eip-7702 transactions. - #[inline] - pub fn has_eip7702_transactions(&self) -> bool { - self.body.has_eip7702_transactions() + pub fn unseal(self) -> Block + where + Block: reth_primitives_traits::Block
, + { + Block::new(self.header.unseal(), self.body) } /// Ensures that the transaction root in the block header is valid. @@ -425,13 +406,16 @@ impl SealedBlock { /// /// Returns `Err(error)` if the transaction root validation fails, providing a `GotExpected` /// error containing the calculated and expected roots. - pub fn ensure_transaction_root_valid(&self) -> Result<(), GotExpected> { + pub fn ensure_transaction_root_valid(&self) -> Result<(), GotExpected> + where + B::Transaction: Encodable2718, + { let calculated_root = self.body.calculate_tx_root(); - if self.header.transactions_root != calculated_root { + if self.header.transactions_root() != calculated_root { return Err(GotExpected { got: calculated_root, - expected: self.header.transactions_root, + expected: self.header.transactions_root(), }) } @@ -440,8 +424,11 @@ impl SealedBlock { /// Returns a vector of transactions RLP encoded with /// [`alloy_eips::eip2718::Encodable2718::encoded_2718`]. - pub fn raw_transactions(&self) -> Vec { - self.body.transactions().map(|tx| tx.encoded_2718().into()).collect() + pub fn raw_transactions(&self) -> Vec + where + B::Transaction: Encodable2718, + { + self.body.transactions().iter().map(|tx| tx.encoded_2718().into()).collect() } } @@ -471,12 +458,16 @@ where impl reth_primitives_traits::Block for SealedBlock where H: reth_primitives_traits::BlockHeader + 'static, - B: reth_primitives_traits::BlockBody + 'static, + B: reth_primitives_traits::BlockBody + 'static, Self: Serialize + for<'a> Deserialize<'a>, { type Header = H; type Body = B; + fn new(header: Self::Header, body: Self::Body) -> Self { + Self { header: SealedHeader::seal(header), body } + } + fn header(&self) -> &Self::Header { self.header.header() } @@ -484,6 +475,10 @@ where fn body(&self) -> &Self::Body { &self.body } + + fn split(self) -> (Self::Header, Self::Body) { + (self.header.unseal(), self.body) + } } #[cfg(any(test, feature = "arbitrary"))] @@ -497,47 +492,55 @@ where } } +/// A helepr trait to 
construct [`SealedBlock`] from a [`reth_primitives_traits::Block`]. +pub type SealedBlockFor = SealedBlock< + ::Header, + ::Body, +>; /// Sealed block with senders recovered from transactions. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Deref, DerefMut)] -pub struct SealedBlockWithSenders { +pub struct SealedBlockWithSenders { /// Sealed block #[deref] #[deref_mut] - pub block: SealedBlock, + #[serde(bound = "SealedBlock: Serialize + serde::de::DeserializeOwned")] + pub block: SealedBlock, /// List of senders that match transactions from block. pub senders: Vec
, } -impl Default for SealedBlockWithSenders { +impl Default for SealedBlockWithSenders { fn default() -> Self { Self { block: SealedBlock::default(), senders: Default::default() } } } -impl SealedBlockWithSenders { +impl SealedBlockWithSenders { /// New sealed block with sender. Return none if len of tx and senders does not match - pub fn new(block: SealedBlock, senders: Vec
) -> Option { + pub fn new(block: SealedBlock, senders: Vec
) -> Option { (block.body.transactions().len() == senders.len()).then_some(Self { block, senders }) } } -impl SealedBlockWithSenders { +impl SealedBlockWithSenders { /// Split Structure to its components #[inline] - pub fn into_components(self) -> (SealedBlock, Vec
) { + pub fn into_components(self) -> (SealedBlock, Vec
) { (self.block, self.senders) } /// Returns the unsealed [`BlockWithSenders`] #[inline] - pub fn unseal(self) -> BlockWithSenders { - let Self { block, senders } = self; - BlockWithSenders::new_unchecked(block.unseal(), senders) + pub fn unseal(self) -> BlockWithSenders { + let (block, senders) = self.into_components(); + let (header, body) = block.split(); + let header = header.unseal(); + BlockWithSenders::new_unchecked(B::new(header, body), senders) } /// Returns an iterator over all transactions in the block. #[inline] - pub fn transactions(&self) -> impl Iterator + '_ { + pub fn transactions(&self) -> &[::Transaction] { self.block.body.transactions() } @@ -545,24 +548,34 @@ impl SealedBlockWithSenders { #[inline] pub fn transactions_with_sender( &self, - ) -> impl Iterator + '_ { + ) -> impl Iterator::Transaction)> + + '_ { self.senders.iter().zip(self.block.body.transactions()) } /// Consumes the block and returns the transactions of the block. #[inline] - pub fn into_transactions(self) -> Vec { - self.block.body.transactions + pub fn into_transactions( + self, + ) -> Vec<::Transaction> { + self.block.body.into_transactions() } /// Returns an iterator over all transactions in the chain. #[inline] pub fn into_transactions_ecrecovered( self, - ) -> impl Iterator { + ) -> impl Iterator< + Item = TransactionSignedEcRecovered< + ::Transaction, + >, + > + where + ::Transaction: SignedTransaction, + { self.block .body - .transactions + .into_transactions() .into_iter() .zip(self.senders) .map(|(tx, sender)| tx.with_signer(sender)) @@ -608,11 +621,6 @@ impl BlockBody { Block { header, body: self } } - /// Calculate the transaction root for the block body. - pub fn calculate_tx_root(&self) -> B256 { - crate::proofs::calculate_transaction_root(&self.transactions) - } - /// Calculate the ommers root for the block body. 
pub fn calculate_ommers_root(&self) -> B256 { crate::proofs::calculate_ommers_root(&self.ommers) @@ -624,20 +632,6 @@ impl BlockBody { self.withdrawals.as_ref().map(|w| crate::proofs::calculate_withdrawals_root(w)) } - /// Recover signer addresses for all transactions in the block body. - pub fn recover_signers(&self) -> Option> { - TransactionSigned::recover_signers(&self.transactions, self.transactions.len()) - } - - /// Recover signer addresses for all transactions in the block body _without ensuring that the - /// signature has a low `s` value_. - /// - /// Returns `None`, if some transaction's signature is invalid, see also - /// [`TransactionSigned::recover_signer_unchecked`]. - pub fn recover_signers_unchecked(&self) -> Option> { - TransactionSigned::recover_signers_unchecked(&self.transactions, self.transactions.len()) - } - /// Returns whether or not the block body contains any blob transactions. #[inline] pub fn has_blob_transactions(&self) -> bool { @@ -675,12 +669,6 @@ impl BlockBody { pub fn blob_versioned_hashes(&self) -> Vec<&B256> { self.blob_versioned_hashes_iter().collect() } - - /// Returns an iterator over all transactions. 
- #[inline] - pub fn transactions(&self) -> impl Iterator + '_ { - self.transactions.iter() - } } impl InMemorySize for BlockBody { @@ -699,10 +687,23 @@ impl InMemorySize for BlockBody { impl reth_primitives_traits::BlockBody for BlockBody { type Transaction = TransactionSigned; + type OmmerHeader = Header; fn transactions(&self) -> &[Self::Transaction] { &self.transactions } + + fn into_transactions(self) -> Vec { + self.transactions + } + + fn withdrawals(&self) -> Option<&Withdrawals> { + self.withdrawals.as_ref() + } + + fn ommers(&self) -> Option<&[Self::OmmerHeader]> { + Some(&self.ommers) + } } impl From for BlockBody { @@ -1168,9 +1169,9 @@ mod tests { Some(BlockWithSenders { block: block.clone(), senders: vec![sender] }) ); let sealed = block.seal_slow(); - assert_eq!(SealedBlockWithSenders::new(sealed.clone(), vec![]), None); + assert_eq!(SealedBlockWithSenders::::new(sealed.clone(), vec![]), None); assert_eq!( - SealedBlockWithSenders::new(sealed.clone(), vec![sender]), + SealedBlockWithSenders::::new(sealed.clone(), vec![sender]), Some(SealedBlockWithSenders { block: sealed, senders: vec![sender] }) ); } @@ -1179,7 +1180,7 @@ mod tests { fn test_default_seal() { let block: SealedBlock = SealedBlock::default(); let sealed = block.hash(); - let block = block.unseal(); + let block: Block = block.unseal(); let block = block.seal_slow(); assert_eq!(sealed, block.hash()); } diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index ae840f7f84f0..7062858f4110 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -26,6 +26,9 @@ use reth_scroll_primitives as _; extern crate alloc; +mod traits; +pub use traits::*; + #[cfg(feature = "alloy-compat")] mod alloy_compat; mod block; @@ -37,7 +40,9 @@ pub use reth_static_file_types as static_file; pub mod transaction; #[cfg(any(test, feature = "arbitrary"))] pub use block::{generate_valid_header, valid_header_strategy}; -pub use block::{Block, BlockBody, BlockWithSenders, 
SealedBlock, SealedBlockWithSenders}; +pub use block::{ + Block, BlockBody, BlockWithSenders, SealedBlock, SealedBlockFor, SealedBlockWithSenders, +}; #[cfg(feature = "reth-codec")] pub use compression::*; pub use receipt::{ @@ -82,6 +87,7 @@ pub mod serde_bincode_compat { /// Temp helper struct for integrating [`NodePrimitives`]. #[derive(Debug, Clone, Default, PartialEq, Eq, serde::Serialize, serde::Deserialize)] +#[non_exhaustive] pub struct EthPrimitives; impl reth_primitives_traits::NodePrimitives for EthPrimitives { diff --git a/crates/primitives/src/proofs.rs b/crates/primitives/src/proofs.rs index 5cfda9ab1dad..f1931bcab95e 100644 --- a/crates/primitives/src/proofs.rs +++ b/crates/primitives/src/proofs.rs @@ -1,7 +1,7 @@ //! Helper function for calculating Merkle proofs and hashes. -use crate::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef, TransactionSigned}; -use alloc::vec::Vec; +use crate::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef}; +use alloc::{borrow::Borrow, vec::Vec}; use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawal}; use alloy_primitives::{keccak256, B256}; @@ -12,9 +12,9 @@ use alloy_trie::root::{ordered_trie_root, ordered_trie_root_with_encoder}; /// `(rlp(index), encoded(tx))` pairs. pub fn calculate_transaction_root(transactions: &[T]) -> B256 where - T: AsRef, + T: Encodable2718, { - ordered_trie_root_with_encoder(transactions, |tx: &T, buf| tx.as_ref().encode_2718(buf)) + ordered_trie_root_with_encoder(transactions, |tx, buf| tx.borrow().encode_2718(buf)) } /// Calculates the root hash of the withdrawals. 
diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index 34bfa3c890cf..4b9cc09f95d1 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -139,7 +139,6 @@ impl InMemorySize for Receipt { Debug, PartialEq, Eq, - Default, Serialize, Deserialize, From, @@ -195,6 +194,12 @@ impl From for ReceiptWithBloom { } } +impl Default for Receipts { + fn default() -> Self { + Self { receipt_vec: Vec::new() } + } +} + /// [`Receipt`] with calculated bloom filter. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] diff --git a/crates/primitives/src/traits.rs b/crates/primitives/src/traits.rs new file mode 100644 index 000000000000..307443c00b8c --- /dev/null +++ b/crates/primitives/src/traits.rs @@ -0,0 +1,137 @@ +use crate::{ + transaction::{recover_signers, recover_signers_unchecked}, + BlockWithSenders, SealedBlock, +}; +use alloc::vec::Vec; +use alloy_eips::{eip2718::Encodable2718, BlockNumHash}; +use reth_primitives_traits::{Block, BlockBody, BlockHeader, SealedHeader, SignedTransaction}; +use revm_primitives::primitives::{Address, B256}; + +/// Extension trait for [`reth_primitives_traits::Block`] implementations +/// allowing for conversions into common block parts containers such as [`SealedBlock`], +/// [`BlockWithSenders`], etc. +pub trait BlockExt: Block { + /// Calculate the header hash and seal the block so that it can't be changed. + fn seal_slow(self) -> SealedBlock { + let (header, body) = self.split(); + SealedBlock { header: SealedHeader::seal(header), body } + } + + /// Seal the block with a known hash. + /// + /// WARNING: This method does not perform validation whether the hash is correct. + fn seal(self, hash: B256) -> SealedBlock { + let (header, body) = self.split(); + SealedBlock { header: SealedHeader::new(header, hash), body } + } + + /// Expensive operation that recovers transaction signer. 
+ fn senders(&self) -> Option> + where + ::Transaction: SignedTransaction, + { + self.body().recover_signers() + } + + /// Transform into a [`BlockWithSenders`]. + /// + /// # Panics + /// + /// If the number of senders does not match the number of transactions in the block + /// and the signer recovery for one of the transactions fails. + /// + /// Note: this is expected to be called with blocks read from disk. + #[track_caller] + fn with_senders_unchecked(self, senders: Vec
) -> BlockWithSenders + where + ::Transaction: SignedTransaction, + { + self.try_with_senders_unchecked(senders).expect("stored block is valid") + } + + /// Transform into a [`BlockWithSenders`] using the given senders. + /// + /// If the number of senders does not match the number of transactions in the block, this falls + /// back to manually recovery, but _without ensuring that the signature has a low `s` value_. + /// See also [`recover_signers_unchecked`] + /// + /// Returns an error if a signature is invalid. + #[track_caller] + fn try_with_senders_unchecked( + self, + senders: Vec
, + ) -> Result, Self> + where + ::Transaction: SignedTransaction, + { + let senders = if self.body().transactions().len() == senders.len() { + senders + } else { + let Some(senders) = self.body().recover_signers_unchecked() else { return Err(self) }; + senders + }; + + Ok(BlockWithSenders::new_unchecked(self, senders)) + } + + /// **Expensive**. Transform into a [`BlockWithSenders`] by recovering senders in the contained + /// transactions. + /// + /// Returns `None` if a transaction is invalid. + fn with_recovered_senders(self) -> Option> + where + ::Transaction: SignedTransaction, + { + let senders = self.senders()?; + Some(BlockWithSenders::new_unchecked(self, senders)) + } +} + +impl BlockExt for T {} + +/// Extension trait for [`BlockBody`] adding helper methods operating with transactions. +pub trait BlockBodyTxExt: BlockBody { + /// Calculate the transaction root for the block body. + fn calculate_tx_root(&self) -> B256 + where + Self::Transaction: Encodable2718, + { + crate::proofs::calculate_transaction_root(self.transactions()) + } + + /// Recover signer addresses for all transactions in the block body. + fn recover_signers(&self) -> Option> + where + Self::Transaction: SignedTransaction, + { + recover_signers(self.transactions(), self.transactions().len()) + } + + /// Recover signer addresses for all transactions in the block body _without ensuring that the + /// signature has a low `s` value_. + /// + /// Returns `None`, if some transaction's signature is invalid, see also + /// [`recover_signers_unchecked`]. + fn recover_signers_unchecked(&self) -> Option> + where + Self::Transaction: SignedTransaction, + { + recover_signers_unchecked(self.transactions(), self.transactions().len()) + } +} + +impl BlockBodyTxExt for T {} + +/// Extension trait for [`BlockHeader`] adding useful helper methods. 
+pub trait HeaderExt: BlockHeader { + /// TODO: remove once is released + /// + /// Returns the parent block's number and hash + /// + /// Note: for the genesis block the parent number is 0 and the parent hash is the zero hash. + fn parent_num_hash(&self) -> BlockNumHash { + BlockNumHash::new(self.number().saturating_sub(1), self.parent_hash()) + } +} + +impl HeaderExt for T {} diff --git a/crates/primitives/src/traits/mod.rs b/crates/primitives/src/traits/mod.rs deleted file mode 100644 index 49fb73ea5555..000000000000 --- a/crates/primitives/src/traits/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -//! Abstractions of primitive data types - -pub mod block; -pub mod transaction; - -pub use block::{body::BlockBody, Block}; -pub use transaction::signed::SignedTransaction; - -pub use alloy_consensus::BlockHeader; diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index e3868b9c1c98..eff579cba59e 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1124,7 +1124,7 @@ impl PartialEq for TransactionSigned { fn eq(&self, other: &Self) -> bool { self.signature == other.signature && self.transaction == other.transaction && - self.hash_ref() == other.hash_ref() + self.tx_hash() == other.tx_hash() } } @@ -1143,11 +1143,6 @@ impl TransactionSigned { Self { hash: Default::default(), signature, transaction } } - /// Transaction signature. - pub const fn signature(&self) -> &Signature { - &self.signature - } - /// Transaction pub const fn transaction(&self) -> &Transaction { &self.transaction @@ -1155,56 +1150,7 @@ impl TransactionSigned { /// Transaction hash. Used to identify transaction. pub fn hash(&self) -> TxHash { - *self.hash_ref() - } - - /// Reference to transaction hash. Used to identify transaction. - pub fn hash_ref(&self) -> &TxHash { - self.hash.get_or_init(|| self.recalculate_hash()) - } - - /// Recover signer from signature and hash. 
- /// - /// Returns `None` if the transaction's signature is invalid following [EIP-2](https://eips.ethereum.org/EIPS/eip-2), see also [`recover_signer`]. - /// - /// Note: - /// - /// This can fail for some early ethereum mainnet transactions pre EIP-2, use - /// [`Self::recover_signer_unchecked`] if you want to recover the signer without ensuring that - /// the signature has a low `s` value. - pub fn recover_signer(&self) -> Option
{ - // Optimism's Deposit transaction does not have a signature. Directly return the - // `from` address. - #[cfg(all(feature = "optimism", not(feature = "scroll")))] - if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction { - return Some(from) - } - #[cfg(all(feature = "scroll", not(feature = "optimism")))] - if let Transaction::L1Message(TxL1Message { sender, .. }) = self.transaction { - return Some(sender) - } - let signature_hash = self.signature_hash(); - recover_signer(&self.signature, signature_hash) - } - - /// Recover signer from signature and hash _without ensuring that the signature has a low `s` - /// value_. - /// - /// Returns `None` if the transaction's signature is invalid, see also - /// [`recover_signer_unchecked`]. - pub fn recover_signer_unchecked(&self) -> Option
{ - // Optimism's Deposit transaction does not have a signature. Directly return the - // `from` address. - #[cfg(all(feature = "optimism", not(feature = "scroll")))] - if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction { - return Some(from) - } - #[cfg(all(feature = "scroll", not(feature = "optimism")))] - if let Transaction::L1Message(TxL1Message { sender, .. }) = self.transaction { - return Some(sender) - } - let signature_hash = self.signature_hash(); - recover_signer_unchecked(&self.signature, signature_hash) + *self.tx_hash() } /// Recovers a list of signers from a transaction list iterator. @@ -1370,7 +1316,7 @@ impl SignedTransaction for TransactionSigned { type Type = TxType; fn tx_hash(&self) -> &TxHash { - self.hash_ref() + self.hash.get_or_init(|| self.recalculate_hash()) } fn signature(&self) -> &Signature { @@ -1378,11 +1324,31 @@ impl SignedTransaction for TransactionSigned { } fn recover_signer(&self) -> Option
{ + // Optimism's Deposit transaction does not have a signature. Directly return the + // `from` address. + #[cfg(all(feature = "optimism", not(feature = "scroll")))] + if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction { + return Some(from) + } + #[cfg(all(feature = "scroll", not(feature = "optimism")))] + if let Transaction::L1Message(TxL1Message { sender, .. }) = self.transaction { + return Some(sender) + } let signature_hash = self.signature_hash(); recover_signer(&self.signature, signature_hash) } fn recover_signer_unchecked(&self) -> Option
{ + // Optimism's Deposit transaction does not have a signature. Directly return the + // `from` address. + #[cfg(all(feature = "optimism", not(feature = "scroll")))] + if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction { + return Some(from) + } + #[cfg(all(feature = "scroll", not(feature = "optimism")))] + if let Transaction::L1Message(TxL1Message { sender, .. }) = self.transaction { + return Some(sender) + } let signature_hash = self.signature_hash(); recover_signer_unchecked(&self.signature, signature_hash) } @@ -1822,6 +1788,11 @@ pub trait SignedTransactionIntoRecoveredExt: SignedTransaction { let signer = self.recover_signer_unchecked()?; Some(TransactionSignedEcRecovered::from_signed_transaction(self, signer)) } + + /// Returns the [`TransactionSignedEcRecovered`] transaction with the given sender. + fn with_signer(self, signer: Address) -> TransactionSignedEcRecovered { + TransactionSignedEcRecovered::from_signed_transaction(self, signer) + } } impl SignedTransactionIntoRecoveredExt for T where T: SignedTransaction {} @@ -2052,6 +2023,22 @@ where } } +/// Recovers a list of signers from a transaction list iterator _without ensuring that the +/// signature has a low `s` value_. +/// +/// Returns `None`, if some transaction's signature is invalid. 
+pub fn recover_signers_unchecked<'a, I, T>(txes: I, num_txes: usize) -> Option> +where + T: SignedTransaction, + I: IntoParallelIterator + IntoIterator + Send, +{ + if num_txes < *PARALLEL_SENDER_RECOVERY_THRESHOLD { + txes.into_iter().map(|tx| tx.recover_signer_unchecked()).collect() + } else { + txes.into_par_iter().map(|tx| tx.recover_signer_unchecked()).collect() + } +} + #[cfg(test)] mod tests { use crate::{ @@ -2066,6 +2053,7 @@ mod tests { use alloy_rlp::{Decodable, Encodable, Error as RlpError}; use reth_chainspec::MIN_TRANSACTION_GAS; use reth_codecs::Compact; + use reth_primitives_traits::SignedTransaction; use std::str::FromStr; #[test] diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index ea9e91455ed4..078e150b212f 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -672,48 +672,43 @@ impl<'a> arbitrary::Arbitrary<'a> for PooledTransactionsElement { /// A signed pooled transaction with recovered signer. #[derive(Debug, Clone, PartialEq, Eq, AsRef, Deref)] -pub struct PooledTransactionsElementEcRecovered { +pub struct PooledTransactionsElementEcRecovered { /// Signer of the transaction signer: Address, /// Signed transaction #[deref] #[as_ref] - transaction: PooledTransactionsElement, + transaction: T, } -// === impl PooledTransactionsElementEcRecovered === +impl PooledTransactionsElementEcRecovered { + /// Create an instance from the given transaction and the [`Address`] of the signer. 
+ pub const fn from_signed_transaction(transaction: T, signer: Address) -> Self { + Self { transaction, signer } + } -impl PooledTransactionsElementEcRecovered { /// Signer of transaction recovered from signature pub const fn signer(&self) -> Address { self.signer } - /// Transform back to [`PooledTransactionsElement`] - pub fn into_transaction(self) -> PooledTransactionsElement { + /// Consume the type and return the transaction + pub fn into_transaction(self) -> T { self.transaction } + /// Dissolve Self to its component + pub fn into_components(self) -> (T, Address) { + (self.transaction, self.signer) + } +} +impl PooledTransactionsElementEcRecovered { /// Transform back to [`TransactionSignedEcRecovered`] pub fn into_ecrecovered_transaction(self) -> TransactionSignedEcRecovered { let (tx, signer) = self.into_components(); tx.into_ecrecovered_transaction(signer) } - /// Dissolve Self to its component - pub fn into_components(self) -> (PooledTransactionsElement, Address) { - (self.transaction, self.signer) - } - - /// Create [`TransactionSignedEcRecovered`] from [`PooledTransactionsElement`] and [`Address`] - /// of the signer. - pub const fn from_signed_transaction( - transaction: PooledTransactionsElement, - signer: Address, - ) -> Self { - Self { transaction, signer } - } - /// Converts from an EIP-4844 [`TransactionSignedEcRecovered`] to a /// [`PooledTransactionsElementEcRecovered`] with the given sidecar. 
/// @@ -744,6 +739,24 @@ impl TryFrom for PooledTransactionsElementEcRecove } } +impl Encodable2718 for PooledTransactionsElementEcRecovered { + fn type_flag(&self) -> Option { + self.transaction.type_flag() + } + + fn encode_2718_len(&self) -> usize { + self.transaction.encode_2718_len() + } + + fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { + self.transaction.encode_2718(out) + } + + fn trie_hash(&self) -> B256 { + self.transaction.trie_hash() + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/primitives/src/transaction/signature.rs b/crates/primitives/src/transaction/signature.rs index 8fab719947a1..6056266ae0fe 100644 --- a/crates/primitives/src/transaction/signature.rs +++ b/crates/primitives/src/transaction/signature.rs @@ -72,6 +72,7 @@ mod tests { }; use alloy_eips::eip2718::Decodable2718; use alloy_primitives::{hex, Address, PrimitiveSignature as Signature, B256, U256}; + use reth_primitives_traits::SignedTransaction; use std::str::FromStr; #[test] diff --git a/crates/prune/prune/Cargo.toml b/crates/prune/prune/Cargo.toml index 41156d3e56b8..f772ff546691 100644 --- a/crates/prune/prune/Cargo.toml +++ b/crates/prune/prune/Cargo.toml @@ -22,9 +22,11 @@ reth-provider.workspace = true reth-tokio-util.workspace = true reth-config.workspace = true reth-prune-types.workspace = true +reth-primitives-traits.workspace = true reth-static-file-types.workspace = true # ethereum +alloy-consensus.workspace = true alloy-eips.workspace = true # metrics diff --git a/crates/prune/prune/src/builder.rs b/crates/prune/prune/src/builder.rs index 85697160115b..4fd56617121a 100644 --- a/crates/prune/prune/src/builder.rs +++ b/crates/prune/prune/src/builder.rs @@ -1,11 +1,13 @@ use crate::{segments::SegmentSet, Pruner}; +use alloy_eips::eip2718::Encodable2718; use reth_chainspec::MAINNET; use reth_config::PruneConfig; -use reth_db::transaction::DbTxMut; +use reth_db::{table::Value, transaction::DbTxMut}; use reth_exex_types::FinishedExExHeight; +use 
reth_primitives_traits::NodePrimitives; use reth_provider::{ providers::StaticFileProvider, BlockReader, DBProvider, DatabaseProviderFactory, - PruneCheckpointWriter, StaticFileProviderFactory, TransactionsProvider, + NodePrimitivesProvider, PruneCheckpointWriter, StaticFileProviderFactory, }; use reth_prune_types::PruneModes; use std::time::Duration; @@ -77,9 +79,13 @@ impl PrunerBuilder { pub fn build_with_provider_factory(self, provider_factory: PF) -> Pruner where PF: DatabaseProviderFactory< - ProviderRW: PruneCheckpointWriter + BlockReader + StaticFileProviderFactory, + ProviderRW: PruneCheckpointWriter + + BlockReader + + StaticFileProviderFactory< + Primitives: NodePrimitives, + >, > + StaticFileProviderFactory< - Primitives = ::Primitives, + Primitives = ::Primitives, >, { let segments = @@ -101,11 +107,10 @@ impl PrunerBuilder { static_file_provider: StaticFileProvider, ) -> Pruner where - Provider: StaticFileProviderFactory + Provider: StaticFileProviderFactory> + DBProvider - + BlockReader - + PruneCheckpointWriter - + TransactionsProvider, + + BlockReader + + PruneCheckpointWriter, { let segments = SegmentSet::::from_components(static_file_provider, self.segments); diff --git a/crates/prune/prune/src/segments/mod.rs b/crates/prune/prune/src/segments/mod.rs index b3b40aab5b3b..e828512fa824 100644 --- a/crates/prune/prune/src/segments/mod.rs +++ b/crates/prune/prune/src/segments/mod.rs @@ -148,6 +148,7 @@ impl PruneInput { mod tests { use super::*; use alloy_primitives::B256; + use reth_primitives_traits::BlockBody; use reth_provider::{ providers::BlockchainProvider2, test_utils::{create_test_provider_factory, MockEthProvider}, @@ -245,7 +246,7 @@ mod tests { // Calculate the total number of transactions let num_txs = - blocks.iter().map(|block| block.body.transactions().count() as u64).sum::(); + blocks.iter().map(|block| block.body.transactions().len() as u64).sum::(); assert_eq!(range, 0..=num_txs - 1); } @@ -292,7 +293,7 @@ mod tests { // Calculate 
the total number of transactions let num_txs = - blocks.iter().map(|block| block.body.transactions().count() as u64).sum::(); + blocks.iter().map(|block| block.body.transactions().len() as u64).sum::(); assert_eq!(range, 0..=num_txs - 1,); } @@ -327,7 +328,7 @@ mod tests { // Get the last tx number // Calculate the total number of transactions let num_txs = - blocks.iter().map(|block| block.body.transactions().count() as u64).sum::(); + blocks.iter().map(|block| block.body.transactions().len() as u64).sum::(); let max_range = num_txs - 1; // Create a prune input with a previous checkpoint that is the last tx number diff --git a/crates/prune/prune/src/segments/receipts.rs b/crates/prune/prune/src/segments/receipts.rs index c081bf88c7d2..a365738a777d 100644 --- a/crates/prune/prune/src/segments/receipts.rs +++ b/crates/prune/prune/src/segments/receipts.rs @@ -6,10 +6,11 @@ //! node after static file producer has finished use crate::{db_ext::DbTxPruneExt, segments::PruneInput, PrunerError}; -use reth_db::{tables, transaction::DbTxMut}; +use reth_db::{table::Value, tables, transaction::DbTxMut}; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ - errors::provider::ProviderResult, BlockReader, DBProvider, PruneCheckpointWriter, - TransactionsProvider, + errors::provider::ProviderResult, BlockReader, DBProvider, NodePrimitivesProvider, + PruneCheckpointWriter, TransactionsProvider, }; use reth_prune_types::{ PruneCheckpoint, PruneProgress, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, @@ -21,7 +22,10 @@ pub(crate) fn prune( input: PruneInput, ) -> Result where - Provider: DBProvider + TransactionsProvider + BlockReader, + Provider: DBProvider + + TransactionsProvider + + BlockReader + + NodePrimitivesProvider>, { let tx_range = match input.get_next_tx_num_range(provider)? 
{ Some(range) => range, @@ -35,7 +39,9 @@ where let mut limiter = input.limiter; let mut last_pruned_transaction = tx_range_end; - let (pruned, done) = provider.tx_ref().prune_table_with_range::( + let (pruned, done) = provider.tx_ref().prune_table_with_range::::Receipt, + >>( tx_range, &mut limiter, |_| false, diff --git a/crates/prune/prune/src/segments/set.rs b/crates/prune/prune/src/segments/set.rs index 62c252fc54b6..d7bbee1042ba 100644 --- a/crates/prune/prune/src/segments/set.rs +++ b/crates/prune/prune/src/segments/set.rs @@ -2,10 +2,12 @@ use crate::segments::{ AccountHistory, ReceiptsByLogs, Segment, SenderRecovery, StorageHistory, TransactionLookup, UserReceipts, }; -use reth_db::transaction::DbTxMut; +use alloy_eips::eip2718::Encodable2718; +use reth_db::{table::Value, transaction::DbTxMut}; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ providers::StaticFileProvider, BlockReader, DBProvider, PruneCheckpointWriter, - StaticFileProviderFactory, TransactionsProvider, + StaticFileProviderFactory, }; use reth_prune_types::PruneModes; @@ -45,11 +47,10 @@ impl SegmentSet { impl SegmentSet where - Provider: StaticFileProviderFactory + Provider: StaticFileProviderFactory> + DBProvider - + TransactionsProvider + PruneCheckpointWriter - + BlockReader, + + BlockReader, { /// Creates a [`SegmentSet`] from an existing components, such as [`StaticFileProvider`] and /// [`PruneModes`]. 
diff --git a/crates/prune/prune/src/segments/static_file/receipts.rs b/crates/prune/prune/src/segments/static_file/receipts.rs index 5221418674aa..6cdc53759904 100644 --- a/crates/prune/prune/src/segments/static_file/receipts.rs +++ b/crates/prune/prune/src/segments/static_file/receipts.rs @@ -2,7 +2,8 @@ use crate::{ segments::{PruneInput, Segment}, PrunerError, }; -use reth_db::transaction::DbTxMut; +use reth_db::{table::Value, transaction::DbTxMut}; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ errors::provider::ProviderResult, providers::StaticFileProvider, BlockReader, DBProvider, PruneCheckpointWriter, StaticFileProviderFactory, TransactionsProvider, @@ -23,7 +24,7 @@ impl Receipts { impl Segment for Receipts where - Provider: StaticFileProviderFactory + Provider: StaticFileProviderFactory> + DBProvider + PruneCheckpointWriter + TransactionsProvider diff --git a/crates/prune/prune/src/segments/static_file/transactions.rs b/crates/prune/prune/src/segments/static_file/transactions.rs index 7dc7a23191a0..20274e5dc706 100644 --- a/crates/prune/prune/src/segments/static_file/transactions.rs +++ b/crates/prune/prune/src/segments/static_file/transactions.rs @@ -3,7 +3,8 @@ use crate::{ segments::{PruneInput, Segment}, PrunerError, }; -use reth_db::{tables, transaction::DbTxMut}; +use reth_db::{table::Value, tables, transaction::DbTxMut}; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ providers::StaticFileProvider, BlockReader, DBProvider, StaticFileProviderFactory, TransactionsProvider, @@ -27,8 +28,10 @@ impl Transactions { impl Segment for Transactions where - Provider: - DBProvider + TransactionsProvider + BlockReader + StaticFileProviderFactory, + Provider: DBProvider + + TransactionsProvider + + BlockReader + + StaticFileProviderFactory>, { fn segment(&self) -> PruneSegment { PruneSegment::Transactions @@ -56,7 +59,9 @@ where let mut limiter = input.limiter; let mut last_pruned_transaction = *tx_range.end(); - let (pruned, 
done) = provider.tx_ref().prune_table_with_range::( + let (pruned, done) = provider.tx_ref().prune_table_with_range::::SignedTx, + >>( tx_range, &mut limiter, |_| false, diff --git a/crates/prune/prune/src/segments/user/receipts.rs b/crates/prune/prune/src/segments/user/receipts.rs index 5bc9feaf023d..97708ad6de18 100644 --- a/crates/prune/prune/src/segments/user/receipts.rs +++ b/crates/prune/prune/src/segments/user/receipts.rs @@ -2,10 +2,11 @@ use crate::{ segments::{PruneInput, Segment}, PrunerError, }; -use reth_db::transaction::DbTxMut; +use reth_db::{table::Value, transaction::DbTxMut}; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ - errors::provider::ProviderResult, BlockReader, DBProvider, PruneCheckpointWriter, - TransactionsProvider, + errors::provider::ProviderResult, BlockReader, DBProvider, NodePrimitivesProvider, + PruneCheckpointWriter, TransactionsProvider, }; use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, SegmentOutput}; use tracing::instrument; @@ -23,7 +24,11 @@ impl Receipts { impl Segment for Receipts where - Provider: DBProvider + PruneCheckpointWriter + TransactionsProvider + BlockReader, + Provider: DBProvider + + PruneCheckpointWriter + + TransactionsProvider + + BlockReader + + NodePrimitivesProvider>, { fn segment(&self) -> PruneSegment { PruneSegment::Receipts diff --git a/crates/prune/prune/src/segments/user/receipts_by_logs.rs b/crates/prune/prune/src/segments/user/receipts_by_logs.rs index ee404b074c3c..778aac1e7b9b 100644 --- a/crates/prune/prune/src/segments/user/receipts_by_logs.rs +++ b/crates/prune/prune/src/segments/user/receipts_by_logs.rs @@ -3,8 +3,12 @@ use crate::{ segments::{PruneInput, Segment}, PrunerError, }; -use reth_db::{tables, transaction::DbTxMut}; -use reth_provider::{BlockReader, DBProvider, PruneCheckpointWriter, TransactionsProvider}; +use alloy_consensus::TxReceipt; +use reth_db::{table::Value, tables, transaction::DbTxMut}; +use 
reth_primitives_traits::NodePrimitives; +use reth_provider::{ + BlockReader, DBProvider, NodePrimitivesProvider, PruneCheckpointWriter, TransactionsProvider, +}; use reth_prune_types::{ PruneCheckpoint, PruneMode, PruneProgress, PrunePurpose, PruneSegment, ReceiptsLogPruneConfig, SegmentOutput, MINIMUM_PRUNING_DISTANCE, @@ -23,7 +27,11 @@ impl ReceiptsByLogs { impl Segment for ReceiptsByLogs where - Provider: DBProvider + PruneCheckpointWriter + TransactionsProvider + BlockReader, + Provider: DBProvider + + PruneCheckpointWriter + + TransactionsProvider + + BlockReader + + NodePrimitivesProvider>, { fn segment(&self) -> PruneSegment { PruneSegment::ContractLogs @@ -141,12 +149,14 @@ where // Delete receipts, except the ones in the inclusion list let mut last_skipped_transaction = 0; let deleted; - (deleted, done) = provider.tx_ref().prune_table_with_range::( + (deleted, done) = provider.tx_ref().prune_table_with_range::::Receipt, + >>( tx_range, &mut limiter, |(tx_num, receipt)| { let skip = num_addresses > 0 && - receipt.logs.iter().any(|log| { + receipt.logs().iter().any(|log| { filtered_addresses[..num_addresses].contains(&&log.address) }); diff --git a/crates/prune/prune/src/segments/user/sender_recovery.rs b/crates/prune/prune/src/segments/user/sender_recovery.rs index f189e6c36af4..77bb0a5e2d47 100644 --- a/crates/prune/prune/src/segments/user/sender_recovery.rs +++ b/crates/prune/prune/src/segments/user/sender_recovery.rs @@ -90,6 +90,7 @@ mod tests { Itertools, }; use reth_db::tables; + use reth_primitives_traits::SignedTransaction; use reth_provider::{DatabaseProviderFactory, PruneCheckpointReader}; use reth_prune_types::{PruneCheckpoint, PruneLimiter, PruneMode, PruneProgress, PruneSegment}; use reth_stages::test_utils::{StorageKind, TestStageDB}; diff --git a/crates/prune/prune/src/segments/user/transaction_lookup.rs b/crates/prune/prune/src/segments/user/transaction_lookup.rs index ce9d90c291b7..27f4f5085d2b 100644 --- 
a/crates/prune/prune/src/segments/user/transaction_lookup.rs +++ b/crates/prune/prune/src/segments/user/transaction_lookup.rs @@ -6,7 +6,7 @@ use crate::{ use alloy_eips::eip2718::Encodable2718; use rayon::prelude::*; use reth_db::{tables, transaction::DbTxMut}; -use reth_provider::{BlockReader, DBProvider, TransactionsProvider}; +use reth_provider::{BlockReader, DBProvider}; use reth_prune_types::{ PruneMode, PruneProgress, PrunePurpose, PruneSegment, SegmentOutputCheckpoint, }; @@ -25,7 +25,7 @@ impl TransactionLookup { impl Segment for TransactionLookup where - Provider: DBProvider + TransactionsProvider + BlockReader, + Provider: DBProvider + BlockReader, { fn segment(&self) -> PruneSegment { PruneSegment::TransactionLookup diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index ede5c78f45bf..7f636474986c 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -58,11 +58,11 @@ test-utils = [ ] serde = [ "revm/serde", - "reth-trie?/serde", "alloy-eips/serde", "alloy-primitives/serde", "alloy-consensus/serde", "reth-primitives-traits/serde", + "reth-trie?/serde", "reth-scroll-primitives?/serde" ] scroll = [ diff --git a/crates/revm/src/test_utils.rs b/crates/revm/src/test_utils.rs index 813997c72d11..443d1d5ebcf2 100644 --- a/crates/revm/src/test_utils.rs +++ b/crates/revm/src/test_utils.rs @@ -11,8 +11,8 @@ use reth_storage_api::{ }; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ - updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, StorageProof, - TrieInput, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, + StorageMultiProof, StorageProof, TrieInput, }; /// Mock state for testing @@ -112,6 +112,15 @@ impl StorageRootProvider for StateProviderTest { ) -> ProviderResult { unimplemented!("proof generation is not supported") } + + fn storage_multiproof( + &self, + _address: Address, + _slots: &[B256], + _hashed_storage: HashedStorage, + ) -> ProviderResult { + 
unimplemented!("proof generation is not supported") + } } impl StateProofProvider for StateProviderTest { diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index 04e97f99e34d..a0712d617b66 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -19,6 +19,7 @@ reth-consensus.workspace = true reth-network-api.workspace = true reth-node-core.workspace = true reth-provider.workspace = true +reth-primitives.workspace = true reth-rpc.workspace = true reth-rpc-api.workspace = true reth-rpc-eth-api.workspace = true diff --git a/crates/rpc/rpc-builder/src/eth.rs b/crates/rpc/rpc-builder/src/eth.rs index e88f6aa86bbd..59b3ef870fe2 100644 --- a/crates/rpc/rpc-builder/src/eth.rs +++ b/crates/rpc/rpc-builder/src/eth.rs @@ -1,5 +1,6 @@ use alloy_consensus::Header; use reth_evm::ConfigureEvm; +use reth_primitives::EthPrimitives; use reth_provider::{BlockReader, CanonStateSubscriptions, EvmEnvProvider, StateProviderFactory}; use reth_rpc::{EthFilter, EthPubSub}; use reth_rpc_eth_api::EthApiTypes; @@ -27,10 +28,15 @@ pub struct EthHandlers { impl EthHandlers where - Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, + Provider: StateProviderFactory + + BlockReader + + EvmEnvProvider + + Clone + + Unpin + + 'static, Pool: Send + Sync + Clone + 'static, Network: Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, + Events: CanonStateSubscriptions + Clone + 'static, EthApi: EthApiTypes + 'static, { /// Returns a new instance with handlers for `eth` namespace. diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index ccf19ed1a0bc..8f5c84835aaa 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -37,11 +37,16 @@ //! block_executor: BlockExecutor, //! consensus: Consensus, //! ) where -//! Provider: -//! FullRpcProvider + AccountReader + ChangeSetReader, +//! Provider: FullRpcProvider< +//! 
Transaction = TransactionSigned, +//! Block = reth_primitives::Block, +//! Receipt = reth_primitives::Receipt, +//! > + AccountReader +//! + ChangeSetReader, //! Pool: TransactionPool + Unpin + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, -//! Events: CanonStateSubscriptions + Clone + 'static, +//! Events: +//! CanonStateSubscriptions + Clone + 'static, //! EvmConfig: ConfigureEvm
, //! BlockExecutor: BlockExecutorProvider, //! Consensus: reth_consensus::Consensus + Clone + 'static, @@ -112,11 +117,16 @@ //! block_executor: BlockExecutor, //! consensus: Consensus, //! ) where -//! Provider: -//! FullRpcProvider + AccountReader + ChangeSetReader, +//! Provider: FullRpcProvider< +//! Transaction = TransactionSigned, +//! Block = reth_primitives::Block, +//! Receipt = reth_primitives::Receipt, +//! > + AccountReader +//! + ChangeSetReader, //! Pool: TransactionPool + Unpin + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, -//! Events: CanonStateSubscriptions + Clone + 'static, +//! Events: +//! CanonStateSubscriptions + Clone + 'static, //! EngineApi: EngineApiServer, //! EngineT: EngineTypes, //! EvmConfig: ConfigureEvm
, @@ -188,9 +198,10 @@ use reth_consensus::Consensus; use reth_engine_primitives::EngineTypes; use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; +use reth_primitives::EthPrimitives; use reth_provider::{ AccountReader, BlockReader, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, - EvmEnvProvider, FullRpcProvider, StateProviderFactory, + EvmEnvProvider, FullRpcProvider, ReceiptProvider, StateProviderFactory, }; use reth_rpc::{ AdminApi, DebugApi, EngineEthApi, EthBundle, NetApi, OtterscanApi, RPCApi, RethApi, TraceApi, @@ -258,11 +269,13 @@ pub async fn launch, ) -> Result where - Provider: FullRpcProvider + AccountReader + ChangeSetReader, + Provider: FullRpcProvider + + AccountReader + + ChangeSetReader, Pool: TransactionPool + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, + Events: CanonStateSubscriptions + Clone + 'static, EvmConfig: ConfigureEvm
, EthApi: FullEthApiServer, BlockExecutor: BlockExecutorProvider, @@ -615,7 +628,7 @@ where Pool: TransactionPool + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, + Events: CanonStateSubscriptions + Clone + 'static, EvmConfig: ConfigureEvm
, BlockExecutor: BlockExecutorProvider, Consensus: reth_consensus::Consensus + Clone + 'static, @@ -641,6 +654,10 @@ where EngineT: EngineTypes, EngineApi: EngineApiServer, EthApi: FullEthApiServer, + Provider: BlockReader< + Block = ::Block, + Receipt = ::Receipt, + >, { let Self { provider, @@ -716,6 +733,7 @@ where ) -> RpcRegistryInner where EthApi: EthApiTypes + 'static, + Provider: BlockReader, { let Self { provider, @@ -750,6 +768,10 @@ where ) -> TransportRpcModules<()> where EthApi: FullEthApiServer, + Provider: BlockReader< + Block = ::Block, + Receipt = ::Receipt, + >, { let mut modules = TransportRpcModules::default(); @@ -907,10 +929,15 @@ pub struct RpcRegistryInner< impl RpcRegistryInner where - Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, + Provider: StateProviderFactory + + BlockReader + + EvmEnvProvider + + Clone + + Unpin + + 'static, Pool: Send + Sync + Clone + 'static, Network: Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, + Events: CanonStateSubscriptions + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, EthApi: EthApiTypes + 'static, BlockExecutor: BlockExecutorProvider, @@ -1112,6 +1139,10 @@ where pub fn register_debug(&mut self) -> &mut Self where EthApi: EthApiSpec + EthTransactions + TraceExt, + Provider: BlockReader< + Block = ::Block, + Receipt = reth_primitives::Receipt, + >, { let debug_api = self.debug_api(); self.modules.insert(RethRpcModule::Debug, debug_api.into_rpc().into()); @@ -1126,6 +1157,7 @@ where pub fn register_trace(&mut self) -> &mut Self where EthApi: TraceExt, + Provider: BlockReader::Block>, { let trace_api = self.trace_api(); self.modules.insert(RethRpcModule::Trace, trace_api.into_rpc().into()); @@ -1264,11 +1296,15 @@ where impl RpcRegistryInner where - Provider: FullRpcProvider + AccountReader + ChangeSetReader, + Provider: FullRpcProvider< + Block = ::Block, + Receipt = ::Receipt, + > + AccountReader + + ChangeSetReader, Pool: 
TransactionPool + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, + Events: CanonStateSubscriptions + Clone + 'static, EthApi: FullEthApiServer, BlockExecutor: BlockExecutorProvider, Consensus: reth_consensus::Consensus + Clone + 'static, diff --git a/crates/rpc/rpc-builder/tests/it/auth.rs b/crates/rpc/rpc-builder/tests/it/auth.rs index 71e8bf39f9ea..390ea7d6ba40 100644 --- a/crates/rpc/rpc-builder/tests/it/auth.rs +++ b/crates/rpc/rpc-builder/tests/it/auth.rs @@ -5,7 +5,7 @@ use alloy_primitives::U64; use alloy_rpc_types_engine::{ForkchoiceState, PayloadId, TransitionConfiguration}; use jsonrpsee::core::client::{ClientT, SubscriptionClientT}; use reth_ethereum_engine_primitives::EthEngineTypes; -use reth_primitives::Block; +use reth_primitives::{Block, BlockExt}; use reth_rpc_api::clients::EngineApiClient; use reth_rpc_layer::JwtSecret; use reth_rpc_types_compat::engine::payload::{ diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 7773b5084c9e..1062363eafb8 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -75,7 +75,11 @@ struct EngineApiInner EngineApi where - Provider: HeaderProvider + BlockReader + StateProviderFactory + EvmEnvProvider + 'static, + Provider: HeaderProvider + + BlockReader + + StateProviderFactory + + EvmEnvProvider + + 'static, EngineT: EngineTypes, Pool: TransactionPool + 'static, Validator: EngineValidator, @@ -487,7 +491,7 @@ where f: F, ) -> EngineApiResult>> where - F: Fn(Block) -> R + Send + 'static, + F: Fn(Provider::Block) -> R + Send + 'static, R: Send + 'static, { let (tx, rx) = oneshot::channel(); @@ -735,7 +739,11 @@ where impl EngineApiServer for EngineApi where - Provider: HeaderProvider + BlockReader + StateProviderFactory + EvmEnvProvider + 'static, + Provider: HeaderProvider + + BlockReader + + 
StateProviderFactory + + EvmEnvProvider + + 'static, EngineT: EngineTypes, Pool: TransactionPool + 'static, Validator: EngineValidator, diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index 251ca225eb19..cce0aa01b01a 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -2,21 +2,34 @@ use std::sync::Arc; +use alloy_consensus::BlockHeader; use alloy_eips::BlockId; use alloy_rpc_types_eth::{Block, Header, Index}; use futures::Future; -use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders}; -use reth_provider::{BlockIdReader, BlockReader, BlockReaderIdExt, HeaderProvider}; +use reth_node_api::BlockBody; +use reth_primitives::{SealedBlockFor, SealedBlockWithSenders}; +use reth_provider::{ + BlockIdReader, BlockReader, BlockReaderIdExt, HeaderProvider, ProviderReceipt, +}; use reth_rpc_types_compat::block::from_block; -use crate::{node::RpcNodeCoreExt, FromEthApiError, FullEthApiTypes, RpcBlock, RpcReceipt}; +use crate::{ + node::RpcNodeCoreExt, EthApiTypes, FromEthApiError, FullEthApiTypes, RpcBlock, RpcNodeCore, + RpcReceipt, +}; use super::{LoadPendingBlock, LoadReceipt, SpawnBlocking}; /// Result type of the fetched block receipts. pub type BlockReceiptsResult = Result>>, E>; /// Result type of the fetched block and its receipts. -pub type BlockAndReceiptsResult = Result>)>, E>; +pub type BlockAndReceiptsResult = Result< + Option<( + SealedBlockFor<<::Provider as BlockReader>::Block>, + Arc::Provider>>>, + )>, + ::Error, +>; /// Block related functions for the [`EthApiServer`](crate::EthApiServer) trait in the /// `eth_` namespace. 
@@ -49,7 +62,7 @@ pub trait EthBlocks: LoadBlock { let block_hash = block.hash(); let mut total_difficulty = self .provider() - .header_td_by_number(block.number) + .header_td_by_number(block.number()) .map_err(Self::Error::from_eth_err)?; if total_difficulty.is_none() { // if we failed to find td after we successfully loaded the block, try again using @@ -83,7 +96,7 @@ pub trait EthBlocks: LoadBlock { .provider() .pending_block() .map_err(Self::Error::from_eth_err)? - .map(|block| block.body.transactions.len())) + .map(|block| block.body.transactions().len())) } let block_hash = match self @@ -120,7 +133,7 @@ pub trait EthBlocks: LoadBlock { fn load_block_and_receipts( &self, block_id: BlockId, - ) -> impl Future> + Send + ) -> impl Future> + Send where Self: LoadReceipt, { @@ -198,10 +211,16 @@ pub trait EthBlocks: LoadBlock { /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` blocks RPC methods. pub trait LoadBlock: LoadPendingBlock + SpawnBlocking + RpcNodeCoreExt { /// Returns the block object for the given block id. 
+ #[expect(clippy::type_complexity)] fn block_with_senders( &self, block_id: BlockId, - ) -> impl Future>, Self::Error>> + Send { + ) -> impl Future< + Output = Result< + Option::Block>>>, + Self::Error, + >, + > + Send { async move { if block_id.is_pending() { // Pending block can be fetched directly without need for caching diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 077fee2826fa..d9b4b2c894c4 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -18,6 +18,7 @@ use alloy_rpc_types_eth::{ use futures::Future; use reth_chainspec::EthChainSpec; use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; +use reth_node_api::BlockBody; use reth_primitives::TransactionSigned; use reth_provider::{BlockIdReader, ChainSpecProvider, HeaderProvider}; use reth_revm::{ @@ -204,7 +205,6 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock { parent_hash, total_difficulty, return_full_transactions, - &db, this.tx_resp_builder(), )?; @@ -282,14 +282,15 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock { // we're essentially replaying the transactions in the block here, hence we need the // state that points to the beginning of the block, which is the state at // the parent block - let mut at = block.parent_hash; + let mut at = block.parent_hash(); let mut replay_block_txs = true; - let num_txs = transaction_index.index().unwrap_or(block.body.transactions.len()); + let num_txs = + transaction_index.index().unwrap_or_else(|| block.body.transactions().len()); // but if all transactions are to be replayed, we can use the state at the block itself, // however only if we're not targeting the pending block, because for pending we can't // rely on the block's state being available - if !is_block_target_pending && num_txs == block.body.transactions.len() { + if !is_block_target_pending && num_txs == block.body.transactions().len() { at = block.hash(); replay_block_txs 
= false; } diff --git a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs index 465c33ada387..f9d62855be12 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs @@ -57,7 +57,7 @@ pub trait EstimateCall: Call { request.nonce = None; // Keep a copy of gas related request values - let tx_request_gas_limit = request.gas; + let tx_request_gas_limit = request.gas.map(U256::from); let tx_request_gas_price = request.gas_price; // the gas limit of the corresponding block let block_env_gas_limit = block.gas_limit; @@ -65,7 +65,13 @@ pub trait EstimateCall: Call { // Determine the highest possible gas limit, considering both the request's specified limit // and the block's limit. let mut highest_gas_limit = tx_request_gas_limit - .map(|tx_gas_limit| U256::from(tx_gas_limit).max(block_env_gas_limit)) + .map(|mut tx_gas_limit| { + if block_env_gas_limit < tx_gas_limit { + // requested gas limit is higher than the allowed gas limit, capping + tx_gas_limit = block_env_gas_limit; + } + tx_gas_limit + }) .unwrap_or(block_env_gas_limit); // Configure the evm env diff --git a/crates/rpc/rpc-eth-api/src/helpers/fee.rs b/crates/rpc/rpc-eth-api/src/helpers/fee.rs index 8ed45d2ac080..0099e0f6b160 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/fee.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/fee.rs @@ -1,5 +1,6 @@ //! Loads fee history from database. Helper trait for `eth_` fee and transaction RPC methods. +use alloy_consensus::BlockHeader; use alloy_primitives::U256; use alloy_rpc_types_eth::{BlockNumberOrTag, FeeHistory}; use futures::Future; @@ -287,7 +288,7 @@ pub trait LoadFee: LoadBlock { .block_with_senders(BlockNumberOrTag::Pending.into()) .await? .ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Pending.into()))? 
- .base_fee_per_gas + .base_fee_per_gas() .ok_or(EthApiError::InvalidTransaction( RpcInvalidTransactionError::TxTypeNotSupported, ))?; @@ -324,7 +325,7 @@ pub trait LoadFee: LoadBlock { let suggested_tip = self.suggested_priority_fee(); async move { let (header, suggested_tip) = futures::try_join!(header, suggested_tip)?; - let base_fee = header.and_then(|h| h.base_fee_per_gas).unwrap_or_default(); + let base_fee = header.and_then(|h| h.base_fee_per_gas()).unwrap_or_default(); Ok(suggested_tip + U256::from(base_fee)) } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 631e46787420..8fbde6bc78eb 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -17,12 +17,12 @@ use reth_evm::{ }; use reth_execution_types::ExecutionOutcome; use reth_primitives::{ - proofs::calculate_transaction_root, Block, BlockBody, Receipt, SealedBlockWithSenders, - SealedHeader, TransactionSignedEcRecovered, + proofs::calculate_transaction_root, Block, BlockBody, BlockExt, InvalidTransactionError, + Receipt, SealedBlockWithSenders, SealedHeader, TransactionSignedEcRecovered, }; use reth_provider::{ BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderError, - ReceiptProvider, StateProviderFactory, + ProviderReceipt, ReceiptProvider, StateProviderFactory, }; use reth_revm::primitives::{ BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, EVMError, Env, ExecutionResult, InvalidTransaction, @@ -30,7 +30,9 @@ use reth_revm::primitives::{ }; use reth_rpc_eth_types::{EthApiError, PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin}; use reth_scroll_execution::FinalizeExecution; -use reth_transaction_pool::{BestTransactionsAttributes, TransactionPool}; +use reth_transaction_pool::{ + error::InvalidPoolTransactionError, BestTransactionsAttributes, TransactionPool, +}; use reth_trie::HashedPostState; use 
revm::{db::states::bundle_state::BundleRetention, DatabaseCommit, State}; use std::time::{Duration, Instant}; @@ -43,8 +45,10 @@ use tracing::debug; pub trait LoadPendingBlock: EthApiTypes + RpcNodeCore< - Provider: BlockReaderIdExt - + EvmEnvProvider + Provider: BlockReaderIdExt< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + > + EvmEnvProvider + ChainSpecProvider + StateProviderFactory, Pool: TransactionPool, @@ -112,9 +116,18 @@ pub trait LoadPendingBlock: } /// Returns the locally built pending block + #[expect(clippy::type_complexity)] fn local_pending_block( &self, - ) -> impl Future)>, Self::Error>> + Send + ) -> impl Future< + Output = Result< + Option<( + SealedBlockWithSenders<::Block>, + Vec>, + )>, + Self::Error, + >, + > + Send where Self: SpawnBlocking, { @@ -282,7 +295,13 @@ pub trait LoadPendingBlock: // we can't fit this transaction into the block, so we need to mark it as invalid // which also removes all dependent transaction from the iterator before we can // continue - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid( + &pool_tx, + InvalidPoolTransactionError::ExceedsGasLimit( + pool_tx.gas_limit(), + block_gas_limit, + ), + ); continue } @@ -290,7 +309,12 @@ pub trait LoadPendingBlock: // we don't want to leak any state changes made by private transactions, so we mark // them as invalid here which removes all dependent transactions from the iterator // before we can continue - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid( + &pool_tx, + InvalidPoolTransactionError::Consensus( + InvalidTransactionError::TxTypeNotSupported, + ), + ); continue } @@ -306,7 +330,13 @@ pub trait LoadPendingBlock: // invalid, which removes its dependent transactions from // the iterator. This is similar to the gas limit condition // for regular transactions above. 
- best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid( + &pool_tx, + InvalidPoolTransactionError::ExceedsGasLimit( + tx_blob_gas, + MAX_DATA_GAS_PER_BLOCK, + ), + ); continue } } @@ -330,7 +360,12 @@ pub trait LoadPendingBlock: } else { // if the transaction is invalid, we can skip it and all of its // descendants - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid( + &pool_tx, + InvalidPoolTransactionError::Consensus( + InvalidTransactionError::TxTypeNotSupported, + ), + ); } continue } diff --git a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs index 7e1992017d84..f663c5863b55 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs @@ -2,8 +2,8 @@ //! loads receipt data w.r.t. network. use futures::Future; -use reth_primitives::{Receipt, TransactionMeta}; -use reth_provider::TransactionsProvider; +use reth_primitives::TransactionMeta; +use reth_provider::{ProviderReceipt, ProviderTx, ReceiptProvider, TransactionsProvider}; use crate::{EthApiTypes, RpcNodeCoreExt, RpcReceipt}; @@ -11,13 +11,13 @@ use crate::{EthApiTypes, RpcNodeCoreExt, RpcReceipt}; /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` receipts RPC methods. pub trait LoadReceipt: - EthApiTypes + RpcNodeCoreExt + Send + Sync + EthApiTypes + RpcNodeCoreExt + Send + Sync { /// Helper method for `eth_getBlockReceipts` and `eth_getTransactionReceipt`. 
fn build_transaction_receipt( &self, - tx: ::Transaction, + tx: ProviderTx, meta: TransactionMeta, - receipt: Receipt, + receipt: ProviderReceipt, ) -> impl Future, Self::Error>> + Send; } diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index a1e6084da55d..114b4c41d905 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -10,6 +10,7 @@ use futures::Future; use reth_chainspec::ChainSpecProvider; use reth_evm::{system_calls::SystemCaller, ConfigureEvm, ConfigureEvmEnv}; use reth_primitives::SealedBlockWithSenders; +use reth_provider::BlockReader; use reth_revm::database::StateProviderDatabase; use reth_rpc_eth_types::{ cache::db::{StateCacheDb, StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper}, @@ -24,7 +25,7 @@ use revm_primitives::{ use super::{Call, LoadBlock, LoadPendingBlock, LoadState, LoadTransaction}; /// Executes CPU heavy tasks. -pub trait Trace: LoadState> { +pub trait Trace: LoadState> { /// Executes the [`EnvWithHandlerCfg`] against the given [Database] without committing state /// changes. fn inspect( @@ -230,7 +231,7 @@ pub trait Trace: LoadState> { fn trace_block_until( &self, block_id: BlockId, - block: Option>, + block: Option::Block>>>, highest_index: Option, config: TracingInspectorConfig, f: F, diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 2223ecdc9f71..6ad8f8fd6ec0 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -1,16 +1,17 @@ //! Database access for `eth_` transaction RPC methods. Loads transaction and receipt data w.r.t. //! network. 
-use alloy_consensus::Transaction; +use alloy_consensus::{BlockHeader, Transaction}; use alloy_dyn_abi::TypedData; use alloy_eips::{eip2718::Encodable2718, BlockId}; use alloy_network::TransactionBuilder; use alloy_primitives::{Address, Bytes, TxHash, B256}; use alloy_rpc_types_eth::{transaction::TransactionRequest, BlockNumberOrTag, TransactionInfo}; use futures::Future; -use reth_primitives::{Receipt, SealedBlockWithSenders, TransactionMeta, TransactionSigned}; +use reth_primitives::{SealedBlockWithSenders, TransactionMeta, TransactionSigned}; use reth_provider::{ - BlockNumReader, BlockReaderIdExt, ProviderTx, ReceiptProvider, TransactionsProvider, + BlockNumReader, BlockReaderIdExt, ProviderReceipt, ProviderTx, ReceiptProvider, + TransactionsProvider, }; use reth_rpc_eth_types::{ utils::{binary_search, recover_raw_transaction}, @@ -159,7 +160,7 @@ pub trait EthTransactions: LoadTransaction { hash: TxHash, ) -> impl Future< Output = Result< - Option<(ProviderTx, TransactionMeta, Receipt)>, + Option<(ProviderTx, TransactionMeta, ProviderReceipt)>, Self::Error, >, > + Send @@ -199,8 +200,8 @@ pub trait EthTransactions: LoadTransaction { async move { if let Some(block) = self.block_with_senders(block_id).await? { let block_hash = block.hash(); - let block_number = block.number; - let base_fee_per_gas = block.base_fee_per_gas; + let block_number = block.number(); + let base_fee_per_gas = block.base_fee_per_gas(); if let Some((signer, tx)) = block.transactions_with_sender().nth(index) { let tx_info = TransactionInfo { hash: Some(tx.hash()), @@ -275,8 +276,8 @@ pub trait EthTransactions: LoadTransaction { .await? 
.and_then(|block| { let block_hash = block.hash(); - let block_number = block.number; - let base_fee_per_gas = block.base_fee_per_gas; + let block_number = block.number(); + let base_fee_per_gas = block.base_fee_per_gas(); block .transactions_with_sender() @@ -315,7 +316,7 @@ pub trait EthTransactions: LoadTransaction { { async move { if let Some(block) = self.block_with_senders(block_id).await? { - if let Some(tx) = block.transactions().nth(index) { + if let Some(tx) = block.transactions().get(index) { return Ok(Some(tx.encoded_2718().into())) } } diff --git a/crates/rpc/rpc-eth-api/src/types.rs b/crates/rpc/rpc-eth-api/src/types.rs index 994f9ac884d0..2bac068483c7 100644 --- a/crates/rpc/rpc-eth-api/src/types.rs +++ b/crates/rpc/rpc-eth-api/src/types.rs @@ -8,7 +8,7 @@ use std::{ use alloy_network::Network; use alloy_rpc_types_eth::Block; use reth_primitives::TransactionSigned; -use reth_provider::TransactionsProvider; +use reth_provider::{ReceiptProvider, TransactionsProvider}; use reth_rpc_types_compat::TransactionCompat; use crate::{AsEthApiError, FromEthApiError, FromEvmError, RpcNodeCore}; @@ -47,8 +47,10 @@ pub type RpcError = ::Error; /// Helper trait holds necessary trait bounds on [`EthApiTypes`] to implement `eth` API. 
pub trait FullEthApiTypes where - Self: RpcNodeCore> - + EthApiTypes< + Self: RpcNodeCore< + Provider: TransactionsProvider + + ReceiptProvider, + > + EthApiTypes< TransactionCompat: TransactionCompat< ::Transaction, Transaction = RpcTransaction, @@ -59,8 +61,10 @@ where } impl FullEthApiTypes for T where - T: RpcNodeCore> - + EthApiTypes< + T: RpcNodeCore< + Provider: TransactionsProvider + + ReceiptProvider, + > + EthApiTypes< TransactionCompat: TransactionCompat< ::Transaction, Transaction = RpcTransaction, diff --git a/crates/rpc/rpc-eth-types/Cargo.toml b/crates/rpc/rpc-eth-types/Cargo.toml index ef7a9b99cbfc..59704aa8112f 100644 --- a/crates/rpc/rpc-eth-types/Cargo.toml +++ b/crates/rpc/rpc-eth-types/Cargo.toml @@ -29,13 +29,13 @@ reth-transaction-pool.workspace = true reth-trie.workspace = true # ethereum +alloy-eips.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true alloy-sol-types.workspace = true alloy-rpc-types-eth.workspace = true revm-inspectors.workspace = true revm-primitives = { workspace = true, features = ["dev"] } -alloy-eips.workspace = true # scroll revm.workspace = true diff --git a/crates/rpc/rpc-eth-types/src/builder/ctx.rs b/crates/rpc/rpc-eth-types/src/builder/ctx.rs index 2132dd0e22c4..db2beb4a4549 100644 --- a/crates/rpc/rpc-eth-types/src/builder/ctx.rs +++ b/crates/rpc/rpc-eth-types/src/builder/ctx.rs @@ -2,6 +2,7 @@ use reth_chain_state::CanonStateSubscriptions; use reth_chainspec::ChainSpecProvider; +use reth_primitives::NodePrimitives; use reth_storage_api::BlockReaderIdExt; use reth_tasks::TaskSpawner; @@ -41,7 +42,12 @@ where where Provider: ChainSpecProvider + 'static, Tasks: TaskSpawner, - Events: CanonStateSubscriptions, + Events: CanonStateSubscriptions< + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, { let fee_history_cache = FeeHistoryCache::new(self.cache.clone(), self.config.fee_history_cache); diff --git 
a/crates/rpc/rpc-eth-types/src/cache/db.rs b/crates/rpc/rpc-eth-types/src/cache/db.rs index 50fd4b04625f..1fbe16a2ed9c 100644 --- a/crates/rpc/rpc-eth-types/src/cache/db.rs +++ b/crates/rpc/rpc-eth-types/src/cache/db.rs @@ -67,6 +67,15 @@ impl reth_storage_api::StorageRootProvider for StateProviderTraitObjWrapper<'_> ) -> ProviderResult { self.0.storage_proof(address, slot, hashed_storage) } + + fn storage_multiproof( + &self, + address: Address, + slots: &[B256], + hashed_storage: HashedStorage, + ) -> ProviderResult { + self.0.storage_multiproof(address, slots, hashed_storage) + } } impl reth_storage_api::StateProofProvider for StateProviderTraitObjWrapper<'_> { diff --git a/crates/rpc/rpc-eth-types/src/cache/mod.rs b/crates/rpc/rpc-eth-types/src/cache/mod.rs index b4a110e96af7..70c8b1a4f54f 100644 --- a/crates/rpc/rpc-eth-types/src/cache/mod.rs +++ b/crates/rpc/rpc-eth-types/src/cache/mod.rs @@ -105,7 +105,12 @@ impl EthStateCache { evm_config: EvmConfig, ) -> Self where - Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, + Provider: StateProviderFactory + + BlockReader + + EvmEnvProvider + + Clone + + Unpin + + 'static, EvmConfig: ConfigureEvm
, { Self::spawn_with(provider, config, TokioTaskExecutor::default(), evm_config) @@ -122,7 +127,12 @@ impl EthStateCache { evm_config: EvmConfig, ) -> Self where - Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, + Provider: StateProviderFactory + + BlockReader + + EvmEnvProvider + + Clone + + Unpin + + 'static, Tasks: TaskSpawner + Clone + 'static, EvmConfig: ConfigureEvm
, { @@ -337,7 +347,12 @@ where impl Future for EthStateCacheService where - Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, + Provider: StateProviderFactory + + BlockReader + + EvmEnvProvider + + Clone + + Unpin + + 'static, Tasks: TaskSpawner + Clone + 'static, EvmConfig: ConfigureEvm
, { diff --git a/crates/rpc/rpc-eth-types/src/fee_history.rs b/crates/rpc/rpc-eth-types/src/fee_history.rs index 6c8b66246f33..922c3f9d474a 100644 --- a/crates/rpc/rpc-eth-types/src/fee_history.rs +++ b/crates/rpc/rpc-eth-types/src/fee_history.rs @@ -16,7 +16,7 @@ use futures::{ use metrics::atomics::AtomicU64; use reth_chain_state::CanonStateNotification; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; -use reth_primitives::{Receipt, SealedBlock, TransactionSigned}; +use reth_primitives::{NodePrimitives, Receipt, SealedBlock, TransactionSigned}; use reth_storage_api::BlockReaderIdExt; use revm_primitives::{calc_blob_gasprice, calc_excess_blob_gas}; use serde::{Deserialize, Serialize}; @@ -205,13 +205,14 @@ struct FeeHistoryCacheInner { /// Awaits for new chain events and directly inserts them into the cache so they're available /// immediately before they need to be fetched from disk. -pub async fn fee_history_cache_new_blocks_task( +pub async fn fee_history_cache_new_blocks_task( fee_history_cache: FeeHistoryCache, mut events: St, provider: Provider, ) where - St: Stream + Unpin + 'static, + St: Stream> + Unpin + 'static, Provider: BlockReaderIdExt + ChainSpecProvider + 'static, + N: NodePrimitives, { // We're listening for new blocks emitted when the node is in live sync. 
// If the node transitions to stage sync, we need to fetch the missing blocks @@ -248,7 +249,7 @@ pub async fn fee_history_cache_new_blocks_task( break; }; - let committed = event .committed(); + let committed = event.committed(); let (blocks, receipts): (Vec<_>, Vec<_>) = committed .blocks_and_receipts() .map(|(block, receipts)| { diff --git a/crates/rpc/rpc-eth-types/src/gas_oracle.rs b/crates/rpc/rpc-eth-types/src/gas_oracle.rs index d73cd72b650c..3f8186ae1502 100644 --- a/crates/rpc/rpc-eth-types/src/gas_oracle.rs +++ b/crates/rpc/rpc-eth-types/src/gas_oracle.rs @@ -7,7 +7,14 @@ use alloy_primitives::{B256, U256}; use alloy_rpc_types_eth::BlockId; use derive_more::{Deref, DerefMut, From, Into}; use itertools::Itertools; -use reth_rpc_server_types::constants; +use reth_primitives_traits::SignedTransaction; +use reth_rpc_server_types::{ + constants, + constants::gas_oracle::{ + DEFAULT_GAS_PRICE_BLOCKS, DEFAULT_GAS_PRICE_PERCENTILE, DEFAULT_IGNORE_GAS_PRICE, + DEFAULT_MAX_GAS_PRICE, MAX_HEADER_HISTORY, SAMPLE_NUMBER, + }, +}; use reth_storage_api::BlockReaderIdExt; use schnellru::{ByLength, LruMap}; use serde::{Deserialize, Serialize}; @@ -15,11 +22,6 @@ use std::fmt::{self, Debug, Formatter}; use tokio::sync::Mutex; use tracing::warn; -use reth_rpc_server_types::constants::gas_oracle::{ - DEFAULT_GAS_PRICE_BLOCKS, DEFAULT_GAS_PRICE_PERCENTILE, DEFAULT_IGNORE_GAS_PRICE, - DEFAULT_MAX_GAS_PRICE, MAX_HEADER_HISTORY, SAMPLE_NUMBER, -}; - use super::{EthApiError, EthResult, EthStateCache, RpcInvalidTransactionError}; /// The default gas limit for `eth_call` and adjacent calls. 
See diff --git a/crates/rpc/rpc-eth-types/src/logs_utils.rs b/crates/rpc/rpc-eth-types/src/logs_utils.rs index 5ead11b71156..2e41c7a1183d 100644 --- a/crates/rpc/rpc-eth-types/src/logs_utils.rs +++ b/crates/rpc/rpc-eth-types/src/logs_utils.rs @@ -8,6 +8,7 @@ use alloy_rpc_types_eth::{FilteredParams, Log}; use reth_chainspec::ChainInfo; use reth_errors::ProviderError; use reth_primitives::{Receipt, SealedBlockWithSenders}; +use reth_primitives_traits::SignedTransaction; use reth_storage_api::BlockReader; use std::sync::Arc; @@ -58,7 +59,7 @@ pub enum ProviderOrBlock<'a, P: BlockReader> { /// Appends all matching logs of a block's receipts. /// If the log matches, look up the corresponding transaction hash. -pub fn append_matching_block_logs( +pub fn append_matching_block_logs>( all_logs: &mut Vec, provider_or_block: ProviderOrBlock<'_, P>, filter: &FilteredParams, diff --git a/crates/rpc/rpc-eth-types/src/pending_block.rs b/crates/rpc/rpc-eth-types/src/pending_block.rs index d8f413650a30..116026c2ddde 100644 --- a/crates/rpc/rpc-eth-types/src/pending_block.rs +++ b/crates/rpc/rpc-eth-types/src/pending_block.rs @@ -4,10 +4,12 @@ use std::time::Instant; +use alloy_consensus::BlockHeader; use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::B256; use derive_more::Constructor; use reth_primitives::{Receipt, SealedBlockWithSenders, SealedHeader}; +use reth_primitives_traits::Block; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}; /// Configured [`BlockEnv`] and [`CfgEnvWithHandlerCfg`] for a pending block. @@ -23,26 +25,26 @@ pub struct PendingBlockEnv { /// The origin for a configured [`PendingBlockEnv`] #[derive(Clone, Debug)] -pub enum PendingBlockEnvOrigin { +pub enum PendingBlockEnvOrigin { /// The pending block as received from the CL. - ActualPending(SealedBlockWithSenders), + ActualPending(SealedBlockWithSenders), /// The _modified_ header of the latest block. 
/// /// This derives the pending state based on the latest header by modifying: /// - the timestamp /// - the block number /// - fees - DerivedFromLatest(SealedHeader), + DerivedFromLatest(SealedHeader), } -impl PendingBlockEnvOrigin { +impl PendingBlockEnvOrigin { /// Returns true if the origin is the actual pending block as received from the CL. pub const fn is_actual_pending(&self) -> bool { matches!(self, Self::ActualPending(_)) } /// Consumes the type and returns the actual pending block. - pub fn into_actual_pending(self) -> Option { + pub fn into_actual_pending(self) -> Option> { match self { Self::ActualPending(block) => Some(block), _ => None, @@ -67,13 +69,13 @@ impl PendingBlockEnvOrigin { /// header. pub fn build_target_hash(&self) -> B256 { match self { - Self::ActualPending(block) => block.parent_hash, + Self::ActualPending(block) => block.header().parent_hash(), Self::DerivedFromLatest(header) => header.hash(), } } /// Returns the header this pending block is based on. - pub fn header(&self) -> &SealedHeader { + pub fn header(&self) -> &SealedHeader { match self { Self::ActualPending(block) => &block.header, Self::DerivedFromLatest(header) => header, diff --git a/crates/rpc/rpc-eth-types/src/receipt.rs b/crates/rpc/rpc-eth-types/src/receipt.rs index 247b4449ef5d..3136d42e9580 100644 --- a/crates/rpc/rpc-eth-types/src/receipt.rs +++ b/crates/rpc/rpc-eth-types/src/receipt.rs @@ -1,13 +1,13 @@ //! RPC receipt response builder, extends a layer one receipt with layer two data. 
+use super::{EthApiError, EthResult}; use alloy_consensus::{ReceiptEnvelope, Transaction}; use alloy_primitives::{Address, TxKind}; use alloy_rpc_types_eth::{Log, ReceiptWithBloom, TransactionReceipt}; use reth_primitives::{Receipt, TransactionMeta, TransactionSigned, TxType}; +use reth_primitives_traits::SignedTransaction; use revm_primitives::calc_blob_gasprice; -use super::{EthApiError, EthResult}; - /// Builds an [`TransactionReceipt`] obtaining the inner receipt envelope from the given closure. pub fn build_receipt( transaction: &TransactionSigned, diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index d296286b83b5..a10b4afff9d7 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -14,13 +14,10 @@ use reth_primitives::{ }; use reth_rpc_server_types::result::rpc_err; use reth_rpc_types_compat::{block::from_block, TransactionCompat}; -use reth_storage_api::StateRootProvider; -use reth_trie::{HashedPostState, HashedStorage}; -use revm::{db::CacheDB, Database}; -use revm_primitives::{keccak256, Address, BlockEnv, Bytes, ExecutionResult, TxKind, B256, U256}; +use revm::Database; +use revm_primitives::{Address, BlockEnv, Bytes, ExecutionResult, TxKind, B256, U256}; use crate::{ - cache::db::StateProviderTraitObjWrapper, error::{api::FromEthApiError, ToRpcError}, EthApiError, RevertError, RpcInvalidTransactionError, }; @@ -142,7 +139,6 @@ where } /// Handles outputs of the calls execution and builds a [`SimulatedBlock`]. 
-#[expect(clippy::complexity)] pub fn build_block>( results: Vec<(Address, ExecutionResult)>, transactions: Vec, @@ -150,12 +146,6 @@ pub fn build_block>( parent_hash: B256, total_difficulty: U256, full_transactions: bool, - #[cfg(feature = "scroll")] db: &CacheDB< - reth_scroll_storage::ScrollStateProviderDatabase>, - >, - #[cfg(not(feature = "scroll"))] db: &CacheDB< - reth_revm::database::StateProviderDatabase>, - >, tx_resp_builder: &T, ) -> Result>, T::Error> { let mut calls: Vec = Vec::with_capacity(results.len()); @@ -233,30 +223,27 @@ pub fn build_block>( calls.push(call); } - let mut hashed_state = HashedPostState::default(); - for (address, account) in &db.accounts { - let hashed_address = keccak256(address); - #[cfg(feature = "scroll")] - let hashed_account = - Into::::into((account.info.clone(), &db.db.post_execution_context)) - .into(); - #[cfg(not(feature = "scroll"))] - let hashed_account = account.info.clone().into(); - hashed_state.accounts.insert(hashed_address, Some(hashed_account)); - - let storage = hashed_state - .storages - .entry(hashed_address) - .or_insert_with(|| HashedStorage::new(account.account_state.is_storage_cleared())); - - for (slot, value) in &account.storage { - let slot = B256::from(*slot); - let hashed_slot = keccak256(slot); - storage.storage.insert(hashed_slot, *value); - } - } - - let state_root = db.db.state_root(hashed_state).map_err(T::Error::from_eth_err)?; + // TODO: uncomment once performance cost is acceptable + // + // let mut hashed_state = HashedPostState::default(); + // for (address, account) in &db.accounts { + // let hashed_address = keccak256(address); + // hashed_state.accounts.insert(hashed_address, Some(account.info.clone().into())); + + // let storage = hashed_state + // .storages + // .entry(hashed_address) + // .or_insert_with(|| HashedStorage::new(account.account_state.is_storage_cleared())); + + // for (slot, value) in &account.storage { + // let slot = B256::from(*slot); + // let hashed_slot = 
keccak256(slot); + // storage.storage.insert(hashed_slot, *value); + // } + // } + + // let state_root = db.db.state_root(hashed_state).map_err(T::Error::from_eth_err)?; + let state_root = B256::ZERO; let header = alloy_consensus::Header { beneficiary: block_env.coinbase, diff --git a/crates/rpc/rpc-testing-util/src/trace.rs b/crates/rpc/rpc-testing-util/src/trace.rs index b963fa69d8b9..ee3fce68d3b5 100644 --- a/crates/rpc/rpc-testing-util/src/trace.rs +++ b/crates/rpc/rpc-testing-util/src/trace.rs @@ -5,6 +5,7 @@ use alloy_primitives::{map::HashSet, Bytes, TxHash, B256}; use alloy_rpc_types_eth::{transaction::TransactionRequest, Index}; use alloy_rpc_types_trace::{ filter::TraceFilter, + opcode::BlockOpcodeGas, parity::{LocalizedTransactionTrace, TraceResults, TraceType}, tracerequest::TraceCallRequest, }; @@ -23,6 +24,9 @@ type RawTransactionTraceResult<'a> = /// A result type for the `trace_block` method that also captures the requested block. pub type TraceBlockResult = Result<(Vec, BlockId), (RpcError, BlockId)>; +/// A result type for the `trace_blockOpcodeGas` method that also captures the requested block. +pub type TraceBlockOpCodeGasResult = Result<(BlockOpcodeGas, BlockId), (RpcError, BlockId)>; + /// Type alias representing the result of replaying a transaction. pub type ReplayTransactionResult = Result<(TraceResults, TxHash), (RpcError, TxHash)>; @@ -65,6 +69,18 @@ pub trait TraceApiExt { I: IntoIterator, B: Into; + /// Returns a new stream that yields the traces the opcodes for the given blocks. + /// + /// See also [`StreamExt::buffered`]. + fn trace_block_opcode_gas_unordered( + &self, + params: I, + n: usize, + ) -> TraceBlockOpcodeGasStream<'_> + where + I: IntoIterator, + B: Into; + /// Returns a new stream that replays the transactions for the given transaction hashes. /// /// This returns all results in order. 
@@ -269,6 +285,26 @@ impl TraceApiExt for T { TraceBlockStream { stream: Box::pin(stream) } } + fn trace_block_opcode_gas_unordered( + &self, + params: I, + n: usize, + ) -> TraceBlockOpcodeGasStream<'_> + where + I: IntoIterator, + B: Into, + { + let blocks = params.into_iter().map(|b| b.into()).collect::>(); + let stream = futures::stream::iter(blocks.into_iter().map(move |block| async move { + match self.trace_block_opcode_gas(block).await { + Ok(result) => Ok((result.unwrap(), block)), + Err(err) => Err((err, block)), + } + })) + .buffered(n); + TraceBlockOpcodeGasStream { stream: Box::pin(stream) } + } + fn replay_transactions( &self, tx_hashes: I, @@ -406,6 +442,38 @@ impl std::fmt::Debug for TraceBlockStream<'_> { } } +/// A stream that yields the opcodes for the requested blocks. +#[must_use = "streams do nothing unless polled"] +pub struct TraceBlockOpcodeGasStream<'a> { + stream: Pin + 'a>>, +} + +impl TraceBlockOpcodeGasStream<'_> { + /// Returns the next error result of the stream. + pub async fn next_err(&mut self) -> Option<(RpcError, BlockId)> { + loop { + match self.next().await? { + Ok(_) => continue, + Err(err) => return Some(err), + } + } + } +} + +impl Stream for TraceBlockOpcodeGasStream<'_> { + type Item = TraceBlockOpCodeGasResult; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.stream.as_mut().poll_next(cx) + } +} + +impl std::fmt::Debug for TraceBlockOpcodeGasStream<'_> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("TraceBlockOpcodeGasStream").finish_non_exhaustive() + } +} + /// A utility to compare RPC responses from two different clients. /// /// The `RpcComparer` is designed to perform comparisons between two RPC clients. 
@@ -670,4 +738,14 @@ mod tests { println!("Total successes: {successes}"); println!("Total failures: {failures}"); } + + #[tokio::test] + #[ignore] + async fn block_opcode_gas_stream() { + let client = HttpClientBuilder::default().build("http://localhost:8545").unwrap(); + let block = vec![BlockNumberOrTag::Latest]; + let mut stream = client.trace_block_opcode_gas_unordered(block, 2); + assert_is_stream(&stream); + let _opcodes = stream.next().await.unwrap(); + } } diff --git a/crates/rpc/rpc-types-compat/src/block.rs b/crates/rpc/rpc-types-compat/src/block.rs index f2b1d93be83e..564f5a939fc1 100644 --- a/crates/rpc/rpc-types-compat/src/block.rs +++ b/crates/rpc/rpc-types-compat/src/block.rs @@ -43,7 +43,7 @@ pub fn from_block_with_tx_hashes( block_hash: Option, ) -> Block { let block_hash = block_hash.unwrap_or_else(|| block.header.hash_slow()); - let transactions = block.body.transactions().map(|tx| tx.hash()).collect(); + let transactions = block.body.transactions.iter().map(|tx| tx.hash()).collect(); from_block_with_transactions( block.length(), diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index 7f260a7693c4..46bc9502c579 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -15,7 +15,7 @@ use alloy_rpc_types_engine::{ }; use reth_primitives::{ proofs::{self}, - Block, BlockBody, SealedBlock, TransactionSigned, + Block, BlockBody, BlockExt, SealedBlock, TransactionSigned, }; /// Converts [`ExecutionPayloadV1`] to [`Block`] @@ -363,6 +363,7 @@ mod tests { CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, }; + use reth_primitives::BlockExt; #[test] fn roundtrip_payload_to_block() { diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index f79c30fac642..d30c3f297d02 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ 
-15,6 +15,7 @@ workspace = true # reth reth-chainspec.workspace = true reth-primitives = { workspace = true, features = ["secp256k1"] } +reth-primitives-traits.workspace = true reth-rpc-api.workspace = true reth-rpc-eth-api.workspace = true reth-errors.workspace = true diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index ca8c4493f1c5..2e71419a1922 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -18,7 +18,8 @@ use reth_evm::{ execute::{BlockExecutorProvider, Executor}, ConfigureEvmEnv, }; -use reth_primitives::{Block, SealedBlockWithSenders}; +use reth_primitives::{Block, BlockExt, SealedBlockWithSenders}; +use reth_primitives_traits::SignedTransaction; use reth_provider::{ BlockReaderIdExt, ChainSpecProvider, HeaderProvider, StateProofProvider, StateProviderFactory, TransactionVariant, @@ -800,7 +801,7 @@ where #[async_trait] impl DebugApiServer for DebugApi where - Provider: BlockReaderIdExt + Provider: BlockReaderIdExt + HeaderProvider + ChainSpecProvider + StateProviderFactory diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index dac371529426..b6b37c9f393e 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -7,6 +7,7 @@ use alloy_eips::BlockNumberOrTag; use alloy_network::Ethereum; use alloy_primitives::U256; use derive_more::Deref; +use reth_primitives::NodePrimitives; use reth_provider::{BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider}; use reth_rpc_eth_api::{ helpers::{EthSigner, SpawnBlocking}, @@ -102,7 +103,12 @@ where ) -> Self where Tasks: TaskSpawner + Clone + 'static, - Events: CanonStateSubscriptions, + Events: CanonStateSubscriptions< + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, { let blocking_task_pool = BlockingTaskPool::build().expect("failed to build blocking task pool"); @@ -432,7 +438,7 @@ mod tests { use crate::EthApi; fn build_test_eth_api< - P: 
BlockReaderIdExt + P: BlockReaderIdExt + BlockReader + ChainSpecProvider + EvmEnvProvider diff --git a/crates/rpc/rpc/src/eth/helpers/block.rs b/crates/rpc/rpc/src/eth/helpers/block.rs index bc1e9344799e..f6aae34b961e 100644 --- a/crates/rpc/rpc/src/eth/helpers/block.rs +++ b/crates/rpc/rpc/src/eth/helpers/block.rs @@ -32,7 +32,6 @@ where let block_hash = block.hash(); let excess_blob_gas = block.excess_blob_gas; let timestamp = block.timestamp; - let block = block.unseal(); return block .body diff --git a/crates/rpc/rpc/src/eth/helpers/pending_block.rs b/crates/rpc/rpc/src/eth/helpers/pending_block.rs index 8540a4684bf8..a67522ce0326 100644 --- a/crates/rpc/rpc/src/eth/helpers/pending_block.rs +++ b/crates/rpc/rpc/src/eth/helpers/pending_block.rs @@ -18,8 +18,10 @@ impl LoadPendingBlock where Self: SpawnBlocking + RpcNodeCore< - Provider: BlockReaderIdExt - + EvmEnvProvider + Provider: BlockReaderIdExt< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + > + EvmEnvProvider + ChainSpecProvider + StateProviderFactory, Pool: TransactionPool, diff --git a/crates/rpc/rpc/src/eth/helpers/receipt.rs b/crates/rpc/rpc/src/eth/helpers/receipt.rs index 13b0dab2593d..ae723fc5314f 100644 --- a/crates/rpc/rpc/src/eth/helpers/receipt.rs +++ b/crates/rpc/rpc/src/eth/helpers/receipt.rs @@ -1,7 +1,7 @@ //! Builds an RPC receipt response w.r.t. data layout of network. 
use reth_primitives::{Receipt, TransactionMeta, TransactionSigned}; -use reth_provider::TransactionsProvider; +use reth_provider::{ReceiptProvider, TransactionsProvider}; use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcNodeCoreExt, RpcReceipt}; use reth_rpc_eth_types::{EthApiError, EthReceiptBuilder}; @@ -9,7 +9,10 @@ use crate::EthApi; impl LoadReceipt for EthApi where - Self: RpcNodeCoreExt>, + Self: RpcNodeCoreExt< + Provider: TransactionsProvider + + ReceiptProvider, + >, { async fn build_transaction_receipt( &self, diff --git a/crates/rpc/rpc/src/eth/helpers/trace.rs b/crates/rpc/rpc/src/eth/helpers/trace.rs index d9fe5e18a05b..9c60a4c105f9 100644 --- a/crates/rpc/rpc/src/eth/helpers/trace.rs +++ b/crates/rpc/rpc/src/eth/helpers/trace.rs @@ -2,11 +2,12 @@ use alloy_consensus::Header; use reth_evm::ConfigureEvm; +use reth_provider::BlockReader; use reth_rpc_eth_api::helpers::{LoadState, Trace}; use crate::EthApi; impl Trace for EthApi where - Self: LoadState> + Self: LoadState> { } diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index 8ea6d1f87c81..8ad809b8b186 100644 --- a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -2,6 +2,7 @@ use std::sync::Arc; +use alloy_eips::eip2718::Encodable2718; use alloy_primitives::TxHash; use alloy_rpc_types_eth::{ pubsub::{ @@ -15,6 +16,7 @@ use jsonrpsee::{ server::SubscriptionMessage, types::ErrorObject, PendingSubscriptionSink, SubscriptionSink, }; use reth_network_api::NetworkInfo; +use reth_primitives::NodePrimitives; use reth_provider::{BlockReader, CanonStateSubscriptions, EvmEnvProvider}; use reth_rpc_eth_api::{pubsub::EthPubSubApiServer, TransactionCompat}; use reth_rpc_eth_types::logs_utils; @@ -84,7 +86,14 @@ impl EthPubSubApiServer where Provider: BlockReader + EvmEnvProvider + Clone + 'static, Pool: TransactionPool + 'static, - Events: CanonStateSubscriptions + Clone + 'static, + Events: CanonStateSubscriptions< + Primitives: 
NodePrimitives< + SignedTx: Encodable2718, + BlockHeader = reth_primitives::Header, + Receipt = reth_primitives::Receipt, + >, + > + Clone + + 'static, Network: NetworkInfo + Clone + 'static, Eth: TransactionCompat + 'static, { @@ -117,7 +126,14 @@ async fn handle_accepted( where Provider: BlockReader + EvmEnvProvider + Clone + 'static, Pool: TransactionPool + 'static, - Events: CanonStateSubscriptions + Clone + 'static, + Events: CanonStateSubscriptions< + Primitives: NodePrimitives< + SignedTx: Encodable2718, + BlockHeader = reth_primitives::Header, + Receipt = reth_primitives::Receipt, + >, + > + Clone + + 'static, Network: NetworkInfo + Clone + 'static, Eth: TransactionCompat, { @@ -333,7 +349,13 @@ where impl EthPubSubInner where Provider: BlockReader + EvmEnvProvider + 'static, - Events: CanonStateSubscriptions + 'static, + Events: CanonStateSubscriptions< + Primitives: NodePrimitives< + SignedTx: Encodable2718, + BlockHeader = reth_primitives::Header, + Receipt = reth_primitives::Receipt, + >, + > + 'static, Network: NetworkInfo + 'static, Pool: 'static, { diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 81dc8ff8b8a8..f81eefdc5ff1 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -74,7 +74,7 @@ impl TraceApi { impl TraceApi where - Provider: BlockReader + Provider: BlockReader::Block> + StateProviderFactory + EvmEnvProvider + ChainSpecProvider @@ -565,7 +565,7 @@ where #[async_trait] impl TraceApiServer for TraceApi where - Provider: BlockReader + Provider: BlockReader::Block> + StateProviderFactory + EvmEnvProvider + ChainSpecProvider diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index b90729c71311..c1fde11c2354 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -5,18 +5,16 @@ use std::{ use futures_util::TryStreamExt; use reth_codecs::Compact; -use reth_primitives_traits::BlockBody; +use 
reth_primitives_traits::{Block, BlockBody}; use tracing::*; -use alloy_primitives::TxNumber; use reth_db::{tables, transaction::DbTx}; use reth_db_api::{cursor::DbCursorRO, transaction::DbTxMut}; use reth_network_p2p::bodies::{downloader::BodyDownloader, response::BlockResponse}; use reth_primitives::StaticFileSegment; use reth_provider::{ - providers::{StaticFileProvider, StaticFileWriter}, - BlockReader, BlockWriter, DBProvider, ProviderError, StaticFileProviderFactory, StatsReader, - StorageLocation, + providers::StaticFileWriter, BlockReader, BlockWriter, DBProvider, ProviderError, + StaticFileProviderFactory, StatsReader, StorageLocation, }; use reth_stages_api::{ EntitiesCheckpoint, ExecInput, ExecOutput, Stage, StageCheckpoint, StageError, StageId, @@ -24,6 +22,8 @@ use reth_stages_api::{ }; use reth_storage_errors::provider::ProviderResult; +use super::missing_static_data_error; + /// The body stage downloads block bodies. /// /// The body stage downloads block bodies for all block headers stored locally in storage. @@ -128,6 +128,7 @@ impl BodyStage { next_static_file_tx_num.saturating_sub(1), &static_file_provider, provider, + StaticFileSegment::Transactions, )?) } } else { @@ -135,6 +136,7 @@ impl BodyStage { next_static_file_tx_num.saturating_sub(1), &static_file_provider, provider, + StaticFileSegment::Transactions, )?) } } @@ -151,7 +153,7 @@ where + StaticFileProviderFactory + StatsReader + BlockReader - + BlockWriter, + + BlockWriter>, D: BodyDownloader>, { /// Return the id of the stage @@ -242,42 +244,6 @@ where } } -/// Called when database is ahead of static files. Attempts to find the first block we are missing -/// transactions for. 
-fn missing_static_data_error( - last_tx_num: TxNumber, - static_file_provider: &StaticFileProvider, - provider: &Provider, -) -> Result -where - Provider: BlockReader + StaticFileProviderFactory, -{ - let mut last_block = static_file_provider - .get_highest_static_file_block(StaticFileSegment::Transactions) - .unwrap_or_default(); - - // To be extra safe, we make sure that the last tx num matches the last block from its indices. - // If not, get it. - loop { - if let Some(indices) = provider.block_body_indices(last_block)? { - if indices.last_tx_num() <= last_tx_num { - break - } - } - if last_block == 0 { - break - } - last_block -= 1; - } - - let missing_block = Box::new(provider.sealed_header(last_block + 1)?.unwrap_or_default()); - - Ok(StageError::MissingStaticFileData { - block: missing_block, - segment: StaticFileSegment::Transactions, - }) -} - // TODO(alexey): ideally, we want to measure Bodies stage progress in bytes, but it's hard to know // beforehand how many bytes we need to download. So the good solution would be to measure the // progress in gas as a proxy to size. Execution stage uses a similar approach. 
diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index 16d87c35c3a2..1c33b243db12 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -1,5 +1,5 @@ use crate::stages::MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD; -use alloy_consensus::Header; +use alloy_consensus::{BlockHeader, Header}; use alloy_primitives::BlockNumber; use num_traits::Zero; use reth_config::config::ExecutionConfig; @@ -12,13 +12,12 @@ use reth_evm::{ use reth_execution_types::Chain; use reth_exex::{ExExManagerHandle, ExExNotification, ExExNotificationSource}; use reth_primitives::{SealedHeader, StaticFileSegment}; -use reth_primitives_traits::{format_gas_throughput, NodePrimitives}; +use reth_primitives_traits::{format_gas_throughput, Block, BlockBody, NodePrimitives}; use reth_provider::{ - providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, - writer::UnifiedStorageWriter, + providers::{StaticFileProvider, StaticFileWriter}, BlockHashReader, BlockReader, DBProvider, HeaderProvider, LatestStateProviderRef, - OriginalValuesKnown, ProviderError, StateChangeWriter, StateWriter, StaticFileProviderFactory, - StatsReader, TransactionVariant, + OriginalValuesKnown, ProviderError, StateCommitmentProvider, StateWriter, + StaticFileProviderFactory, StatsReader, StorageLocation, TransactionVariant, }; use reth_prune_types::PruneModes; use reth_stages_api::{ @@ -35,6 +34,8 @@ use std::{ }; use tracing::*; +use super::missing_static_data_error; + /// The execution stage executes all transactions and /// update history indexes. /// @@ -169,19 +170,100 @@ impl ExecutionStage { } Ok(prune_modes) } + + /// Performs consistency check on static files. + /// + /// This function compares the highest receipt number recorded in the database with that in the + /// static file to detect any discrepancies due to unexpected shutdowns or database rollbacks. 
+ /// **If the height in the static file is higher**, it rolls back (unwinds) the static file. + /// **Conversely, if the height in the database is lower**, it triggers a rollback in the + /// database (by returning [`StageError`]) until the heights in both the database and static + /// file match. + fn ensure_consistency( + &self, + provider: &Provider, + checkpoint: u64, + unwind_to: Option, + ) -> Result<(), StageError> + where + Provider: StaticFileProviderFactory + DBProvider + BlockReader + HeaderProvider, + { + // If thre's any receipts pruning configured, receipts are written directly to database and + // inconsistencies are expected. + if self.prune_modes.has_receipts_pruning() { + return Ok(()) + } + + // Get next expected receipt number + let tx = provider.tx_ref(); + let next_receipt_num = tx + .cursor_read::()? + .seek_exact(checkpoint)? + .map(|(_, value)| value.next_tx_num()) + .unwrap_or(0); + + let static_file_provider = provider.static_file_provider(); + + // Get next expected receipt number in static files + let next_static_file_receipt_num = static_file_provider + .get_highest_static_file_tx(StaticFileSegment::Receipts) + .map(|num| num + 1) + .unwrap_or(0); + + // Check if we had any unexpected shutdown after committing to static files, but + // NOT committing to database. + match next_static_file_receipt_num.cmp(&next_receipt_num) { + // It can be equal when it's a chain of empty blocks, but we still need to update the + // last block in the range. + Ordering::Greater | Ordering::Equal => { + let mut static_file_producer = + static_file_provider.latest_writer(StaticFileSegment::Receipts)?; + static_file_producer + .prune_receipts(next_static_file_receipt_num - next_receipt_num, checkpoint)?; + // Since this is a database <-> static file inconsistency, we commit the change + // straight away. 
+ static_file_producer.commit()?; + } + Ordering::Less => { + // If we are already in the process of unwind, this might be fine because we will + // fix the inconsistency right away. + if let Some(unwind_to) = unwind_to { + let next_receipt_num_after_unwind = provider + .tx_ref() + .get::(unwind_to)? + .map(|b| b.next_tx_num()) + .ok_or(ProviderError::BlockBodyIndicesNotFound(unwind_to))?; + + if next_receipt_num_after_unwind > next_static_file_receipt_num { + // This means we need a deeper unwind. + } else { + return Ok(()) + } + } + + return Err(missing_static_data_error( + next_static_file_receipt_num.saturating_sub(1), + &static_file_provider, + provider, + StaticFileSegment::Receipts, + )?) + } + } + + Ok(()) + } } impl Stage for ExecutionStage where E: BlockExecutorProvider, Provider: DBProvider - + BlockReader + + BlockReader + StaticFileProviderFactory + StatsReader - + StateChangeWriter - + BlockHashReader, - for<'a> UnifiedStorageWriter<'a, Provider, StaticFileProviderRWRefMut<'a, Provider::Primitives>>: - StateWriter, + + BlockHashReader + + StateWriter + + StateCommitmentProvider, { /// Return the id of the stage fn id(&self) -> StageId { @@ -209,20 +291,7 @@ where let prune_modes = self.adjust_prune_modes(provider, start_block, max_block)?; let static_file_provider = provider.static_file_provider(); - // We only use static files for Receipts, if there is no receipt pruning of any kind. - let static_file_producer = if self.prune_modes.receipts.is_none() && - self.prune_modes.receipts_log_filter.is_empty() - { - debug!(target: "sync::stages::execution", start = start_block, "Preparing static file producer"); - let mut producer = - prepare_static_file_producer(provider, &static_file_provider, start_block)?; - // Since there might be a database <-> static file inconsistency (read - // `prepare_static_file_producer` for context), we commit the change straight away. 
- producer.commit()?; - Some(producer) - } else { - None - }; + self.ensure_consistency(provider, input.checkpoint().block_number, None)?; let state = LatestStateProviderRef::new(provider); #[cfg(feature = "scroll")] @@ -273,17 +342,17 @@ where fetch_block_duration += fetch_block_start.elapsed(); - cumulative_gas += block.gas_used; + cumulative_gas += block.header().gas_used(); // Configure the executor to use the current state. - trace!(target: "sync::stages::execution", number = block_number, txs = block.body.transactions.len(), "Executing block"); + trace!(target: "sync::stages::execution", number = block_number, txs = block.body().transactions().len(), "Executing block"); // Execute the block let execute_start = Instant::now(); self.metrics.metered_one((&block, td).into(), |input| { executor.execute_and_verify_one(input).map_err(|error| StageError::Block { - block: Box::new(SealedHeader::seal(block.header.clone())), + block: Box::new(SealedHeader::seal(block.header().clone())), error: BlockErrorKind::Execution(error), }) })?; @@ -307,7 +376,7 @@ where } stage_progress = block_number; - stage_checkpoint.progress.processed += block.gas_used; + stage_checkpoint.progress.processed += block.gas_used(); // If we have ExExes we need to save the block in memory for later if self.exex_manager_handle.has_exexs() { @@ -346,7 +415,7 @@ where // the `has_exexs` check here as well if !blocks.is_empty() { let blocks = blocks.into_iter().map(|block| { - let hash = block.header.hash_slow(); + let hash = block.header().hash_slow(); block.seal(hash) }); @@ -365,8 +434,7 @@ where let time = Instant::now(); // write output - let mut writer = UnifiedStorageWriter::new(provider, static_file_producer); - writer.write_to_storage(state, OriginalValuesKnown::Yes)?; + provider.write_state(state, OriginalValuesKnown::Yes, StorageLocation::StaticFiles)?; let db_write_duration = time.elapsed(); debug!( @@ -413,10 +481,13 @@ where }) } + self.ensure_consistency(provider, 
input.checkpoint.block_number, Some(unwind_to))?; + // Unwind account and storage changesets, as well as receipts. // // This also updates `PlainStorageState` and `PlainAccountState`. - let bundle_state_with_receipts = provider.take_state(range.clone())?; + let bundle_state_with_receipts = + provider.take_state_above(unwind_to, StorageLocation::Both)?; // Prepare the input for post unwind commit hook, where an `ExExNotification` will be sent. if self.exex_manager_handle.has_exexs() { @@ -437,25 +508,6 @@ where } } - let static_file_provider = provider.static_file_provider(); - - // Unwind all receipts for transactions in the block range - if self.prune_modes.receipts.is_none() && self.prune_modes.receipts_log_filter.is_empty() { - // We only use static files for Receipts, if there is no receipt pruning of any kind. - - // prepare_static_file_producer does a consistency check that will unwind static files - // if the expected highest receipt in the files is higher than the database. - // Which is essentially what happens here when we unwind this stage. - let _static_file_producer = - prepare_static_file_producer(provider, &static_file_provider, *range.start())?; - } else { - // If there is any kind of receipt pruning/filtering we use the database, since static - // files do not support filters. - // - // If we hit this case, the receipts have already been unwound by the call to - // `take_state`. - } - // Update the checkpoint. let mut stage_checkpoint = input.checkpoint.execution_stage_checkpoint(); if let Some(stage_checkpoint) = stage_checkpoint.as_mut() { @@ -581,85 +633,6 @@ fn calculate_gas_used_from_headers( Ok(gas_total) } -/// Returns a `StaticFileProviderRWRefMut` static file producer after performing a consistency -/// check. -/// -/// This function compares the highest receipt number recorded in the database with that in the -/// static file to detect any discrepancies due to unexpected shutdowns or database rollbacks. 
**If -/// the height in the static file is higher**, it rolls back (unwinds) the static file. -/// **Conversely, if the height in the database is lower**, it triggers a rollback in the database -/// (by returning [`StageError`]) until the heights in both the database and static file match. -fn prepare_static_file_producer<'a, 'b, Provider>( - provider: &'b Provider, - static_file_provider: &'a StaticFileProvider, - start_block: u64, -) -> Result, StageError> -where - Provider: StaticFileProviderFactory + DBProvider + BlockReader + HeaderProvider, - 'b: 'a, -{ - // Get next expected receipt number - let tx = provider.tx_ref(); - let next_receipt_num = tx - .cursor_read::()? - .seek_exact(start_block)? - .map(|(_, value)| value.first_tx_num) - .unwrap_or(0); - - // Get next expected receipt number in static files - let next_static_file_receipt_num = static_file_provider - .get_highest_static_file_tx(StaticFileSegment::Receipts) - .map(|num| num + 1) - .unwrap_or(0); - - let mut static_file_producer = - static_file_provider.get_writer(start_block, StaticFileSegment::Receipts)?; - - // Check if we had any unexpected shutdown after committing to static files, but - // NOT committing to database. - match next_static_file_receipt_num.cmp(&next_receipt_num) { - // It can be equal when it's a chain of empty blocks, but we still need to update the last - // block in the range. - Ordering::Greater | Ordering::Equal => static_file_producer.prune_receipts( - next_static_file_receipt_num - next_receipt_num, - start_block.saturating_sub(1), - )?, - Ordering::Less => { - let mut last_block = static_file_provider - .get_highest_static_file_block(StaticFileSegment::Receipts) - .unwrap_or(0); - - let last_receipt_num = static_file_provider - .get_highest_static_file_tx(StaticFileSegment::Receipts) - .unwrap_or(0); - - // To be extra safe, we make sure that the last receipt num matches the last block from - // its indices. If not, get it. 
- loop { - if let Some(indices) = provider.block_body_indices(last_block)? { - if indices.last_tx_num() <= last_receipt_num { - break - } - } - if last_block == 0 { - break - } - last_block -= 1; - } - - let missing_block = - Box::new(provider.sealed_header(last_block + 1)?.unwrap_or_default()); - - return Err(StageError::MissingStaticFileData { - block: missing_block, - segment: StaticFileSegment::Receipts, - }) - } - } - - Ok(static_file_producer) -} - #[cfg(test)] mod tests { use super::*; @@ -919,7 +892,7 @@ mod tests { // Tests node with database and node with static files for mut mode in modes { - let provider = factory.database_provider_rw().unwrap(); + let mut provider = factory.database_provider_rw().unwrap(); if let Some(mode) = &mut mode { // Simulating a full node where we write receipts to database @@ -928,6 +901,7 @@ mod tests { let mut execution_stage = stage(); execution_stage.prune_modes = mode.clone().unwrap_or_default(); + provider.set_prune_modes(mode.clone().unwrap_or_default()); let output = execution_stage.execute(&provider, input).unwrap(); provider.commit().unwrap(); @@ -1003,9 +977,10 @@ mod tests { "Post changed of a account" ); - let provider = factory.database_provider_rw().unwrap(); + let mut provider = factory.database_provider_rw().unwrap(); let mut stage = stage(); - stage.prune_modes = mode.unwrap_or_default(); + stage.prune_modes = mode.clone().unwrap_or_default(); + provider.set_prune_modes(mode.unwrap_or_default()); let _result = stage .unwind( @@ -1092,6 +1067,7 @@ mod tests { // Test Execution let mut execution_stage = stage(); execution_stage.prune_modes = mode.clone().unwrap_or_default(); + provider.set_prune_modes(mode.clone().unwrap_or_default()); let result = execution_stage.execute(&provider, input).unwrap(); provider.commit().unwrap(); @@ -1099,7 +1075,8 @@ mod tests { // Test Unwind provider = factory.database_provider_rw().unwrap(); let mut stage = stage(); - stage.prune_modes = mode.unwrap_or_default(); + 
stage.prune_modes = mode.clone().unwrap_or_default(); + provider.set_prune_modes(mode.clone().unwrap_or_default()); let result = stage .unwind( diff --git a/crates/stages/stages/src/stages/prune.rs b/crates/stages/stages/src/stages/prune.rs index 8adf2fcad546..7e5d7af46eef 100644 --- a/crates/stages/stages/src/stages/prune.rs +++ b/crates/stages/stages/src/stages/prune.rs @@ -1,4 +1,5 @@ -use reth_db::transaction::DbTxMut; +use reth_db::{table::Value, transaction::DbTxMut}; +use reth_primitives::NodePrimitives; use reth_provider::{ BlockReader, DBProvider, PruneCheckpointReader, PruneCheckpointWriter, StaticFileProviderFactory, @@ -41,7 +42,7 @@ where + PruneCheckpointReader + PruneCheckpointWriter + BlockReader - + StaticFileProviderFactory, + + StaticFileProviderFactory>, { fn id(&self) -> StageId { StageId::Prune @@ -130,7 +131,7 @@ where + PruneCheckpointReader + PruneCheckpointWriter + BlockReader - + StaticFileProviderFactory, + + StaticFileProviderFactory>, { fn id(&self) -> StageId { StageId::PruneSenderRecovery @@ -171,6 +172,7 @@ mod tests { }; use alloy_primitives::B256; use reth_primitives::SealedBlock; + use reth_primitives_traits::SignedTransaction; use reth_provider::{ providers::StaticFileWriter, TransactionsProvider, TransactionsProviderExt, }; diff --git a/crates/stages/stages/src/stages/sender_recovery.rs b/crates/stages/stages/src/stages/sender_recovery.rs index d611062b565d..a6c2537c1855 100644 --- a/crates/stages/stages/src/stages/sender_recovery.rs +++ b/crates/stages/stages/src/stages/sender_recovery.rs @@ -361,10 +361,16 @@ struct FailedSenderRecoveryError { #[cfg(test)] mod tests { + use super::*; + use crate::test_utils::{ + stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, StorageKind, + TestRunnerError, TestStageDB, UnwindStageTestRunner, + }; use alloy_primitives::{BlockNumber, B256}; use assert_matches::assert_matches; use reth_db_api::cursor::DbCursorRO; use reth_primitives::{SealedBlock, TransactionSigned}; + use 
reth_primitives_traits::SignedTransaction; use reth_provider::{ providers::StaticFileWriter, DatabaseProviderFactory, PruneCheckpointWriter, StaticFileProviderFactory, TransactionsProvider, @@ -375,12 +381,6 @@ mod tests { self, random_block, random_block_range, BlockParams, BlockRangeParams, }; - use super::*; - use crate::test_utils::{ - stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, StorageKind, - TestRunnerError, TestStageDB, UnwindStageTestRunner, - }; - stage_test_suite_ext!(SenderRecoveryTestRunner, sender_recovery); /// Execute a block range with a single transaction diff --git a/crates/stages/stages/src/stages/utils.rs b/crates/stages/stages/src/stages/utils.rs index caf039faca10..5aa1f3f880c3 100644 --- a/crates/stages/stages/src/stages/utils.rs +++ b/crates/stages/stages/src/stages/utils.rs @@ -1,5 +1,5 @@ //! Utils for `stages`. -use alloy_primitives::BlockNumber; +use alloy_primitives::{BlockNumber, TxNumber}; use reth_config::config::EtlConfig; use reth_db::BlockNumberList; use reth_db_api::{ @@ -10,7 +10,11 @@ use reth_db_api::{ DatabaseError, }; use reth_etl::Collector; -use reth_provider::DBProvider; +use reth_primitives::StaticFileSegment; +use reth_provider::{ + providers::StaticFileProvider, BlockReader, DBProvider, ProviderError, + StaticFileProviderFactory, +}; use reth_stages_api::StageError; use std::{collections::HashMap, hash::Hash, ops::RangeBounds}; use tracing::info; @@ -244,3 +248,36 @@ impl LoadMode { matches!(self, Self::Flush) } } + +/// Called when database is ahead of static files. Attempts to find the first block we are missing +/// transactions for. 
+pub(crate) fn missing_static_data_error( + last_tx_num: TxNumber, + static_file_provider: &StaticFileProvider, + provider: &Provider, + segment: StaticFileSegment, +) -> Result +where + Provider: BlockReader + StaticFileProviderFactory, +{ + let mut last_block = + static_file_provider.get_highest_static_file_block(segment).unwrap_or_default(); + + // To be extra safe, we make sure that the last tx num matches the last block from its indices. + // If not, get it. + loop { + if let Some(indices) = provider.block_body_indices(last_block)? { + if indices.last_tx_num() <= last_tx_num { + break + } + } + if last_block == 0 { + break + } + last_block -= 1; + } + + let missing_block = Box::new(provider.sealed_header(last_block + 1)?.unwrap_or_default()); + + Ok(StageError::MissingStaticFileData { block: missing_block, segment }) +} diff --git a/crates/static-file/static-file/src/segments/transactions.rs b/crates/static-file/static-file/src/segments/transactions.rs index 168ae94817b6..5b686cfe109f 100644 --- a/crates/static-file/static-file/src/segments/transactions.rs +++ b/crates/static-file/static-file/src/segments/transactions.rs @@ -44,7 +44,7 @@ where .ok_or(ProviderError::BlockBodyIndicesNotFound(block))?; let mut transactions_cursor = provider.tx_ref().cursor_read::::Primitives as NodePrimitives>::SignedTx, + ::SignedTx, >>()?; let transactions_walker = transactions_cursor.walk_range(block_body_indices.tx_num_range())?; diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index 48fb946a111c..d0acfdb32066 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -13,8 +13,8 @@ use reth_provider::{ errors::provider::ProviderResult, providers::StaticFileWriter, writer::UnifiedStorageWriter, BlockHashReader, BlockNumReader, BundleStateInit, ChainSpecProvider, DBProvider, DatabaseProviderFactory, ExecutionOutcome, HashingWriter, HeaderProvider, HistoryWriter, - OriginalValuesKnown, ProviderError, 
RevertsInit, StageCheckpointWriter, StateChangeWriter, - StateWriter, StaticFileProviderFactory, TrieWriter, + OriginalValuesKnown, ProviderError, RevertsInit, StageCheckpointWriter, StateWriter, + StaticFileProviderFactory, StorageLocation, TrieWriter, }; use reth_stages_types::{StageCheckpoint, StageId}; use reth_trie::{IntermediateStateRootState, StateRoot as StateRootComputer, StateRootProgress}; @@ -75,7 +75,8 @@ where + HistoryWriter + HeaderProvider + HashingWriter - + StateChangeWriter + + StateWriter + + StateWriter + AsRef, { let chain = factory.chain_spec(); @@ -145,8 +146,8 @@ pub fn insert_genesis_state<'a, 'b, Provider>( where Provider: StaticFileProviderFactory + DBProvider - + StateChangeWriter + HeaderProvider + + StateWriter + AsRef, { insert_state(provider, alloc, 0) @@ -161,8 +162,8 @@ pub fn insert_state<'a, 'b, Provider>( where Provider: StaticFileProviderFactory + DBProvider - + StateChangeWriter + HeaderProvider + + StateWriter + AsRef, { let capacity = alloc.size_hint().1.unwrap_or(0); @@ -236,8 +237,7 @@ where Vec::new(), ); - let mut storage_writer = UnifiedStorageWriter::from_database(&provider); - storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::Yes)?; + provider.write_state(execution_outcome, OriginalValuesKnown::Yes, StorageLocation::Database)?; trace!(target: "reth::cli", "Inserted state"); @@ -355,8 +355,8 @@ where + HistoryWriter + HeaderProvider + HashingWriter - + StateChangeWriter + TrieWriter + + StateWriter + AsRef, { let block = provider_rw.last_block_number()?; @@ -476,7 +476,7 @@ where + HeaderProvider + HashingWriter + HistoryWriter - + StateChangeWriter + + StateWriter + AsRef, { let accounts_len = collector.len(); diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index 7429f76afd71..c723a3a37645 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -21,8 +21,8 @@ reth-storage-errors.workspace = true reth-nippy-jar.workspace = true 
reth-prune-types.workspace = true reth-stages-types.workspace = true +reth-trie-common = { workspace = true, features = ["serde"] } reth-tracing.workspace = true -reth-trie-common.workspace = true # ethereum alloy-primitives.workspace = true @@ -49,7 +49,7 @@ thiserror.workspace = true tempfile = { workspace = true, optional = true } derive_more.workspace = true rustc-hash = { workspace = true, optional = true } -sysinfo = { version = "0.31", default-features = false, features = ["system"] } +sysinfo = { version = "0.32", default-features = false, features = ["system"] } parking_lot = { workspace = true, optional = true } # arbitrary utils diff --git a/crates/storage/db/src/lockfile.rs b/crates/storage/db/src/lockfile.rs index a87ab7393f1f..b28a83f11ca4 100644 --- a/crates/storage/db/src/lockfile.rs +++ b/crates/storage/db/src/lockfile.rs @@ -110,6 +110,7 @@ impl ProcessUID { let pid2 = sysinfo::Pid::from(pid); system.refresh_processes_specifics( sysinfo::ProcessesToUpdate::Some(&[pid2]), + true, ProcessRefreshKind::new(), ); system.process(pid2).map(|process| Self { pid, start_time: process.start_time() }) diff --git a/crates/storage/db/src/tables/mod.rs b/crates/storage/db/src/tables/mod.rs index aafdf606bb3d..a1fea62f0d8b 100644 --- a/crates/storage/db/src/tables/mod.rs +++ b/crates/storage/db/src/tables/mod.rs @@ -346,9 +346,9 @@ tables! { } /// Canonical only Stores transaction receipts. - table Receipts { + table Receipts { type Key = TxNumber; - type Value = Receipt; + type Value = R; } /// Stores all smart contract bytecodes. diff --git a/crates/storage/errors/src/provider.rs b/crates/storage/errors/src/provider.rs index 9e6720b84403..e69c0343f564 100644 --- a/crates/storage/errors/src/provider.rs +++ b/crates/storage/errors/src/provider.rs @@ -133,6 +133,8 @@ pub enum ProviderError { StorageLockError(StorageLockError), /// Storage writer error. 
UnifiedStorageWriterError(UnifiedStorageWriterError), + /// Received invalid output from configured storage implementation. + InvalidStorageOutput, } impl From for ProviderError { diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 6ca1e9c33efd..74924f1fbd9b 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -103,21 +103,21 @@ optimism = [ "revm/optimism", ] serde = [ - "reth-execution-types/serde", - "reth-trie-db/serde", - "reth-trie/serde", - "alloy-consensus/serde", - "alloy-eips/serde", - "alloy-primitives/serde", - "alloy-rpc-types-engine/serde", "dashmap/serde", "notify/serde", "parking_lot/serde", "rand/serde", + "alloy-primitives/serde", + "alloy-consensus/serde", + "alloy-eips/serde", + "alloy-rpc-types-engine/serde", "revm/serde", "reth-codecs/serde", "reth-optimism-primitives?/serde", "reth-primitives-traits/serde", + "reth-execution-types/serde", + "reth-trie-db/serde", + "reth-trie/serde", "reth-scroll-primitives?/serde" ] test-utils = [ diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 967ac785b47e..08f5e4680a2b 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -25,14 +25,16 @@ use reth_db::{models::BlockNumberAddress, transaction::DbTx, Database}; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; use reth_execution_types::ExecutionOutcome; -use reth_node_types::{NodeTypesWithDB, TxTy}; +use reth_node_types::{BlockTy, NodeTypesWithDB, ReceiptTy, TxTy}; use reth_primitives::{ - Account, Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, - StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedNoHash, + Account, Block, BlockWithSenders, EthPrimitives, NodePrimitives, Receipt, SealedBlock, + 
SealedBlockFor, SealedBlockWithSenders, SealedHeader, StorageEntry, TransactionMeta, + TransactionSigned, TransactionSignedNoHash, }; +use reth_primitives_traits::BlockBody as _; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{DBProvider, StorageChangeSetReader}; +use reth_storage_api::{DBProvider, NodePrimitivesProvider, StorageChangeSetReader}; use reth_storage_errors::provider::ProviderResult; use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; use std::{ @@ -55,7 +57,7 @@ pub struct BlockchainProvider2 { pub(crate) database: ProviderFactory, /// Tracks the chain info wrt forkchoice updates and in memory canonical /// state. - pub(crate) canonical_in_memory_state: CanonicalInMemoryState, + pub(crate) canonical_in_memory_state: CanonicalInMemoryState, } impl Clone for BlockchainProvider2 { @@ -115,7 +117,7 @@ impl BlockchainProvider2 { } /// Gets a clone of `canonical_in_memory_state`. - pub fn canonical_in_memory_state(&self) -> CanonicalInMemoryState { + pub fn canonical_in_memory_state(&self) -> CanonicalInMemoryState { self.canonical_in_memory_state.clone() } @@ -130,8 +132,8 @@ impl BlockchainProvider2 { /// This uses a given [`BlockState`] to initialize a state provider for that block. 
fn block_state_provider( &self, - state: &BlockState, - ) -> ProviderResult { + state: &BlockState, + ) -> ProviderResult> { let anchor_hash = state.anchor().hash; let latest_historical = self.database.history_by_block_hash(anchor_hash)?; Ok(state.state_provider(latest_historical)) @@ -143,11 +145,15 @@ impl BlockchainProvider2 { pub fn get_state( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>>> { self.consistent_provider()?.get_state(range) } } +impl NodePrimitivesProvider for BlockchainProvider2 { + type Primitives = N::Primitives; +} + impl DatabaseProviderFactory for BlockchainProvider2 { type DB = N::DB; type Provider = as DatabaseProviderFactory>::Provider; @@ -163,8 +169,6 @@ impl DatabaseProviderFactory for BlockchainProvider2 { } impl StaticFileProviderFactory for BlockchainProvider2 { - type Primitives = N::Primitives; - fn static_file_provider(&self) -> StaticFileProvider { self.database.static_file_provider() } @@ -258,23 +262,33 @@ impl BlockIdReader for BlockchainProvider2 { } impl BlockReader for BlockchainProvider2 { - fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { + type Block = BlockTy; + + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + ) -> ProviderResult> { self.consistent_provider()?.find_block_by_hash(hash, source) } - fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { self.consistent_provider()?.block(id) } - fn pending_block(&self) -> ProviderResult> { + fn pending_block(&self) -> ProviderResult>> { Ok(self.canonical_in_memory_state.pending_block()) } - fn pending_block_with_senders(&self) -> ProviderResult> { + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { Ok(self.canonical_in_memory_state.pending_block_with_senders()) } - fn pending_block_and_receipts(&self) -> ProviderResult)>> { + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { 
Ok(self.canonical_in_memory_state.pending_block_and_receipts()) } @@ -299,7 +313,7 @@ impl BlockReader for BlockchainProvider2 { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.consistent_provider()?.block_with_senders(id, transaction_kind) } @@ -307,25 +321,25 @@ impl BlockReader for BlockchainProvider2 { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.consistent_provider()?.sealed_block_with_senders(id, transaction_kind) } - fn block_range(&self, range: RangeInclusive) -> ProviderResult> { + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { self.consistent_provider()?.block_range(range) } fn block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.consistent_provider()?.block_with_senders_range(range) } fn sealed_block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.consistent_provider()?.sealed_block_with_senders_range(range) } } @@ -397,28 +411,33 @@ impl TransactionsProvider for BlockchainProvider2 { } impl ReceiptProvider for BlockchainProvider2 { - fn receipt(&self, id: TxNumber) -> ProviderResult> { + type Receipt = ReceiptTy; + + fn receipt(&self, id: TxNumber) -> ProviderResult> { self.consistent_provider()?.receipt(id) } - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { self.consistent_provider()?.receipt_by_hash(hash) } - fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>> { + fn receipts_by_block( + &self, + block: BlockHashOrNumber, + ) -> ProviderResult>> { self.consistent_provider()?.receipts_by_block(block) } fn receipts_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.consistent_provider()?.receipts_by_tx_range(range) } } 
impl ReceiptProviderIdExt for BlockchainProvider2 { - fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { + fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { self.consistent_provider()?.receipts_by_block_id(block) } } @@ -633,7 +652,7 @@ impl StateProviderFactory for BlockchainProvider2 { } } -impl CanonChainTracker for BlockchainProvider2 +impl CanonChainTracker for BlockchainProvider2 where Self: BlockReader, { @@ -669,9 +688,9 @@ where impl BlockReaderIdExt for BlockchainProvider2 where - Self: BlockReader + ReceiptProviderIdExt, + Self: ReceiptProviderIdExt, { - fn block_by_id(&self, id: BlockId) -> ProviderResult> { + fn block_by_id(&self, id: BlockId) -> ProviderResult> { self.consistent_provider()?.block_by_id(id) } @@ -699,13 +718,15 @@ where } } -impl CanonStateSubscriptions for BlockchainProvider2 { - fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { +impl> CanonStateSubscriptions + for BlockchainProvider2 +{ + fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { self.canonical_in_memory_state.subscribe_canon_state() } } -impl ForkChoiceSubscriptions for BlockchainProvider2 { +impl ForkChoiceSubscriptions for BlockchainProvider2 { fn subscribe_safe_block(&self) -> ForkChoiceNotifications { let receiver = self.canonical_in_memory_state.subscribe_safe_block(); ForkChoiceNotifications(receiver) @@ -743,6 +764,8 @@ impl AccountReader for BlockchainProvider2 { } impl StateReader for BlockchainProvider2 { + type Receipt = ReceiptTy; + /// Re-constructs the [`ExecutionOutcome`] from in-memory and database state, if necessary. /// /// If data for the block does not exist, this will return [`None`]. @@ -752,19 +775,16 @@ impl StateReader for BlockchainProvider2 { /// inconsistent. Currently this can safely be called within the blockchain tree thread, /// because the tree thread is responsible for modifying the [`CanonicalInMemoryState`] in the /// first place. 
- fn get_state(&self, block: BlockNumber) -> ProviderResult> { + fn get_state( + &self, + block: BlockNumber, + ) -> ProviderResult>> { StateReader::get_state(&self.consistent_provider()?, block) } } #[cfg(test)] mod tests { - use std::{ - ops::{Range, RangeBounds}, - sync::Arc, - time::Instant, - }; - use crate::{ providers::BlockchainProvider2, test_utils::{ @@ -793,7 +813,10 @@ mod tests { use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; use reth_errors::ProviderError; use reth_execution_types::{Chain, ExecutionOutcome}; - use reth_primitives::{Receipt, SealedBlock, StaticFileSegment, TransactionSignedNoHash}; + use reth_primitives::{ + BlockExt, Receipt, SealedBlock, StaticFileSegment, TransactionSignedNoHash, + }; + use reth_primitives_traits::{BlockBody as _, SignedTransaction}; use reth_storage_api::{ BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, ChangeSetReader, DatabaseProviderFactory, HeaderProvider, ReceiptProvider, @@ -805,7 +828,11 @@ mod tests { random_receipt, BlockParams, BlockRangeParams, }; use revm::db::BundleState; - use std::ops::Bound; + use std::{ + ops::{Bound, Range, RangeBounds}, + sync::Arc, + time::Instant, + }; const TEST_BLOCKS_COUNT: usize = 5; @@ -885,13 +912,18 @@ mod tests { .unwrap_or_default(); // Insert blocks into the database - for block in &database_blocks { + for (block, receipts) in database_blocks.iter().zip(&receipts) { // TODO: this should be moved inside `insert_historical_block`: let mut transactions_writer = static_file_provider.latest_writer(StaticFileSegment::Transactions)?; + let mut receipts_writer = + static_file_provider.latest_writer(StaticFileSegment::Receipts)?; transactions_writer.increment_block(block.number)?; - for tx in block.body.transactions() { + receipts_writer.increment_block(block.number)?; + + for (tx, receipt) in block.body.transactions().iter().zip(receipts) { transactions_writer.append_transaction(tx_num, tx)?; + 
receipts_writer.append_receipt(tx_num, receipt)?; tx_num += 1; } @@ -900,19 +932,6 @@ mod tests { )?; } - // Insert receipts into the static files - UnifiedStorageWriter::new( - &provider_rw, - Some(factory.static_file_provider().latest_writer(StaticFileSegment::Receipts)?), - ) - .append_receipts_from_blocks( - // The initial block number is required - database_blocks.first().map(|b| b.number).unwrap_or_default(), - receipts[..database_blocks.len()] - .iter() - .map(|vec| vec.clone().into_iter().map(Some).collect::>()), - )?; - // Commit to both storages: database and static files UnifiedStorageWriter::commit(provider_rw)?; @@ -1000,7 +1019,7 @@ mod tests { // Push to disk let provider_rw = hook_provider.database_provider_rw().unwrap(); UnifiedStorageWriter::from(&provider_rw, &hook_provider.static_file_provider()) - .save_blocks(&[lowest_memory_block]) + .save_blocks(vec![lowest_memory_block]) .unwrap(); UnifiedStorageWriter::commit(provider_rw).unwrap(); @@ -2338,7 +2357,7 @@ mod tests { (block_range, |block: &SealedBlock| block.clone().unseal()), (block_with_senders_range, |block: &SealedBlock| block .clone() - .unseal() + .unseal::() .with_senders_unchecked(vec![])), (sealed_block_with_senders_range, |block: &SealedBlock| block .clone() @@ -2533,7 +2552,7 @@ mod tests { block_with_senders, |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( (BlockHashOrNumber::Number(block.number), TransactionVariant::WithHash), - block.clone().unseal().with_recovered_senders() + block.clone().unseal::().with_recovered_senders() ), (BlockHashOrNumber::Number(u64::MAX), TransactionVariant::WithHash) ), @@ -2542,7 +2561,7 @@ mod tests { block_with_senders, |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( (BlockHashOrNumber::Hash(block.hash()), TransactionVariant::WithHash), - block.clone().unseal().with_recovered_senders() + block.clone().unseal::().with_recovered_senders() ), (BlockHashOrNumber::Hash(B256::random()), TransactionVariant::WithHash) ), @@ -2552,7 
+2571,12 @@ mod tests { |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( (BlockHashOrNumber::Number(block.number), TransactionVariant::WithHash), Some( - block.clone().unseal().with_recovered_senders().unwrap().seal(block.hash()) + block + .clone() + .unseal::() + .with_recovered_senders() + .unwrap() + .seal(block.hash()) ) ), (BlockHashOrNumber::Number(u64::MAX), TransactionVariant::WithHash) @@ -2563,7 +2587,12 @@ mod tests { |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( (BlockHashOrNumber::Hash(block.hash()), TransactionVariant::WithHash), Some( - block.clone().unseal().with_recovered_senders().unwrap().seal(block.hash()) + block + .clone() + .unseal::() + .with_recovered_senders() + .unwrap() + .seal(block.hash()) ) ), (BlockHashOrNumber::Hash(B256::random()), TransactionVariant::WithHash) diff --git a/crates/storage/provider/src/providers/bundle_state_provider.rs b/crates/storage/provider/src/providers/bundle_state_provider.rs index be6549033cde..652f6fb33fd2 100644 --- a/crates/storage/provider/src/providers/bundle_state_provider.rs +++ b/crates/storage/provider/src/providers/bundle_state_provider.rs @@ -9,7 +9,8 @@ use reth_primitives::{Account, Bytecode}; use reth_storage_api::{StateProofProvider, StorageRootProvider}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ - updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, + StorageMultiProof, TrieInput, }; /// A state provider that resolves to data from either a wrapped [`crate::ExecutionOutcome`] @@ -138,6 +139,17 @@ impl StorageRootProvider storage.extend(&hashed_storage); self.state_provider.storage_proof(address, slot, storage) } + + fn storage_multiproof( + &self, + address: Address, + slots: &[B256], + hashed_storage: HashedStorage, + ) -> ProviderResult { + let mut storage = self.get_hashed_storage(address); + storage.extend(&hashed_storage); + 
self.state_provider.storage_multiproof(address, slots, storage) + } } impl StateProofProvider diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index fc9d739b0fea..e70f4b4e5e1d 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -8,6 +8,7 @@ use crate::{ }; use alloy_consensus::Header; use alloy_eips::{ + eip2718::Encodable2718, eip4895::{Withdrawal, Withdrawals}, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, HashOrNumber, }; @@ -18,14 +19,17 @@ use reth_db::models::BlockNumberAddress; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; use reth_execution_types::{BundleStateInit, ExecutionOutcome, RevertsInit}; -use reth_node_types::TxTy; +use reth_node_types::{BlockTy, ReceiptTy, TxTy}; use reth_primitives::{ - Account, Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, - StorageEntry, TransactionMeta, + Account, BlockWithSenders, SealedBlockFor, SealedBlockWithSenders, SealedHeader, StorageEntry, + TransactionMeta, }; +use reth_primitives_traits::{Block, BlockBody}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{DatabaseProviderFactory, StateProvider, StorageChangeSetReader}; +use reth_storage_api::{ + DatabaseProviderFactory, NodePrimitivesProvider, StateProvider, StorageChangeSetReader, +}; use reth_storage_errors::provider::ProviderResult; use revm::{ db::states::PlainStorageRevert, @@ -45,13 +49,14 @@ use tracing::trace; /// CAUTION: Avoid holding this provider for too long or the inner database transaction will /// time-out. #[derive(Debug)] +#[doc(hidden)] // triggers ICE for `cargo docs` pub struct ConsistentProvider { /// Storage provider. 
storage_provider: as DatabaseProviderFactory>::Provider, /// Head block at time of [`Self`] creation - head_block: Option>, + head_block: Option>>, /// In-memory canonical state. This is not a snapshot, and can change! Use with caution. - canonical_in_memory_state: CanonicalInMemoryState, + canonical_in_memory_state: CanonicalInMemoryState, } impl ConsistentProvider { @@ -62,7 +67,7 @@ impl ConsistentProvider { /// view of memory and database. pub fn new( storage_provider_factory: ProviderFactory, - state: CanonicalInMemoryState, + state: CanonicalInMemoryState, ) -> ProviderResult { // Each one provides a snapshot at the time of instantiation, but its order matters. // @@ -110,7 +115,7 @@ impl ConsistentProvider { Ok(self.block_state_provider_ref(state)?.boxed()) } else { trace!(target: "providers::blockchain", "Using database state for latest state provider"); - self.storage_provider.latest() + Ok(self.storage_provider.latest()) } } @@ -146,7 +151,7 @@ impl ConsistentProvider { pub fn get_state( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>>> { if range.is_empty() { return Ok(None) } @@ -304,7 +309,7 @@ impl ConsistentProvider { RangeInclusive, &mut P, ) -> ProviderResult>, - G: Fn(&BlockState, &mut P) -> Option, + G: Fn(&BlockState, &mut P) -> Option, P: FnMut(&T) -> bool, { // Each one provides a snapshot at the time of instantiation, but its order matters. @@ -396,8 +401,8 @@ impl ConsistentProvider { /// This uses a given [`BlockState`] to initialize a state provider for that block. 
fn block_state_provider_ref( &self, - state: &BlockState, - ) -> ProviderResult> { + state: &BlockState, + ) -> ProviderResult> { let anchor_hash = state.anchor().hash; let latest_historical = self.history_by_block_hash_ref(anchor_hash)?; let in_memory = state.chain().map(|block_state| block_state.block()).collect(); @@ -420,7 +425,7 @@ impl ConsistentProvider { &DatabaseProviderRO, RangeInclusive, ) -> ProviderResult>, - M: Fn(RangeInclusive, &BlockState) -> ProviderResult>, + M: Fn(RangeInclusive, &BlockState) -> ProviderResult>, { let in_mem_chain = self.head_block.iter().flat_map(|b| b.chain()).collect::>(); let provider = &self.storage_provider; @@ -442,7 +447,7 @@ impl ConsistentProvider { let (start, end) = self.convert_range_bounds(range, || { in_mem_chain .iter() - .map(|b| b.block_ref().block().body.transactions.len() as u64) + .map(|b| b.block_ref().block().body.transactions().len() as u64) .sum::() + last_block_body_index.last_tx_num() }); @@ -474,7 +479,7 @@ impl ConsistentProvider { // Iterate from the lowest block to the highest in-memory chain for block_state in in_mem_chain.iter().rev() { - let block_tx_count = block_state.block_ref().block().body.transactions.len(); + let block_tx_count = block_state.block_ref().block().body.transactions().len(); let remaining = (tx_range.end() - tx_range.start() + 1) as usize; // If the transaction range start is equal or higher than the next block first @@ -516,7 +521,7 @@ impl ConsistentProvider { ) -> ProviderResult> where S: FnOnce(&DatabaseProviderRO) -> ProviderResult>, - M: Fn(usize, TxNumber, &BlockState) -> ProviderResult>, + M: Fn(usize, TxNumber, &BlockState) -> ProviderResult>, { let in_mem_chain = self.head_block.iter().flat_map(|b| b.chain()).collect::>(); let provider = &self.storage_provider; @@ -548,10 +553,10 @@ impl ConsistentProvider { let executed_block = block_state.block_ref(); let block = executed_block.block(); - for tx_index in 0..block.body.transactions.len() { + for tx_index in 
0..block.body.transactions().len() { match id { HashOrNumber::Hash(tx_hash) => { - if tx_hash == block.body.transactions[tx_index].hash() { + if tx_hash == block.body.transactions()[tx_index].trie_hash() { return fetch_from_block_state(tx_index, in_memory_tx_num, block_state) } } @@ -583,7 +588,7 @@ impl ConsistentProvider { ) -> ProviderResult where S: FnOnce(&DatabaseProviderRO) -> ProviderResult, - M: Fn(&BlockState) -> ProviderResult, + M: Fn(&BlockState) -> ProviderResult, { if let Some(Some(block_state)) = self.head_block.as_ref().map(|b| b.block_on_chain(id)) { return fetch_from_block_state(block_state) @@ -612,9 +617,11 @@ impl ConsistentProvider { } } -impl StaticFileProviderFactory for ConsistentProvider { +impl NodePrimitivesProvider for ConsistentProvider { type Primitives = N::Primitives; +} +impl StaticFileProviderFactory for ConsistentProvider { fn static_file_provider(&self) -> StaticFileProvider { self.storage_provider.static_file_provider() } @@ -778,7 +785,13 @@ impl BlockIdReader for ConsistentProvider { } impl BlockReader for ConsistentProvider { - fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { + type Block = BlockTy; + + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + ) -> ProviderResult> { match source { BlockSource::Any | BlockSource::Canonical => { // Note: it's fine to return the unsealed block because the caller already has @@ -795,7 +808,7 @@ impl BlockReader for ConsistentProvider { } } - fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { self.get_in_memory_or_storage_by_block( id, |db_provider| db_provider.block(id), @@ -803,15 +816,19 @@ impl BlockReader for ConsistentProvider { ) } - fn pending_block(&self) -> ProviderResult> { + fn pending_block(&self) -> ProviderResult>> { Ok(self.canonical_in_memory_state.pending_block()) } - fn pending_block_with_senders(&self) -> ProviderResult> { + fn 
pending_block_with_senders( + &self, + ) -> ProviderResult>> { Ok(self.canonical_in_memory_state.pending_block_with_senders()) } - fn pending_block_and_receipts(&self) -> ProviderResult)>> { + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { Ok(self.canonical_in_memory_state.pending_block_and_receipts()) } @@ -824,7 +841,7 @@ impl BlockReader for ConsistentProvider { return Ok(Some(Vec::new())) } - Ok(Some(block_state.block_ref().block().body.ommers.clone())) + Ok(block_state.block_ref().block().body.ommers().map(|o| o.to_vec())) }, ) } @@ -850,7 +867,7 @@ impl BlockReader for ConsistentProvider { // Iterate from the lowest block in memory until our target block for state in block_state.chain().collect::>().into_iter().rev() { - let block_tx_count = state.block_ref().block.body.transactions.len() as u64; + let block_tx_count = state.block_ref().block.body.transactions().len() as u64; if state.block_ref().block().number == number { stored_indices.tx_count = block_tx_count; } else { @@ -873,7 +890,7 @@ impl BlockReader for ConsistentProvider { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.get_in_memory_or_storage_by_block( id, |db_provider| db_provider.block_with_senders(id, transaction_kind), @@ -885,7 +902,7 @@ impl BlockReader for ConsistentProvider { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.get_in_memory_or_storage_by_block( id, |db_provider| db_provider.sealed_block_with_senders(id, transaction_kind), @@ -893,7 +910,7 @@ impl BlockReader for ConsistentProvider { ) } - fn block_range(&self, range: RangeInclusive) -> ProviderResult> { + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.block_range(range), @@ -905,7 +922,7 @@ impl BlockReader for ConsistentProvider { fn 
block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.block_with_senders_range(range), @@ -917,7 +934,7 @@ impl BlockReader for ConsistentProvider { fn sealed_block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.sealed_block_with_senders_range(range), @@ -947,7 +964,7 @@ impl TransactionsProvider for ConsistentProvider { .block_ref() .block() .body - .transactions + .transactions() .get(tx_index) .cloned() .map(Into::into)) @@ -967,7 +984,7 @@ impl TransactionsProvider for ConsistentProvider { .block_ref() .block() .body - .transactions + .transactions() .get(tx_index) .cloned() .map(Into::into)) @@ -977,7 +994,7 @@ impl TransactionsProvider for ConsistentProvider { fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { if let Some(tx) = self.head_block.as_ref().and_then(|b| b.transaction_on_chain(hash)) { - return Ok(Some(tx.into())) + return Ok(Some(tx)) } self.storage_provider.transaction_by_hash(hash) @@ -990,7 +1007,7 @@ impl TransactionsProvider for ConsistentProvider { if let Some((tx, meta)) = self.head_block.as_ref().and_then(|b| b.transaction_meta_on_chain(tx_hash)) { - return Ok(Some((tx.into(), meta))) + return Ok(Some((tx, meta))) } self.storage_provider.transaction_by_hash_with_meta(tx_hash) @@ -1011,18 +1028,7 @@ impl TransactionsProvider for ConsistentProvider { self.get_in_memory_or_storage_by_block( id, |provider| provider.transactions_by_block(id), - |block_state| { - Ok(Some( - block_state - .block_ref() - .block() - .body - .transactions - .iter() - .map(|tx| tx.clone().into()) - .collect(), - )) - }, + |block_state| Ok(Some(block_state.block_ref().block().body().transactions().to_vec())), ) } @@ -1033,18 +1039,7 @@ impl TransactionsProvider 
for ConsistentProvider { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.transactions_by_block_range(range), - |block_state, _| { - Some( - block_state - .block_ref() - .block() - .body - .transactions - .iter() - .map(|tx| tx.clone().into()) - .collect(), - ) - }, + |block_state, _| Some(block_state.block_ref().block().body().transactions().to_vec()), |_| true, ) } @@ -1057,11 +1052,7 @@ impl TransactionsProvider for ConsistentProvider { range, |db_provider, db_range| db_provider.transactions_by_tx_range(db_range), |index_range, block_state| { - Ok(block_state.block_ref().block().body.transactions[index_range] - .iter() - .cloned() - .map(Into::into) - .collect()) + Ok(block_state.block_ref().block().body.transactions()[index_range].to_vec()) }, ) } @@ -1087,7 +1078,9 @@ impl TransactionsProvider for ConsistentProvider { } impl ReceiptProvider for ConsistentProvider { - fn receipt(&self, id: TxNumber) -> ProviderResult> { + type Receipt = ReceiptTy; + + fn receipt(&self, id: TxNumber) -> ProviderResult> { self.get_in_memory_or_storage_by_tx( id.into(), |provider| provider.receipt(id), @@ -1097,7 +1090,7 @@ impl ReceiptProvider for ConsistentProvider { ) } - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { for block_state in self.head_block.iter().flat_map(|b| b.chain()) { let executed_block = block_state.block_ref(); let block = executed_block.block(); @@ -1105,12 +1098,13 @@ impl ReceiptProvider for ConsistentProvider { // assuming 1:1 correspondence between transactions and receipts debug_assert_eq!( - block.body.transactions.len(), + block.body.transactions().len(), receipts.len(), "Mismatch between transaction and receipt count" ); - if let Some(tx_index) = block.body.transactions.iter().position(|tx| tx.hash() == hash) + if let Some(tx_index) = + block.body.transactions().iter().position(|tx| tx.trie_hash() == hash) { // safe to use 
tx_index for receipts due to 1:1 correspondence return Ok(receipts.get(tx_index).cloned()); @@ -1120,7 +1114,10 @@ impl ReceiptProvider for ConsistentProvider { self.storage_provider.receipt_by_hash(hash) } - fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>> { + fn receipts_by_block( + &self, + block: BlockHashOrNumber, + ) -> ProviderResult>> { self.get_in_memory_or_storage_by_block( block, |db_provider| db_provider.receipts_by_block(block), @@ -1131,7 +1128,7 @@ impl ReceiptProvider for ConsistentProvider { fn receipts_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.get_in_memory_or_storage_by_tx_range( range, |db_provider, db_range| db_provider.receipts_by_tx_range(db_range), @@ -1143,7 +1140,7 @@ impl ReceiptProvider for ConsistentProvider { } impl ReceiptProviderIdExt for ConsistentProvider { - fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { + fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { match block { BlockId::Hash(rpc_block_hash) => { let mut receipts = self.receipts_by_block(rpc_block_hash.block_hash.into())?; @@ -1188,7 +1185,7 @@ impl WithdrawalsProvider for ConsistentProvider { self.get_in_memory_or_storage_by_block( id, |db_provider| db_provider.withdrawals_by_block(id, timestamp), - |block_state| Ok(block_state.block_ref().block().body.withdrawals.clone()), + |block_state| Ok(block_state.block_ref().block().body.withdrawals().cloned()), ) } @@ -1203,8 +1200,8 @@ impl WithdrawalsProvider for ConsistentProvider { .block_ref() .block() .body - .withdrawals - .clone() + .withdrawals() + .cloned() .and_then(|mut w| w.pop())) }, ) @@ -1311,7 +1308,7 @@ impl ChainSpecProvider for ConsistentProvider { } impl BlockReaderIdExt for ConsistentProvider { - fn block_by_id(&self, id: BlockId) -> ProviderResult> { + fn block_by_id(&self, id: BlockId) -> ProviderResult> { match id { BlockId::Number(num) => self.block_by_number_or_tag(num), 
BlockId::Hash(hash) => { @@ -1504,6 +1501,8 @@ impl AccountReader for ConsistentProvider { } impl StateReader for ConsistentProvider { + type Receipt = ReceiptTy; + /// Re-constructs the [`ExecutionOutcome`] from in-memory and database state, if necessary. /// /// If data for the block does not exist, this will return [`None`]. @@ -1513,7 +1512,10 @@ impl StateReader for ConsistentProvider { /// inconsistent. Currently this can safely be called within the blockchain tree thread, /// because the tree thread is responsible for modifying the [`CanonicalInMemoryState`] in the /// first place. - fn get_state(&self, block: BlockNumber) -> ProviderResult> { + fn get_state( + &self, + block: BlockNumber, + ) -> ProviderResult>> { if let Some(state) = self.head_block.as_ref().and_then(|b| b.block_on_chain(block.into())) { let state = state.block_ref().execution_outcome().clone(); Ok(Some(state)) diff --git a/crates/storage/provider/src/providers/database/chain.rs b/crates/storage/provider/src/providers/database/chain.rs index 8f9a6395a9dd..57bc2e0b5ce6 100644 --- a/crates/storage/provider/src/providers/database/chain.rs +++ b/crates/storage/provider/src/providers/database/chain.rs @@ -1,25 +1,41 @@ -use crate::{providers::NodeTypes, DatabaseProvider}; +use crate::{providers::NodeTypesForProvider, DatabaseProvider}; use reth_db::transaction::{DbTx, DbTxMut}; use reth_node_types::FullNodePrimitives; use reth_primitives::EthPrimitives; -use reth_storage_api::{ChainStorageWriter, EthStorage}; +use reth_storage_api::{ChainStorageReader, ChainStorageWriter, EthStorage}; /// Trait that provides access to implementations of [`ChainStorage`] pub trait ChainStorage: Send + Sync { + /// Provides access to the chain reader. + fn reader(&self) -> impl ChainStorageReader, Primitives> + where + TX: DbTx + 'static, + Types: NodeTypesForProvider; + /// Provides access to the chain writer. 
fn writer(&self) -> impl ChainStorageWriter, Primitives> where TX: DbTxMut + DbTx + 'static, - Types: NodeTypes; + Types: NodeTypesForProvider; } impl ChainStorage for EthStorage { + fn reader( + &self, + ) -> impl ChainStorageReader, EthPrimitives> + where + TX: DbTx + 'static, + Types: NodeTypesForProvider, + { + self + } + fn writer( &self, ) -> impl ChainStorageWriter, EthPrimitives> where TX: DbTxMut + DbTx + 'static, - Types: NodeTypes, + Types: NodeTypesForProvider, { self } diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 57f09e72306f..3c22a1a73a23 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -19,14 +19,14 @@ use reth_db::{init_db, mdbx::DatabaseArguments, DatabaseEnv}; use reth_db_api::{database::Database, models::StoredBlockBodyIndices}; use reth_errors::{RethError, RethResult}; use reth_evm::ConfigureEvmEnv; -use reth_node_types::{NodeTypesWithDB, TxTy}; +use reth_node_types::{BlockTy, NodeTypesWithDB, ReceiptTy, TxTy}; use reth_primitives::{ - Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, - StaticFileSegment, TransactionMeta, + BlockWithSenders, SealedBlockFor, SealedBlockWithSenders, SealedHeader, StaticFileSegment, + TransactionMeta, }; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::TryIntoHistoricalStateProvider; +use reth_storage_api::{NodePrimitivesProvider, TryIntoHistoricalStateProvider}; use reth_storage_errors::provider::ProviderResult; use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; use std::{ @@ -202,6 +202,10 @@ impl ProviderFactory { } } +impl NodePrimitivesProvider for ProviderFactory { + type Primitives = N::Primitives; +} + impl DatabaseProviderFactory for ProviderFactory { type DB = N::DB; type Provider = DatabaseProvider<::TX, N>; @@ 
-217,8 +221,6 @@ impl DatabaseProviderFactory for ProviderFactory { } impl StaticFileProviderFactory for ProviderFactory { - type Primitives = N::Primitives; - /// Returns static file provider fn static_file_provider(&self) -> StaticFileProvider { self.static_file_provider.clone() @@ -353,23 +355,33 @@ impl BlockNumReader for ProviderFactory { } impl BlockReader for ProviderFactory { - fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { + type Block = BlockTy; + + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + ) -> ProviderResult> { self.provider()?.find_block_by_hash(hash, source) } - fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { self.provider()?.block(id) } - fn pending_block(&self) -> ProviderResult> { + fn pending_block(&self) -> ProviderResult>> { self.provider()?.pending_block() } - fn pending_block_with_senders(&self) -> ProviderResult> { + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { self.provider()?.pending_block_with_senders() } - fn pending_block_and_receipts(&self) -> ProviderResult)>> { + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { self.provider()?.pending_block_and_receipts() } @@ -388,7 +400,7 @@ impl BlockReader for ProviderFactory { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.provider()?.block_with_senders(id, transaction_kind) } @@ -396,25 +408,25 @@ impl BlockReader for ProviderFactory { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.provider()?.sealed_block_with_senders(id, transaction_kind) } - fn block_range(&self, range: RangeInclusive) -> ProviderResult> { + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { self.provider()?.block_range(range) } fn block_with_senders_range( &self, range: 
RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.provider()?.block_with_senders_range(range) } fn sealed_block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.provider()?.sealed_block_with_senders_range(range) } } @@ -496,7 +508,8 @@ impl TransactionsProvider for ProviderFactory { } impl ReceiptProvider for ProviderFactory { - fn receipt(&self, id: TxNumber) -> ProviderResult> { + type Receipt = ReceiptTy; + fn receipt(&self, id: TxNumber) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Receipts, id, @@ -505,18 +518,21 @@ impl ReceiptProvider for ProviderFactory { ) } - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { self.provider()?.receipt_by_hash(hash) } - fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>> { + fn receipts_by_block( + &self, + block: BlockHashOrNumber, + ) -> ProviderResult>> { self.provider()?.receipts_by_block(block) } fn receipts_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.static_file_provider.get_range_with_static_file_or_database( StaticFileSegment::Receipts, to_range(range), @@ -658,6 +674,7 @@ mod tests { test_utils::{create_test_static_files_dir, ERROR_TEMPDIR}, }; use reth_primitives::StaticFileSegment; + use reth_primitives_traits::SignedTransaction; use reth_prune_types::{PruneMode, PruneModes}; use reth_storage_errors::provider::ProviderError; use reth_testing_utils::generators::{self, random_block, random_header, BlockParams}; diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index bf9762037262..cfbe20cf4b44 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -9,23 +9,27 @@ use 
crate::{ traits::{ AccountExtReader, BlockSource, ChangeSetReader, ReceiptProvider, StageCheckpointWriter, }, - writer::UnifiedStorageWriter, AccountReader, BlockBodyWriter, BlockExecutionWriter, BlockHashReader, BlockNumReader, BlockReader, BlockWriter, BundleStateInit, ChainStateBlockReader, ChainStateBlockWriter, DBProvider, EvmEnvProvider, HashingWriter, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, HistoricalStateProvider, HistoricalStateProviderRef, HistoryWriter, LatestStateProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderError, PruneCheckpointReader, PruneCheckpointWriter, RevertsInit, StageCheckpointReader, - StateChangeWriter, StateProviderBox, StateReader, StateWriter, StaticFileProviderFactory, - StatsReader, StorageLocation, StorageReader, StorageTrieWriter, TransactionVariant, - TransactionsProvider, TransactionsProviderExt, TrieWriter, WithdrawalsProvider, + StateCommitmentProvider, StateProviderBox, StateWriter, StaticFileProviderFactory, StatsReader, + StorageLocation, StorageReader, StorageTrieWriter, TransactionVariant, TransactionsProvider, + TransactionsProviderExt, TrieWriter, WithdrawalsProvider, }; use alloy_consensus::Header; use alloy_eips::{ + eip2718::Encodable2718, eip4895::{Withdrawal, Withdrawals}, BlockHashOrNumber, }; -use alloy_primitives::{keccak256, Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; +use alloy_primitives::{ + keccak256, + map::{hash_map, HashMap, HashSet}, + Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256, +}; use itertools::Itertools; use rayon::slice::ParallelSliceMut; use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec, EthereumHardforks}; @@ -46,16 +50,18 @@ use reth_db_api::{ use reth_evm::ConfigureEvmEnv; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_network_p2p::headers::downloader::SyncTarget; -use reth_node_types::{NodeTypes, TxTy}; +use reth_node_types::{BlockTy, BodyTy, NodeTypes, ReceiptTy, TxTy}; use reth_primitives::{ - 
Account, Block, BlockBody, BlockWithSenders, Bytecode, GotExpected, NodePrimitives, Receipt, - SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, - TransactionMeta, TransactionSigned, TransactionSignedNoHash, + Account, BlockExt, BlockWithSenders, Bytecode, GotExpected, SealedBlock, SealedBlockFor, + SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, TransactionMeta, }; -use reth_primitives_traits::{BlockBody as _, SignedTransaction}; +use reth_primitives_traits::{Block as _, BlockBody as _, SignedTransaction}; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{StateProvider, StorageChangeSetReader, TryIntoHistoricalStateProvider}; +use reth_storage_api::{ + BlockBodyReader, NodePrimitivesProvider, StateProvider, StorageChangeSetReader, + TryIntoHistoricalStateProvider, +}; use reth_storage_errors::provider::{ProviderResult, RootMismatch}; use reth_trie::{ prefix_set::{PrefixSet, PrefixSetMut, TriePrefixSets}, @@ -69,7 +75,7 @@ use revm::{ }; use std::{ cmp::Ordering, - collections::{hash_map, BTreeMap, BTreeSet, HashMap, HashSet}, + collections::{BTreeMap, BTreeSet}, fmt::Debug, ops::{Deref, DerefMut, Range, RangeBounds, RangeInclusive}, sync::{mpsc, Arc}, @@ -155,10 +161,10 @@ impl DatabaseProvider { } impl DatabaseProvider { - /// State provider for latest block - pub fn latest<'a>(&'a self) -> ProviderResult> { + /// State provider for latest state + pub fn latest<'a>(&'a self) -> Box { trace!(target: "providers::db", "Returning latest state provider"); - Ok(Box::new(LatestStateProviderRef::new(self))) + Box::new(LatestStateProviderRef::new(self)) } /// Storage provider for state at that given block hash @@ -203,11 +209,19 @@ impl DatabaseProvider { Ok(Box::new(state_provider)) } + + #[cfg(feature = "test-utils")] + /// Sets the prune modes for provider. 
+ pub fn set_prune_modes(&mut self, prune_modes: PruneModes) { + self.prune_modes = prune_modes; + } } -impl StaticFileProviderFactory for DatabaseProvider { +impl NodePrimitivesProvider for DatabaseProvider { type Primitives = N::Primitives; +} +impl StaticFileProviderFactory for DatabaseProvider { /// Returns a static file provider fn static_file_provider(&self) -> StaticFileProvider { self.static_file_provider.clone() @@ -330,6 +344,34 @@ impl DatabaseProvider ProviderResult<()> { + if remove_from.database() { + // iterate over block body and remove receipts + self.remove::>>(from_tx..)?; + } + + if remove_from.static_files() && !self.prune_modes.has_receipts_pruning() { + let static_file_receipt_num = + self.static_file_provider.get_highest_static_file_tx(StaticFileSegment::Receipts); + + let to_delete = static_file_receipt_num + .map(|static_num| (static_num + 1).saturating_sub(from_tx)) + .unwrap_or_default(); + + self.static_file_provider + .latest_writer(StaticFileSegment::Receipts)? + .prune_receipts(to_delete, last_block)?; + } + + Ok(()) + } } impl TryIntoHistoricalStateProvider for DatabaseProvider { @@ -374,13 +416,17 @@ impl TryIntoHistoricalStateProvider for Databa } } +impl StateCommitmentProvider for DatabaseProvider { + type StateCommitment = N::StateCommitment; +} + impl DatabaseProvider { // TODO: uncomment below, once `reth debug_cmd` has been feature gated with dev. // #[cfg(any(test, feature = "test-utils"))] /// Inserts an historical block. **Used for setting up test environments** pub fn insert_historical_block( &self, - block: SealedBlockWithSenders::Body>, + block: SealedBlockWithSenders<::Block>, ) -> ProviderResult { let ttd = if block.number == 0 { block.difficulty @@ -455,7 +501,7 @@ where Ok(Vec::new()) } -impl DatabaseProvider { +impl DatabaseProvider { /// Creates a provider with an inner read-only transaction. pub const fn new( tx: TX, @@ -517,21 +563,11 @@ impl DatabaseProvider { N::ChainSpec: EthereumHardforks, H: AsRef
, HF: FnOnce(BlockNumber) -> ProviderResult>, - BF: FnOnce( - H, - Vec, - Vec
, - Vec
, - Option, - ) -> ProviderResult>, + BF: FnOnce(H, BodyTy, Vec
) -> ProviderResult>, { let Some(block_number) = self.convert_hash_or_number(id)? else { return Ok(None) }; let Some(header) = header_by_number(block_number)? else { return Ok(None) }; - let ommers = self.ommers(block_number.into())?.unwrap_or_default(); - let withdrawals = - self.withdrawals_by_block(block_number.into(), header.as_ref().timestamp)?; - // Get the block body // // If the body indices are not found, this means that the transactions either do not exist @@ -548,9 +584,14 @@ impl DatabaseProvider { (self.transactions_by_tx_range(tx_range.clone())?, self.senders_by_tx_range(tx_range)?) }; - let body = transactions.into_iter().map(Into::into).collect(); + let body = self + .storage + .reader() + .read_block_bodies(self, vec![(header.as_ref(), transactions)])? + .pop() + .ok_or(ProviderError::InvalidStorageOutput)?; - construct_block(header, body, senders, ommers, withdrawals) + construct_block(header, body, senders) } /// Returns a range of blocks from the database. @@ -572,7 +613,7 @@ impl DatabaseProvider { N::ChainSpec: EthereumHardforks, H: AsRef
, HF: FnOnce(RangeInclusive) -> ProviderResult>, - F: FnMut(H, Range, Vec
, Option) -> ProviderResult, + F: FnMut(H, BodyTy, Range) -> ProviderResult, { if range.is_empty() { return Ok(Vec::new()) @@ -582,50 +623,41 @@ impl DatabaseProvider { let mut blocks = Vec::with_capacity(len); let headers = headers_range(range)?; - let mut ommers_cursor = self.tx.cursor_read::()?; - let mut withdrawals_cursor = self.tx.cursor_read::()?; + let mut tx_cursor = self.tx.cursor_read::>>()?; let mut block_body_cursor = self.tx.cursor_read::()?; + let mut present_headers = Vec::new(); for header in headers { - let header_ref = header.as_ref(); // If the body indices are not found, this means that the transactions either do // not exist in the database yet, or they do exit but are // not indexed. If they exist but are not indexed, we don't // have enough information to return the block anyways, so // we skip the block. if let Some((_, block_body_indices)) = - block_body_cursor.seek_exact(header_ref.number)? + block_body_cursor.seek_exact(header.as_ref().number)? { let tx_range = block_body_indices.tx_num_range(); - - // If we are past shanghai, then all blocks should have a withdrawal list, - // even if empty - let withdrawals = - if self.chain_spec.is_shanghai_active_at_timestamp(header_ref.timestamp) { - withdrawals_cursor - .seek_exact(header_ref.number)? - .map(|(_, w)| w.withdrawals) - .unwrap_or_default() - .into() - } else { - None - }; - let ommers = - if self.chain_spec.final_paris_total_difficulty(header_ref.number).is_some() { - Vec::new() - } else { - ommers_cursor - .seek_exact(header_ref.number)? - .map(|(_, o)| o.ommers) - .unwrap_or_default() - }; - - if let Ok(b) = assemble_block(header, tx_range, ommers, withdrawals) { - blocks.push(b); - } + present_headers.push((header, tx_range)); } } + let mut inputs = Vec::new(); + for (header, tx_range) in &present_headers { + let transactions = if tx_range.is_empty() { + Vec::new() + } else { + self.transactions_by_tx_range_with_cursor(tx_range.clone(), &mut tx_cursor)? 
+ }; + + inputs.push((header.as_ref(), transactions)); + } + + let bodies = self.storage.reader().read_block_bodies(self, inputs)?; + + for ((header, tx_range), body) in present_headers.into_iter().zip(bodies) { + blocks.push(assemble_block(header, body, tx_range)?); + } + Ok(blocks) } @@ -649,34 +681,22 @@ impl DatabaseProvider { N::ChainSpec: EthereumHardforks, H: AsRef
, HF: Fn(RangeInclusive) -> ProviderResult>, - BF: Fn( - H, - Vec, - Vec
, - Option, - Vec
, - ) -> ProviderResult, + BF: Fn(H, BodyTy, Vec
) -> ProviderResult, { - let mut tx_cursor = self.tx.cursor_read::>>()?; let mut senders_cursor = self.tx.cursor_read::()?; - self.block_range(range, headers_range, |header, tx_range, ommers, withdrawals| { - let (body, senders) = if tx_range.is_empty() { - (Vec::new(), Vec::new()) + self.block_range(range, headers_range, |header, body, tx_range| { + let senders = if tx_range.is_empty() { + Vec::new() } else { - let body = self - .transactions_by_tx_range_with_cursor(tx_range.clone(), &mut tx_cursor)? - .into_iter() - .map(Into::into) - .collect::>(); // fetch senders from the senders table let known_senders = senders_cursor .walk_range(tx_range.clone())? .collect::, _>>()?; - let mut senders = Vec::with_capacity(body.len()); - for (tx_num, tx) in tx_range.zip(body.iter()) { + let mut senders = Vec::with_capacity(body.transactions().len()); + for (tx_num, tx) in tx_range.zip(body.transactions()) { match known_senders.get(&tx_num) { None => { // recover the sender from the transaction if not found @@ -689,101 +709,13 @@ impl DatabaseProvider { } } - (body, senders) + senders }; - assemble_block(header, body, ommers, withdrawals, senders) + assemble_block(header, body, senders) }) } - /// Return the last N blocks of state, recreating the [`ExecutionOutcome`]. - /// - /// 1. Iterate over the [`BlockBodyIndices`][tables::BlockBodyIndices] table to get all the - /// transaction ids. - /// 2. Iterate over the [`StorageChangeSets`][tables::StorageChangeSets] table and the - /// [`AccountChangeSets`][tables::AccountChangeSets] tables in reverse order to reconstruct - /// the changesets. - /// - In order to have both the old and new values in the changesets, we also access the - /// plain state tables. - /// 3. While iterating over the changeset tables, if we encounter a new account or storage slot, - /// we: - /// 1. Take the old value from the changeset - /// 2. Take the new value from the plain state - /// 3. Save the old value to the local state - /// 4. 
While iterating over the changeset tables, if we encounter an account/storage slot we - /// have seen before we: - /// 1. Take the old value from the changeset - /// 2. Take the new value from the local state - /// 3. Set the local state to the value in the changeset - /// - /// If the range is empty, or there are no blocks for the given range, then this returns `None`. - pub fn get_state( - &self, - range: RangeInclusive, - ) -> ProviderResult> { - if range.is_empty() { - return Ok(None) - } - let start_block_number = *range.start(); - - // We are not removing block meta as it is used to get block changesets. - let block_bodies = self.get::(range.clone())?; - - // get transaction receipts - let Some(from_transaction_num) = block_bodies.first().map(|bodies| bodies.1.first_tx_num()) - else { - return Ok(None) - }; - let Some(to_transaction_num) = block_bodies.last().map(|bodies| bodies.1.last_tx_num()) - else { - return Ok(None) - }; - - let storage_range = BlockNumberAddress::range(range.clone()); - - let storage_changeset = self.get::(storage_range)?; - let account_changeset = self.get::(range)?; - - // This is not working for blocks that are not at tip. as plain state is not the last - // state of end range. We should rename the functions or add support to access - // History state. Accessing history state can be tricky but we are not gaining - // anything. - let mut plain_accounts_cursor = self.tx.cursor_read::()?; - let mut plain_storage_cursor = self.tx.cursor_dup_read::()?; - - let (state, reverts) = self.populate_bundle_state( - account_changeset, - storage_changeset, - &mut plain_accounts_cursor, - &mut plain_storage_cursor, - )?; - - // iterate over block body and create ExecutionResult - let mut receipt_iter = - self.get::(from_transaction_num..=to_transaction_num)?.into_iter(); - - let mut receipts = Vec::with_capacity(block_bodies.len()); - // loop break if we are at the end of the blocks. 
- for (_, block_body) in block_bodies { - let mut block_receipts = Vec::with_capacity(block_body.tx_count as usize); - for _ in block_body.tx_num_range() { - if let Some((_, receipt)) = receipt_iter.next() { - block_receipts.push(Some(receipt)); - } - } - receipts.push(block_receipts); - } - - Ok(Some(ExecutionOutcome::new_init( - state, - reverts, - Vec::new(), - receipts.into(), - start_block_number, - Vec::new(), - ))) - } - /// Populate a [`BundleStateInit`] and [`RevertsInit`] using cursors over the /// [`PlainAccountState`] and [`PlainStorageState`] tables, based on the given storage and /// account changesets. @@ -1214,7 +1146,13 @@ impl BlockNumReader for DatabaseProvider BlockReader for DatabaseProvider { - fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { + type Block = BlockTy; + + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + ) -> ProviderResult> { if source.is_canonical() { self.block(hash.into()) } else { @@ -1227,39 +1165,44 @@ impl BlockReader for DatabaseProvid /// If the header for this block is not found, this returns `None`. /// If the header is found, but the transactions either do not exist, or are not indexed, this /// will return None. - fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { if let Some(number) = self.convert_hash_or_number(id)? { if let Some(header) = self.header_by_number(number)? { - let withdrawals = self.withdrawals_by_block(number.into(), header.timestamp)?; - let ommers = self.ommers(number.into())?.unwrap_or_default(); // If the body indices are not found, this means that the transactions either do not // exist in the database yet, or they do exit but are not indexed. // If they exist but are not indexed, we don't have enough // information to return the block anyways, so we return `None`. - let transactions = match self.transactions_by_block(number.into())? 
{ - Some(transactions) => transactions.into_iter().map(Into::into).collect(), - None => return Ok(None), + let Some(transactions) = self.transactions_by_block(number.into())? else { + return Ok(None) }; - return Ok(Some(Block { - header, - body: BlockBody { transactions, ommers, withdrawals }, - })) + let body = self + .storage + .reader() + .read_block_bodies(self, vec![(&header, transactions)])? + .pop() + .ok_or(ProviderError::InvalidStorageOutput)?; + + return Ok(Some(Self::Block::new(header, body))) } } Ok(None) } - fn pending_block(&self) -> ProviderResult> { + fn pending_block(&self) -> ProviderResult>> { Ok(None) } - fn pending_block_with_senders(&self) -> ProviderResult> { + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { Ok(None) } - fn pending_block_and_receipts(&self) -> ProviderResult)>> { + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { Ok(None) } @@ -1298,13 +1241,13 @@ impl BlockReader for DatabaseProvid &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.block_with_senders( id, transaction_kind, |block_number| self.header_by_number(block_number), - |header, transactions, senders, ommers, withdrawals| { - Block { header, body: BlockBody { transactions, ommers, withdrawals } } + |header, body, senders| { + Self::Block::new(header, body) // Note: we're using unchecked here because we know the block contains valid txs // wrt to its height and can ignore the s value check so pre // EIP-2 txs are allowed @@ -1319,13 +1262,13 @@ impl BlockReader for DatabaseProvid &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.block_with_senders( id, transaction_kind, |block_number| self.sealed_header(block_number), - |header, transactions, senders, ommers, withdrawals| { - SealedBlock { header, body: BlockBody { transactions, ommers, withdrawals } } + |header, body, senders| { + 
SealedBlock { header, body } // Note: we're using unchecked here because we know the block contains valid txs // wrt to its height and can ignore the s value check so pre // EIP-2 txs are allowed @@ -1336,34 +1279,23 @@ impl BlockReader for DatabaseProvid ) } - fn block_range(&self, range: RangeInclusive) -> ProviderResult> { - let mut tx_cursor = self.tx.cursor_read::>>()?; + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { self.block_range( range, |range| self.headers_range(range), - |header, tx_range, ommers, withdrawals| { - let transactions = if tx_range.is_empty() { - Vec::new() - } else { - self.transactions_by_tx_range_with_cursor(tx_range, &mut tx_cursor)? - .into_iter() - .map(Into::into) - .collect() - }; - Ok(Block { header, body: BlockBody { transactions, ommers, withdrawals } }) - }, + |header, body, _| Ok(Self::Block::new(header, body)), ) } fn block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.block_with_senders_range( range, |range| self.headers_range(range), - |header, transactions, ommers, withdrawals, senders| { - Block { header, body: BlockBody { transactions, ommers, withdrawals } } + |header, body, senders| { + Self::Block::new(header, body) .try_with_senders_unchecked(senders) .map_err(|_| ProviderError::SenderRecoveryError) }, @@ -1373,16 +1305,13 @@ impl BlockReader for DatabaseProvid fn sealed_block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.block_with_senders_range( range, |range| self.sealed_headers_range(range), - |header, transactions, ommers, withdrawals, senders| { - SealedBlockWithSenders::new( - SealedBlock { header, body: BlockBody { transactions, ommers, withdrawals } }, - senders, - ) - .ok_or(ProviderError::SenderRecoveryError) + |header, body, senders| { + SealedBlockWithSenders::new(SealedBlock { header, body }, senders) + .ok_or(ProviderError::SenderRecoveryError) }, ) } @@ -1402,7 
+1331,7 @@ impl TransactionsProviderExt tx_range, |static_file, range, _| static_file.transaction_hashes_by_range(range), |tx_range, _| { - let mut tx_cursor = self.tx.cursor_read::()?; + let mut tx_cursor = self.tx.cursor_read::>>()?; let tx_range_size = tx_range.clone().count(); let tx_walker = tx_cursor.walk_range(tx_range)?; @@ -1411,12 +1340,15 @@ impl TransactionsProviderExt let mut transaction_count = 0; #[inline] - fn calculate_hash( - entry: Result<(TxNumber, TransactionSignedNoHash), DatabaseError>, + fn calculate_hash( + entry: Result<(TxNumber, T), DatabaseError>, rlp_buf: &mut Vec, - ) -> Result<(B256, TxNumber), Box> { + ) -> Result<(B256, TxNumber), Box> + where + T: Encodable2718, + { let (tx_id, tx) = entry.map_err(|e| Box::new(e.into()))?; - tx.transaction.eip2718_encode(&tx.signature, rlp_buf); + tx.encode_2718(rlp_buf); Ok((keccak256(rlp_buf), tx_id)) } @@ -1603,16 +1535,18 @@ impl TransactionsProvider for Datab } impl ReceiptProvider for DatabaseProvider { - fn receipt(&self, id: TxNumber) -> ProviderResult> { + type Receipt = ReceiptTy; + + fn receipt(&self, id: TxNumber) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Receipts, id, |static_file| static_file.receipt(id), - || Ok(self.tx.get::(id)?), + || Ok(self.tx.get::>(id)?), ) } - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { if let Some(id) = self.transaction_id(hash)? { self.receipt(id) } else { @@ -1620,7 +1554,10 @@ impl ReceiptProvider for DatabasePr } } - fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>> { + fn receipts_by_block( + &self, + block: BlockHashOrNumber, + ) -> ProviderResult>> { if let Some(number) = self.convert_hash_or_number(block)? { if let Some(body) = self.block_body_indices(number)? 
{ let tx_range = body.tx_num_range(); @@ -1637,12 +1574,15 @@ impl ReceiptProvider for DatabasePr fn receipts_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.static_file_provider.get_range_with_static_file_or_database( StaticFileSegment::Receipts, to_range(range), |static_file, range, _| static_file.receipts_by_tx_range(range), - |range, _| self.cursor_read_collect::(range).map_err(Into::into), + |range, _| { + self.cursor_read_collect::>(range) + .map_err(Into::into) + }, |_| true, ) } @@ -1867,9 +1807,77 @@ impl StorageReader for DatabaseProvider } } -impl StateChangeWriter +impl StateWriter for DatabaseProvider { + type Receipt = ReceiptTy; + + fn write_state( + &self, + execution_outcome: ExecutionOutcome, + is_value_known: OriginalValuesKnown, + write_receipts_to: StorageLocation, + ) -> ProviderResult<()> { + let (plain_state, reverts) = + execution_outcome.bundle.to_plain_state_and_reverts(is_value_known); + + self.write_state_reverts(reverts, execution_outcome.first_block)?; + self.write_state_changes(plain_state)?; + + let mut bodies_cursor = self.tx.cursor_read::()?; + + let has_receipts_pruning = self.prune_modes.has_receipts_pruning() || + execution_outcome.receipts.iter().flatten().any(|receipt| receipt.is_none()); + + // Prepare receipts cursor if we are going to write receipts to the database + // + // We are writing to database if requested or if there's any kind of receipt pruning + // configured + let mut receipts_cursor = (write_receipts_to.database() || has_receipts_pruning) + .then(|| self.tx.cursor_write::>()) + .transpose()?; + + // Prepare receipts static writer if we are going to write receipts to static files + // + // We are writing to static files if requested and if there's no receipt pruning configured + let mut receipts_static_writer = (write_receipts_to.static_files() && + !has_receipts_pruning) + .then(|| { + self.static_file_provider + .get_writer(execution_outcome.first_block, 
StaticFileSegment::Receipts) + }) + .transpose()?; + + for (idx, receipts) in execution_outcome.receipts.into_iter().enumerate() { + let block_number = execution_outcome.first_block + idx as u64; + + // Increment block number for receipts static file writer + if let Some(writer) = receipts_static_writer.as_mut() { + writer.increment_block(block_number)?; + } + + let first_tx_index = bodies_cursor + .seek_exact(block_number)? + .map(|(_, indices)| indices.first_tx_num()) + .ok_or(ProviderError::BlockBodyIndicesNotFound(block_number))?; + + for (idx, receipt) in receipts.into_iter().enumerate() { + let receipt_idx = first_tx_index + idx as u64; + if let Some(receipt) = receipt { + if let Some(writer) = &mut receipts_static_writer { + writer.append_receipt(receipt_idx, &receipt)?; + } + + if let Some(cursor) = &mut receipts_cursor { + cursor.append(receipt_idx, receipt)?; + } + } + } + } + + Ok(()) + } + fn write_state_reverts( &self, reverts: PlainStateReverts, @@ -2059,9 +2067,15 @@ impl StateChangeWriter /// 1. Take the old value from the changeset /// 2. Take the new value from the local state /// 3. Set the local state to the value in the changeset - fn remove_state(&self, range: RangeInclusive) -> ProviderResult<()> { + fn remove_state_above( + &self, + block: BlockNumber, + remove_receipts_from: StorageLocation, + ) -> ProviderResult<()> { + let range = block + 1..=self.last_block_number()?; + if range.is_empty() { - return Ok(()) + return Ok(()); } // We are not removing block meta as it is used to get block changesets. 
@@ -2070,8 +2084,6 @@ impl StateChangeWriter // get transaction receipts let from_transaction_num = block_bodies.first().expect("already checked if there are blocks").1.first_tx_num(); - let to_transaction_num = - block_bodies.last().expect("already checked if there are blocks").1.last_tx_num(); let storage_range = BlockNumberAddress::range(range.clone()); @@ -2124,8 +2136,7 @@ impl StateChangeWriter } } - // iterate over block body and remove receipts - self.remove::(from_transaction_num..=to_transaction_num)?; + self.remove_receipts_from(from_transaction_num, block, remove_receipts_from)?; Ok(()) } @@ -2151,7 +2162,13 @@ impl StateChangeWriter /// 1. Take the old value from the changeset /// 2. Take the new value from the local state /// 3. Set the local state to the value in the changeset - fn take_state(&self, range: RangeInclusive) -> ProviderResult { + fn take_state_above( + &self, + block: BlockNumber, + remove_receipts_from: StorageLocation, + ) -> ProviderResult> { + let range = block + 1..=self.last_block_number()?; + if range.is_empty() { return Ok(ExecutionOutcome::default()) } @@ -2219,22 +2236,45 @@ impl StateChangeWriter } } - // iterate over block body and create ExecutionResult - let mut receipt_iter = - self.take::(from_transaction_num..=to_transaction_num)?.into_iter(); + // Collect receipts into tuples (tx_num, receipt) to correctly handle pruned receipts + let mut receipts_iter = self + .static_file_provider + .get_range_with_static_file_or_database( + StaticFileSegment::Receipts, + from_transaction_num..to_transaction_num + 1, + |static_file, range, _| { + static_file + .receipts_by_tx_range(range.clone()) + .map(|r| range.into_iter().zip(r).collect()) + }, + |range, _| { + self.tx + .cursor_read::>()? + .walk_range(range)? + .map(|r| r.map_err(Into::into)) + .collect() + }, + |_| true, + )? + .into_iter() + .peekable(); let mut receipts = Vec::with_capacity(block_bodies.len()); // loop break if we are at the end of the blocks. 
for (_, block_body) in block_bodies { let mut block_receipts = Vec::with_capacity(block_body.tx_count as usize); - for _ in block_body.tx_num_range() { - if let Some((_, receipt)) = receipt_iter.next() { - block_receipts.push(Some(receipt)); + for num in block_body.tx_num_range() { + if receipts_iter.peek().is_some_and(|(n, _)| *n == num) { + block_receipts.push(receipts_iter.next().map(|(_, r)| r)); + } else { + block_receipts.push(None); } } receipts.push(block_receipts); } + self.remove_receipts_from(from_transaction_num, block, remove_receipts_from)?; + Ok(ExecutionOutcome::new_init( state, reverts, @@ -2406,7 +2446,7 @@ impl HashingWriter for DatabaseProvi // Apply values to HashedState, and remove the account if it's None. let mut hashed_storage_keys: HashMap> = - HashMap::with_capacity(hashed_storages.len()); + HashMap::with_capacity_and_hasher(hashed_storages.len(), Default::default()); let mut hashed_storage = self.tx.cursor_dup_write::()?; for (hashed_address, key, value) in hashed_storages.into_iter().rev() { hashed_storage_keys.entry(hashed_address).or_default().insert(key); @@ -2692,32 +2732,26 @@ impl HistoryWriter for DatabaseProvi } } -impl StateReader for DatabaseProvider { - fn get_state(&self, block: BlockNumber) -> ProviderResult> { - self.get_state(block..=block) - } -} - impl BlockExecutionWriter for DatabaseProvider { fn take_block_and_execution_above( &self, block: BlockNumber, - remove_transactions_from: StorageLocation, - ) -> ProviderResult { + remove_from: StorageLocation, + ) -> ProviderResult> { let range = block + 1..=self.last_block_number()?; self.unwind_trie_state_range(range.clone())?; // get execution res - let execution_state = self.take_state(range.clone())?; + let execution_state = self.take_state_above(block, remove_from)?; let blocks = self.sealed_block_with_senders_range(range)?; // remove block bodies it is needed for both get block range and get block execution results // that is why it is deleted afterwards. 
- self.remove_blocks_above(block, remove_transactions_from)?; + self.remove_blocks_above(block, remove_from)?; // Update pipeline progress self.update_pipeline_stages(block, true)?; @@ -2728,18 +2762,18 @@ impl BlockExecu fn remove_block_and_execution_above( &self, block: BlockNumber, - remove_transactions_from: StorageLocation, + remove_from: StorageLocation, ) -> ProviderResult<()> { let range = block + 1..=self.last_block_number()?; - self.unwind_trie_state_range(range.clone())?; + self.unwind_trie_state_range(range)?; // remove execution res - self.remove_state(range)?; + self.remove_state_above(block, remove_from)?; // remove block bodies it is needed for both get block range and get block execution results // that is why it is deleted afterwards. - self.remove_blocks_above(block, remove_transactions_from)?; + self.remove_blocks_above(block, remove_from)?; // Update pipeline progress self.update_pipeline_stages(block, true)?; @@ -2751,7 +2785,8 @@ impl BlockExecu impl BlockWriter for DatabaseProvider { - type Body = <::Block as reth_primitives_traits::Block>::Body; + type Block = BlockTy; + type Receipt = ReceiptTy; /// Inserts the block into the database, always modifying the following tables: /// * [`CanonicalHeaders`](tables::CanonicalHeaders) @@ -2775,23 +2810,13 @@ impl BlockWrite /// [`TransactionHashNumbers`](tables::TransactionHashNumbers). fn insert_block( &self, - block: SealedBlockWithSenders, - write_transactions_to: StorageLocation, + block: SealedBlockWithSenders, + write_to: StorageLocation, ) -> ProviderResult { let block_number = block.number; let mut durations_recorder = metrics::DurationsRecorder::default(); - self.tx.put::(block_number, block.hash())?; - durations_recorder.record_relative(metrics::Action::InsertCanonicalHeaders); - - // Put header with canonical hashes. 
- self.tx.put::(block_number, block.header.as_ref().clone())?; - durations_recorder.record_relative(metrics::Action::InsertHeaders); - - self.tx.put::(block.hash(), block_number)?; - durations_recorder.record_relative(metrics::Action::InsertHeaderNumbers); - // total difficulty let ttd = if block_number == 0 { block.difficulty @@ -2802,8 +2827,26 @@ impl BlockWrite parent_ttd + block.difficulty }; - self.tx.put::(block_number, ttd.into())?; - durations_recorder.record_relative(metrics::Action::InsertHeaderTerminalDifficulties); + if write_to.database() { + self.tx.put::(block_number, block.hash())?; + durations_recorder.record_relative(metrics::Action::InsertCanonicalHeaders); + + // Put header with canonical hashes. + self.tx.put::(block_number, block.header.as_ref().clone())?; + durations_recorder.record_relative(metrics::Action::InsertHeaders); + + self.tx.put::(block_number, ttd.into())?; + durations_recorder.record_relative(metrics::Action::InsertHeaderTerminalDifficulties); + } + + if write_to.static_files() { + let mut writer = + self.static_file_provider.get_writer(block_number, StaticFileSegment::Headers)?; + writer.append_header(&block.header, ttd, &block.hash())?; + } + + self.tx.put::(block.hash(), block_number)?; + durations_recorder.record_relative(metrics::Action::InsertHeaderNumbers); let mut next_tx_num = self .tx @@ -2832,10 +2875,7 @@ impl BlockWrite next_tx_num += 1; } - self.append_block_bodies( - vec![(block_number, Some(block.block.body))], - write_transactions_to, - )?; + self.append_block_bodies(vec![(block_number, Some(block.block.body))], write_to)?; debug!( target: "providers::db", @@ -2849,7 +2889,7 @@ impl BlockWrite fn append_block_bodies( &self, - bodies: Vec<(BlockNumber, Option)>, + bodies: Vec<(BlockNumber, Option>)>, write_transactions_to: StorageLocation, ) -> ProviderResult<()> { let Some(from_block) = bodies.first().map(|(block, _)| *block) else { return Ok(()) }; @@ -2868,14 +2908,10 @@ impl BlockWrite // Initialize cursor if 
we will be writing transactions to database let mut tx_cursor = write_transactions_to .database() - .then(|| { - self.tx.cursor_write::::Transaction, - >>() - }) + .then(|| self.tx.cursor_write::>>()) .transpose()?; - // Get id for the next tx_num of zero if there are no transactions. + // Get id for the next tx_num or zero if there are no transactions. let mut next_tx_num = tx_block_cursor.last()?.map(|(id, _)| id + 1).unwrap_or_default(); for (block_number, body) in &bodies { @@ -2963,7 +2999,7 @@ impl BlockWrite .1 .last_tx_num(); - if unwind_tx_from < unwind_tx_to { + if unwind_tx_from <= unwind_tx_to { for (hash, _) in self.transaction_hashes_by_range(unwind_tx_from..(unwind_tx_to + 1))? { self.tx.delete::(hash, None)?; } @@ -2994,7 +3030,7 @@ impl BlockWrite self.remove::(unwind_tx_from..)?; if remove_transactions_from.database() { - self.remove::(unwind_tx_from..)?; + self.remove::>>(unwind_tx_from..)?; } if remove_transactions_from.static_files() { @@ -3017,8 +3053,8 @@ impl BlockWrite /// TODO(joshie): this fn should be moved to `UnifiedStorageWriter` eventually fn append_blocks_with_state( &self, - blocks: Vec>, - execution_outcome: ExecutionOutcome, + blocks: Vec>, + execution_outcome: ExecutionOutcome, hashed_state: HashedPostStateSorted, trie_updates: TrieUpdates, ) -> ProviderResult<()> { @@ -3040,12 +3076,7 @@ impl BlockWrite durations_recorder.record_relative(metrics::Action::InsertBlock); } - // Write state and changesets to the database. - // Must be written after blocks because of the receipt lookup. - // TODO: should _these_ be moved to storagewriter? 
seems like storagewriter should be - // _above_ db provider - let mut storage_writer = UnifiedStorageWriter::from_database(self); - storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::No)?; + self.write_state(execution_outcome, OriginalValuesKnown::No, StorageLocation::Database)?; durations_recorder.record_relative(metrics::Action::InsertState); // insert hashes and intermediate merkle nodes @@ -3093,7 +3124,7 @@ impl PruneCheckpointWriter for DatabaseProvider StatsReader for DatabaseProvider { +impl StatsReader for DatabaseProvider { fn count_entries(&self) -> ProviderResult { let db_entries = self.tx.entries::()?; let static_file_entries = match self.static_file_provider.count_entries::() { diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 68d1a168f150..6631b5b1b31a 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -3,9 +3,10 @@ use crate::{ BlockSource, BlockchainTreePendingStateProvider, CanonChainTracker, CanonStateNotifications, CanonStateSubscriptions, ChainSpecProvider, ChainStateBlockReader, ChangeSetReader, DatabaseProviderFactory, EvmEnvProvider, FullExecutionDataProvider, HeaderProvider, - ProviderError, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, - StageCheckpointReader, StateProviderBox, StateProviderFactory, StaticFileProviderFactory, - TransactionVariant, TransactionsProvider, TreeViewer, WithdrawalsProvider, + NodePrimitivesProvider, ProviderError, PruneCheckpointReader, ReceiptProvider, + ReceiptProviderIdExt, StageCheckpointReader, StateProviderBox, StateProviderFactory, + StaticFileProviderFactory, TransactionVariant, TransactionsProvider, TreeViewer, + WithdrawalsProvider, }; use alloy_consensus::Header; use alloy_eips::{ @@ -23,10 +24,10 @@ use reth_chainspec::{ChainInfo, EthereumHardforks}; use reth_db::table::Value; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; 
use reth_evm::ConfigureEvmEnv; -use reth_node_types::{FullNodePrimitives, NodeTypes, NodeTypesWithDB, TxTy}; +use reth_node_types::{BlockTy, FullNodePrimitives, NodeTypes, NodeTypesWithDB, ReceiptTy, TxTy}; use reth_primitives::{ - Account, Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, - TransactionMeta, TransactionSigned, + Account, BlockWithSenders, EthPrimitives, Receipt, SealedBlock, SealedBlockFor, + SealedBlockWithSenders, SealedHeader, TransactionMeta, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; @@ -77,7 +78,9 @@ where ChainSpec: EthereumHardforks, Storage: ChainStorage, Primitives: FullNodePrimitives< - SignedTx: Value + From + Into, + SignedTx: Value, + Receipt: Value, + BlockHeader = alloy_consensus::Header, >, >, { @@ -88,7 +91,9 @@ impl NodeTypesForProvider for T where ChainSpec: EthereumHardforks, Storage: ChainStorage, Primitives: FullNodePrimitives< - SignedTx: Value + From + Into, + SignedTx: Value, + Receipt: Value, + BlockHeader = alloy_consensus::Header, >, > { @@ -100,9 +105,18 @@ where Self: NodeTypesForProvider + NodeTypesWithDB, { } - impl ProviderNodeTypes for T where T: NodeTypesForProvider + NodeTypesWithDB {} +/// A helper trait with requirements for [`NodeTypesForProvider`] to be used within legacy +/// blockchain tree. +pub trait NodeTypesForTree: NodeTypesForProvider {} +impl NodeTypesForTree for T where T: NodeTypesForProvider {} + +/// Helper trait with requirements for [`ProviderNodeTypes`] to be used within legacy blockchain +/// tree. +pub trait TreeNodeTypes: ProviderNodeTypes + NodeTypesForTree {} +impl TreeNodeTypes for T where T: ProviderNodeTypes + NodeTypesForTree {} + /// The main type for interacting with the blockchain. /// /// This type serves as the main entry point for interacting with the blockchain and provides data @@ -113,9 +127,9 @@ pub struct BlockchainProvider { /// Provider type used to access the database. 
database: ProviderFactory, /// The blockchain tree instance. - tree: Arc, + tree: Arc>, /// Tracks the chain info wrt forkchoice updates - chain_info: ChainInfoTracker, + chain_info: ChainInfoTracker, } impl Clone for BlockchainProvider { @@ -131,7 +145,7 @@ impl Clone for BlockchainProvider { impl BlockchainProvider { /// Sets the treeviewer for the provider. #[doc(hidden)] - pub fn with_tree(mut self, tree: Arc) -> Self { + pub fn with_tree(mut self, tree: Arc>) -> Self { self.tree = tree; self } @@ -143,7 +157,7 @@ impl BlockchainProvider { /// if it exists. pub fn with_blocks( database: ProviderFactory, - tree: Arc, + tree: Arc>, latest: SealedHeader, finalized: Option, safe: Option, @@ -153,7 +167,10 @@ impl BlockchainProvider { /// Create a new provider using only the database and the tree, fetching the latest header from /// the database to initialize the provider. - pub fn new(database: ProviderFactory, tree: Arc) -> ProviderResult { + pub fn new( + database: ProviderFactory, + tree: Arc>, + ) -> ProviderResult { let provider = database.provider()?; let best = provider.chain_info()?; let latest_header = provider @@ -220,6 +237,10 @@ where } } +impl NodePrimitivesProvider for BlockchainProvider { + type Primitives = N::Primitives; +} + impl DatabaseProviderFactory for BlockchainProvider { type DB = N::DB; type Provider = as DatabaseProviderFactory>::Provider; @@ -235,8 +256,6 @@ impl DatabaseProviderFactory for BlockchainProvider { } impl StaticFileProviderFactory for BlockchainProvider { - type Primitives = N::Primitives; - fn static_file_provider(&self) -> StaticFileProvider { self.database.static_file_provider() } @@ -329,8 +348,14 @@ impl BlockIdReader for BlockchainProvider { } } -impl BlockReader for BlockchainProvider { - fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { +impl BlockReader for BlockchainProvider { + type Block = BlockTy; + + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + ) -> 
ProviderResult> { let block = match source { BlockSource::Any => { // check database first @@ -349,22 +374,26 @@ impl BlockReader for BlockchainProvider { Ok(block) } - fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { match id { BlockHashOrNumber::Hash(hash) => self.find_block_by_hash(hash, BlockSource::Any), BlockHashOrNumber::Number(num) => self.database.block_by_number(num), } } - fn pending_block(&self) -> ProviderResult> { + fn pending_block(&self) -> ProviderResult>> { Ok(self.tree.pending_block()) } - fn pending_block_with_senders(&self) -> ProviderResult> { + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { Ok(self.tree.pending_block_with_senders()) } - fn pending_block_and_receipts(&self) -> ProviderResult)>> { + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { Ok(self.tree.pending_block_and_receipts()) } @@ -389,7 +418,7 @@ impl BlockReader for BlockchainProvider { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.database.block_with_senders(id, transaction_kind) } @@ -397,25 +426,25 @@ impl BlockReader for BlockchainProvider { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.database.sealed_block_with_senders(id, transaction_kind) } - fn block_range(&self, range: RangeInclusive) -> ProviderResult> { + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { self.database.block_range(range) } fn block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.database.block_with_senders_range(range) } fn sealed_block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.database.sealed_block_with_senders_range(range) } } @@ -487,27 +516,32 @@ impl TransactionsProvider for BlockchainProvider { 
} impl ReceiptProvider for BlockchainProvider { - fn receipt(&self, id: TxNumber) -> ProviderResult> { + type Receipt = ReceiptTy; + + fn receipt(&self, id: TxNumber) -> ProviderResult> { self.database.receipt(id) } - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { self.database.receipt_by_hash(hash) } - fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>> { + fn receipts_by_block( + &self, + block: BlockHashOrNumber, + ) -> ProviderResult>> { self.database.receipts_by_block(block) } fn receipts_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.database.receipts_by_tx_range(range) } } -impl ReceiptProviderIdExt for BlockchainProvider { +impl ReceiptProviderIdExt for BlockchainProvider { fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { match block { BlockId::Hash(rpc_block_hash) => { @@ -844,7 +878,7 @@ impl BlockReaderIdExt for BlockchainProvider where Self: BlockReader + ReceiptProviderIdExt, { - fn block_by_id(&self, id: BlockId) -> ProviderResult> { + fn block_by_id(&self, id: BlockId) -> ProviderResult> { match id { BlockId::Number(num) => self.block_by_number_or_tag(num), BlockId::Hash(hash) => { @@ -929,7 +963,7 @@ impl BlockchainTreePendingStateProvider for BlockchainProv } impl CanonStateSubscriptions for BlockchainProvider { - fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { + fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { self.tree.subscribe_to_canonical_state() } } diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index 39a71b924431..98bfd47aefb8 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -15,13 +15,16 @@ use reth_db_api::{ transaction::DbTx, }; use reth_primitives::{Account, 
Bytecode}; -use reth_storage_api::{BlockNumReader, DBProvider, StateProofProvider, StorageRootProvider}; +use reth_storage_api::{ + BlockNumReader, DBProvider, StateCommitmentProvider, StateProofProvider, StorageRootProvider, +}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ proof::{Proof, StorageProof}, updates::TrieUpdates, witness::TrieWitness, - AccountProof, HashedPostState, HashedStorage, MultiProof, StateRoot, StorageRoot, TrieInput, + AccountProof, HashedPostState, HashedStorage, MultiProof, StateRoot, StorageMultiProof, + StorageRoot, TrieInput, }; use reth_trie_db::{ DatabaseHashedPostState, DatabaseHashedStorage, DatabaseProof, DatabaseStateRoot, @@ -58,7 +61,9 @@ pub enum HistoryInfo { MaybeInPlainState, } -impl<'b, Provider: DBProvider + BlockNumReader> HistoricalStateProviderRef<'b, Provider> { +impl<'b, Provider: DBProvider + BlockNumReader + StateCommitmentProvider> + HistoricalStateProviderRef<'b, Provider> +{ /// Create new `StateProvider` for historical block number pub fn new(provider: &'b Provider, block_number: BlockNumber) -> Self { Self { provider, block_number, lowest_available_blocks: Default::default() } @@ -239,7 +244,7 @@ impl HistoricalStateProviderRef<'_, Provi } } -impl AccountReader +impl AccountReader for HistoricalStateProviderRef<'_, Provider> { /// Get basic account information. 
@@ -280,7 +285,7 @@ impl BlockHashReader } } -impl StateRootProvider +impl StateRootProvider for HistoricalStateProviderRef<'_, Provider> { fn state_root(&self, hashed_state: HashedPostState) -> ProviderResult { @@ -316,7 +321,7 @@ impl StateRootProvider } } -impl StorageRootProvider +impl StorageRootProvider for HistoricalStateProviderRef<'_, Provider> { fn storage_root( @@ -341,9 +346,21 @@ impl StorageRootProvider StorageProof::overlay_storage_proof(self.tx(), address, slot, revert_storage) .map_err(Into::::into) } + + fn storage_multiproof( + &self, + address: Address, + slots: &[B256], + hashed_storage: HashedStorage, + ) -> ProviderResult { + let mut revert_storage = self.revert_storage(address)?; + revert_storage.extend(&hashed_storage); + StorageProof::overlay_storage_multiproof(self.tx(), address, slots, revert_storage) + .map_err(Into::::into) + } } -impl StateProofProvider +impl StateProofProvider for HistoricalStateProviderRef<'_, Provider> { /// Get account and storage proofs. @@ -377,8 +394,8 @@ impl StateProofProvider } } -impl StateProvider - for HistoricalStateProviderRef<'_, Provider> +impl + StateProvider for HistoricalStateProviderRef<'_, Provider> { /// Get storage. 
fn storage( @@ -428,7 +445,9 @@ pub struct HistoricalStateProvider { lowest_available_blocks: LowestAvailableBlocks, } -impl HistoricalStateProvider { +impl + HistoricalStateProvider +{ /// Create new `StateProvider` for historical block number pub fn new(provider: Provider, block_number: BlockNumber) -> Self { Self { provider, block_number, lowest_available_blocks: Default::default() } @@ -464,7 +483,7 @@ impl HistoricalStateProvider { } // Delegates all provider impls to [HistoricalStateProviderRef] -delegate_provider_impls!(HistoricalStateProvider where [Provider: DBProvider + BlockNumReader + BlockHashReader]); +delegate_provider_impls!(HistoricalStateProvider where [Provider: DBProvider + BlockNumReader + BlockHashReader + StateCommitmentProvider]); /// Lowest blocks at which different parts of the state are available. /// They may be [Some] if pruning is enabled. @@ -508,7 +527,10 @@ mod tests { transaction::{DbTx, DbTxMut}, }; use reth_primitives::{Account, StorageEntry}; - use reth_storage_api::{BlockHashReader, BlockNumReader, DBProvider, DatabaseProviderFactory}; + use reth_storage_api::{ + BlockHashReader, BlockNumReader, DBProvider, DatabaseProviderFactory, + StateCommitmentProvider, + }; use reth_storage_errors::provider::ProviderError; const ADDRESS: Address = address!("0000000000000000000000000000000000000001"); @@ -517,7 +539,9 @@ mod tests { const fn assert_state_provider() {} #[allow(dead_code)] - const fn assert_historical_state_provider() { + const fn assert_historical_state_provider< + T: DBProvider + BlockNumReader + BlockHashReader + StateCommitmentProvider, + >() { assert_state_provider::>(); } diff --git a/crates/storage/provider/src/providers/state/latest.rs b/crates/storage/provider/src/providers/state/latest.rs index 297217acece7..a2ec4972d105 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -9,13 +9,16 @@ use alloy_primitives::{ use reth_db::tables; use 
reth_db_api::{cursor::DbDupCursorRO, transaction::DbTx}; use reth_primitives::{Account, Bytecode}; -use reth_storage_api::{DBProvider, StateProofProvider, StorageRootProvider}; +use reth_storage_api::{ + DBProvider, StateCommitmentProvider, StateProofProvider, StorageRootProvider, +}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use reth_trie::{ proof::{Proof, StorageProof}, updates::TrieUpdates, witness::TrieWitness, - AccountProof, HashedPostState, HashedStorage, MultiProof, StateRoot, StorageRoot, TrieInput, + AccountProof, HashedPostState, HashedStorage, MultiProof, StateRoot, StorageMultiProof, + StorageRoot, TrieInput, }; use reth_trie_db::{ DatabaseProof, DatabaseStateRoot, DatabaseStorageProof, DatabaseStorageRoot, @@ -61,7 +64,9 @@ impl BlockHashReader for LatestStateProviderRef<'_, P } } -impl StateRootProvider for LatestStateProviderRef<'_, Provider> { +impl StateRootProvider + for LatestStateProviderRef<'_, Provider> +{ fn state_root(&self, hashed_state: HashedPostState) -> ProviderResult { StateRoot::overlay_root(self.tx(), hashed_state) .map_err(|err| ProviderError::Database(err.into())) @@ -89,7 +94,9 @@ impl StateRootProvider for LatestStateProviderRef<'_, Prov } } -impl StorageRootProvider for LatestStateProviderRef<'_, Provider> { +impl StorageRootProvider + for LatestStateProviderRef<'_, Provider> +{ fn storage_root( &self, address: Address, @@ -108,9 +115,21 @@ impl StorageRootProvider for LatestStateProviderRef<'_, Pr StorageProof::overlay_storage_proof(self.tx(), address, slot, hashed_storage) .map_err(Into::::into) } + + fn storage_multiproof( + &self, + address: Address, + slots: &[B256], + hashed_storage: HashedStorage, + ) -> ProviderResult { + StorageProof::overlay_storage_multiproof(self.tx(), address, slots, hashed_storage) + .map_err(Into::::into) + } } -impl StateProofProvider for LatestStateProviderRef<'_, Provider> { +impl StateProofProvider + for LatestStateProviderRef<'_, Provider> +{ fn proof( &self, input: 
TrieInput, @@ -138,7 +157,7 @@ impl StateProofProvider for LatestStateProviderRef<'_, Pro } } -impl StateProvider +impl StateProvider for LatestStateProviderRef<'_, Provider> { /// Get storage. @@ -180,7 +199,7 @@ impl LatestStateProvider { } // Delegates all provider impls to [LatestStateProviderRef] -delegate_provider_impls!(LatestStateProvider where [Provider: DBProvider + BlockHashReader]); +delegate_provider_impls!(LatestStateProvider where [Provider: DBProvider + BlockHashReader + StateCommitmentProvider]); #[cfg(test)] mod tests { @@ -188,7 +207,9 @@ mod tests { const fn assert_state_provider() {} #[allow(dead_code)] - const fn assert_latest_state_provider() { + const fn assert_latest_state_provider< + T: DBProvider + BlockHashReader + StateCommitmentProvider, + >() { assert_state_provider::>(); } } diff --git a/crates/storage/provider/src/providers/state/macros.rs b/crates/storage/provider/src/providers/state/macros.rs index b90924354c43..f2648fb15e6a 100644 --- a/crates/storage/provider/src/providers/state/macros.rs +++ b/crates/storage/provider/src/providers/state/macros.rs @@ -50,6 +50,7 @@ macro_rules! delegate_provider_impls { StorageRootProvider $(where [$($generics)*])? { fn storage_root(&self, address: alloy_primitives::Address, storage: reth_trie::HashedStorage) -> reth_storage_errors::provider::ProviderResult; fn storage_proof(&self, address: alloy_primitives::Address, slot: alloy_primitives::B256, storage: reth_trie::HashedStorage) -> reth_storage_errors::provider::ProviderResult; + fn storage_multiproof(&self, address: alloy_primitives::Address, slots: &[alloy_primitives::B256], storage: reth_trie::HashedStorage) -> reth_storage_errors::provider::ProviderResult; } StateProofProvider $(where [$($generics)*])? 
{ fn proof(&self, input: reth_trie::TrieInput, address: alloy_primitives::Address, slots: &[alloy_primitives::B256]) -> reth_storage_errors::provider::ProviderResult; diff --git a/crates/storage/provider/src/providers/static_file/jar.rs b/crates/storage/provider/src/providers/static_file/jar.rs index e04d46312f67..659b093d9d6a 100644 --- a/crates/storage/provider/src/providers/static_file/jar.rs +++ b/crates/storage/provider/src/providers/static_file/jar.rs @@ -18,7 +18,7 @@ use reth_db::{ table::Decompress, }; use reth_node_types::NodePrimitives; -use reth_primitives::{transaction::recover_signers, Receipt, SealedHeader, TransactionMeta}; +use reth_primitives::{transaction::recover_signers, SealedHeader, TransactionMeta}; use reth_primitives_traits::SignedTransaction; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ @@ -300,14 +300,16 @@ impl> TransactionsPr } } -impl> ReceiptProvider - for StaticFileJarProvider<'_, N> +impl> + ReceiptProvider for StaticFileJarProvider<'_, N> { - fn receipt(&self, num: TxNumber) -> ProviderResult> { - self.cursor()?.get_one::>(num.into()) + type Receipt = N::Receipt; + + fn receipt(&self, num: TxNumber) -> ProviderResult> { + self.cursor()?.get_one::>(num.into()) } - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { if let Some(tx_static_file) = &self.auxiliary_jar { if let Some(num) = tx_static_file.transaction_id(hash)? { return self.receipt(num) @@ -316,7 +318,10 @@ impl> ReceiptProvide Ok(None) } - fn receipts_by_block(&self, _block: BlockHashOrNumber) -> ProviderResult>> { + fn receipts_by_block( + &self, + _block: BlockHashOrNumber, + ) -> ProviderResult>> { // Related to indexing tables. 
StaticFile should get the tx_range and call static file // provider with `receipt()` instead for each Err(ProviderError::UnsupportedProvider) @@ -325,13 +330,13 @@ impl> ReceiptProvide fn receipts_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { let range = to_range(range); let mut cursor = self.cursor()?; let mut receipts = Vec::with_capacity((range.end - range.start) as usize); for num in range { - if let Some(tx) = cursor.get_one::>(num.into())? { + if let Some(tx) = cursor.get_one::>(num.into())? { receipts.push(tx) } } diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 14821fde547d..3b49f8d401f9 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -31,14 +31,14 @@ use reth_db_api::{ cursor::DbCursorRO, models::StoredBlockBodyIndices, table::Table, transaction::DbTx, }; use reth_nippy_jar::{NippyJar, NippyJarChecker, CONFIG_FILE_EXTENSION}; -use reth_node_types::NodePrimitives; +use reth_node_types::{FullNodePrimitives, NodePrimitives}; use reth_primitives::{ static_file::{ find_fixed_range, HighestStaticFiles, SegmentHeader, SegmentRangeInclusive, DEFAULT_BLOCKS_PER_STATIC_FILE, }, transaction::recover_signers, - Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, + BlockWithSenders, Receipt, SealedBlockFor, SealedBlockWithSenders, SealedHeader, StaticFileSegment, TransactionMeta, TransactionSignedNoHash, }; use reth_primitives_traits::SignedTransaction; @@ -1341,10 +1341,12 @@ impl BlockHashReader for StaticFileProvider { } } -impl> ReceiptProvider +impl> ReceiptProvider for StaticFileProvider { - fn receipt(&self, num: TxNumber) -> ProviderResult> { + type Receipt = N::Receipt; + + fn receipt(&self, num: TxNumber) -> ProviderResult> { 
self.get_segment_provider_from_transaction(StaticFileSegment::Receipts, num, None) .and_then(|provider| provider.receipt(num)) .or_else(|err| { @@ -1356,31 +1358,34 @@ impl> ReceiptProvider }) } - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { if let Some(num) = self.transaction_id(hash)? { return self.receipt(num) } Ok(None) } - fn receipts_by_block(&self, _block: BlockHashOrNumber) -> ProviderResult>> { + fn receipts_by_block( + &self, + _block: BlockHashOrNumber, + ) -> ProviderResult>> { unreachable!() } fn receipts_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.fetch_range_with_predicate( StaticFileSegment::Receipts, to_range(range), - |cursor, number| cursor.get_one::>(number.into()), + |cursor, number| cursor.get_one::>(number.into()), |_| true, ) } } -impl> TransactionsProviderExt +impl> TransactionsProviderExt for StaticFileProvider { fn transaction_hashes_by_range( @@ -1417,7 +1422,7 @@ impl> TransactionsProvide chunk_range, |cursor, number| { Ok(cursor - .get_one::>(number.into())? + .get_one::>(number.into())? 
.map(|transaction| { rlp_buf.clear(); let _ = channel_tx @@ -1577,32 +1582,38 @@ impl BlockNumReader for StaticFileProvider { } } -impl> BlockReader for StaticFileProvider { +impl> BlockReader for StaticFileProvider { + type Block = N::Block; + fn find_block_by_hash( &self, _hash: B256, _source: BlockSource, - ) -> ProviderResult> { + ) -> ProviderResult> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } - fn block(&self, _id: BlockHashOrNumber) -> ProviderResult> { + fn block(&self, _id: BlockHashOrNumber) -> ProviderResult> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } - fn pending_block(&self) -> ProviderResult> { + fn pending_block(&self) -> ProviderResult>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } - fn pending_block_with_senders(&self) -> ProviderResult> { + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } - fn pending_block_and_receipts(&self) -> ProviderResult)>> { + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } @@ -1621,7 +1632,7 @@ impl> BlockReader for Sta &self, _id: BlockHashOrNumber, _transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } @@ -1630,12 +1641,12 @@ impl> BlockReader for Sta &self, _id: BlockHashOrNumber, _transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } - fn block_range(&self, _range: RangeInclusive) -> ProviderResult> { + fn block_range(&self, _range: RangeInclusive) -> ProviderResult> { // Required data not present in static_files 
Err(ProviderError::UnsupportedProvider) } @@ -1643,14 +1654,14 @@ impl> BlockReader for Sta fn block_with_senders_range( &self, _range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { Err(ProviderError::UnsupportedProvider) } fn sealed_block_with_senders_range( &self, _range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { Err(ProviderError::UnsupportedProvider) } } @@ -1681,7 +1692,7 @@ impl StatsReader for StaticFileProvider { .map(|block| block + 1) .unwrap_or_default() as usize), - tables::Receipts::NAME => Ok(self + tables::Receipts::::NAME => Ok(self .get_highest_static_file_tx(StaticFileSegment::Receipts) .map(|receipts| receipts + 1) .unwrap_or_default() as usize), @@ -1697,11 +1708,14 @@ impl StatsReader for StaticFileProvider { /// Calculates the tx hash for the given transaction and its id. #[inline] -fn calculate_hash( - entry: (TxNumber, TransactionSignedNoHash), +fn calculate_hash( + entry: (TxNumber, T), rlp_buf: &mut Vec, -) -> Result<(B256, TxNumber), Box> { +) -> Result<(B256, TxNumber), Box> +where + T: Encodable2718, +{ let (tx_id, tx) = entry; - tx.transaction.eip2718_encode(&tx.signature, rlp_buf); + tx.encode_2718(rlp_buf); Ok((keccak256(rlp_buf), tx_id)) } diff --git a/crates/storage/provider/src/providers/static_file/mod.rs b/crates/storage/provider/src/providers/static_file/mod.rs index 673451de65f1..71c6bf755e22 100644 --- a/crates/storage/provider/src/providers/static_file/mod.rs +++ b/crates/storage/provider/src/providers/static_file/mod.rs @@ -172,7 +172,7 @@ mod tests { // [ Headers Creation and Commit ] { - let sf_rw = StaticFileProvider::<()>::read_write(&static_dir) + let sf_rw = StaticFileProvider::::read_write(&static_dir) .expect("Failed to create static file provider") .with_custom_blocks_per_file(blocks_per_file); @@ -191,8 +191,8 @@ mod tests { // Helper function to prune headers and validate truncation results fn prune_and_validate( - writer: &mut StaticFileProviderRWRefMut<'_, ()>, - 
sf_rw: &StaticFileProvider<()>, + writer: &mut StaticFileProviderRWRefMut<'_, EthPrimitives>, + sf_rw: &StaticFileProvider, static_dir: impl AsRef, prune_count: u64, expected_tip: Option, diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index 83954bde3521..6f5335ec6657 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -585,7 +585,10 @@ impl StaticFileProviderRW { /// empty blocks and this function wouldn't be called. /// /// Returns the current [`TxNumber`] as seen in the static file. - pub fn append_receipt(&mut self, tx_num: TxNumber, receipt: &Receipt) -> ProviderResult<()> { + pub fn append_receipt(&mut self, tx_num: TxNumber, receipt: &N::Receipt) -> ProviderResult<()> + where + N::Receipt: Compact, + { let start = Instant::now(); self.ensure_no_queued_prune()?; diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 8924ee55b5b2..1d2ddfacaa01 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -26,14 +26,15 @@ use reth_primitives::{ Account, Block, BlockWithSenders, Bytecode, EthPrimitives, GotExpected, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, }; +use reth_primitives_traits::SignedTransaction; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ DatabaseProviderFactory, StageCheckpointReader, StateProofProvider, StorageRootProvider, }; use reth_storage_errors::provider::{ConsistentViewError, ProviderError, ProviderResult}; use reth_trie::{ - updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, StorageProof, - TrieInput, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, + StorageMultiProof, StorageProof, TrieInput, }; use 
reth_trie_db::MerklePatriciaTrie; use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; @@ -389,6 +390,8 @@ impl TransactionsProvider for MockEthProvider { } impl ReceiptProvider for MockEthProvider { + type Receipt = Receipt; + fn receipt(&self, _id: TxNumber) -> ProviderResult> { Ok(None) } @@ -481,6 +484,8 @@ impl BlockIdReader for MockEthProvider { } impl BlockReader for MockEthProvider { + type Block = Block; + fn find_block_by_hash( &self, hash: B256, @@ -648,6 +653,15 @@ impl StorageRootProvider for MockEthProvider { ) -> ProviderResult { Ok(StorageProof::new(slot)) } + + fn storage_multiproof( + &self, + _address: Address, + _slots: &[B256], + _hashed_storage: HashedStorage, + ) -> ProviderResult { + Ok(StorageMultiProof::empty()) + } } impl StateProofProvider for MockEthProvider { @@ -827,6 +841,8 @@ impl ChangeSetReader for MockEthProvider { } impl StateReader for MockEthProvider { + type Receipt = Receipt; + fn get_state(&self, _block: BlockNumber) -> ProviderResult> { Ok(None) } diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 9a88c8c9ab74..ff6b3fccbe10 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -22,12 +22,12 @@ use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_errors::ProviderError; use reth_evm::ConfigureEvmEnv; use reth_primitives::{ - Account, Block, BlockWithSenders, Bytecode, Receipt, SealedBlock, SealedBlockWithSenders, - SealedHeader, TransactionMeta, TransactionSigned, + Account, Block, BlockWithSenders, Bytecode, EthPrimitives, Receipt, SealedBlock, + SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{StateProofProvider, StorageRootProvider}; +use reth_storage_api::{NodePrimitivesProvider, StateProofProvider, 
StorageRootProvider}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, @@ -92,6 +92,8 @@ impl BlockNumReader for NoopProvider { } impl BlockReader for NoopProvider { + type Block = Block; + fn find_block_by_hash( &self, hash: B256, @@ -258,6 +260,7 @@ impl TransactionsProvider for NoopProvider { } impl ReceiptProvider for NoopProvider { + type Receipt = Receipt; fn receipt(&self, _id: TxNumber) -> ProviderResult> { Ok(None) } @@ -370,6 +373,15 @@ impl StorageRootProvider for NoopProvider { ) -> ProviderResult { Ok(reth_trie::StorageProof::new(slot)) } + + fn storage_multiproof( + &self, + _address: Address, + _slots: &[B256], + _hashed_storage: HashedStorage, + ) -> ProviderResult { + Ok(reth_trie::StorageMultiProof::empty()) + } } impl StateProofProvider for NoopProvider { @@ -557,9 +569,11 @@ impl PruneCheckpointReader for NoopProvider { } } -impl StaticFileProviderFactory for NoopProvider { - type Primitives = (); +impl NodePrimitivesProvider for NoopProvider { + type Primitives = EthPrimitives; +} +impl StaticFileProviderFactory for NoopProvider { fn static_file_provider(&self) -> StaticFileProvider { StaticFileProvider::read_only(PathBuf::default(), false).unwrap() } diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index c2ce477051d4..d12f240e6164 100644 --- a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -1,8 +1,9 @@ -use alloy_consensus::Header; use alloy_primitives::BlockNumber; use reth_db_api::models::StoredBlockBodyIndices; use reth_execution_types::{Chain, ExecutionOutcome}; +use reth_node_types::NodePrimitives; use reth_primitives::SealedBlockWithSenders; +use reth_storage_api::NodePrimitivesProvider; use reth_storage_errors::provider::ProviderResult; use reth_trie::{updates::TrieUpdates, HashedPostStateSorted}; @@ -29,50 +30,86 @@ impl 
StorageLocation { } } -/// BlockExecution Writer -#[auto_impl::auto_impl(&, Arc, Box)] -pub trait BlockExecutionWriter: BlockWriter + Send + Sync { +/// `BlockExecution` Writer +pub trait BlockExecutionWriter: + NodePrimitivesProvider> + BlockWriter + Send + Sync +{ /// Take all of the blocks above the provided number and their execution result /// /// The passed block number will stay in the database. + /// + /// Accepts [`StorageLocation`] specifying from where should transactions and receipts be + /// removed. fn take_block_and_execution_above( &self, block: BlockNumber, - remove_transactions_from: StorageLocation, - ) -> ProviderResult; + remove_from: StorageLocation, + ) -> ProviderResult>; /// Remove all of the blocks above the provided number and their execution result /// /// The passed block number will stay in the database. + /// + /// Accepts [`StorageLocation`] specifying from where should transactions and receipts be + /// removed. fn remove_block_and_execution_above( &self, block: BlockNumber, - remove_transactions_from: StorageLocation, + remove_from: StorageLocation, ) -> ProviderResult<()>; } +impl BlockExecutionWriter for &T { + fn take_block_and_execution_above( + &self, + block: BlockNumber, + remove_from: StorageLocation, + ) -> ProviderResult> { + (*self).take_block_and_execution_above(block, remove_from) + } + + fn remove_block_and_execution_above( + &self, + block: BlockNumber, + remove_from: StorageLocation, + ) -> ProviderResult<()> { + (*self).remove_block_and_execution_above(block, remove_from) + } +} + /// This just receives state, or [`ExecutionOutcome`], from the provider #[auto_impl::auto_impl(&, Arc, Box)] pub trait StateReader: Send + Sync { + /// Receipt type in [`ExecutionOutcome`]. 
+ type Receipt: Send + Sync; + /// Get the [`ExecutionOutcome`] for the given block - fn get_state(&self, block: BlockNumber) -> ProviderResult>; + fn get_state( + &self, + block: BlockNumber, + ) -> ProviderResult>>; } /// Block Writer #[auto_impl::auto_impl(&, Arc, Box)] pub trait BlockWriter: Send + Sync { /// The body this writer can write. - type Body: Send + Sync; + type Block: reth_primitives_traits::Block; + /// The receipt type for [`ExecutionOutcome`]. + type Receipt: Send + Sync; /// Insert full block and make it canonical. Parent tx num and transition id is taken from /// parent block in database. /// /// Return [StoredBlockBodyIndices] that contains indices of the first and last transactions and /// transition in the block. + /// + /// Accepts [`StorageLocation`] value which specifies where transactions and headers should be + /// written. fn insert_block( &self, - block: SealedBlockWithSenders, - write_transactions_to: StorageLocation, + block: SealedBlockWithSenders, + write_to: StorageLocation, ) -> ProviderResult; /// Appends a batch of block bodies extending the canonical chain. This is invoked during @@ -82,7 +119,7 @@ pub trait BlockWriter: Send + Sync { /// Bodies are passed as [`Option`]s, if body is `None` the corresponding block is empty. fn append_block_bodies( &self, - bodies: Vec<(BlockNumber, Option)>, + bodies: Vec<(BlockNumber, Option<::Body>)>, write_transactions_to: StorageLocation, ) -> ProviderResult<()>; @@ -118,8 +155,8 @@ pub trait BlockWriter: Send + Sync { /// Returns `Ok(())` on success, or an error if any operation fails. 
fn append_blocks_with_state( &self, - blocks: Vec>, - execution_outcome: ExecutionOutcome, + blocks: Vec>, + execution_outcome: ExecutionOutcome, hashed_state: HashedPostStateSorted, trie_updates: TrieUpdates, ) -> ProviderResult<()>; diff --git a/crates/storage/provider/src/traits/full.rs b/crates/storage/provider/src/traits/full.rs index 9bb357e33a3c..0d28f83739b0 100644 --- a/crates/storage/provider/src/traits/full.rs +++ b/crates/storage/provider/src/traits/full.rs @@ -7,13 +7,15 @@ use crate::{ }; use reth_chain_state::{CanonStateSubscriptions, ForkChoiceSubscriptions}; use reth_chainspec::EthereumHardforks; -use reth_node_types::{NodeTypesWithDB, TxTy}; +use reth_node_types::{BlockTy, NodeTypesWithDB, ReceiptTy, TxTy}; +use reth_storage_api::NodePrimitivesProvider; /// Helper trait to unify all provider traits for simplicity. pub trait FullProvider: DatabaseProviderFactory - + StaticFileProviderFactory - + BlockReaderIdExt> + + NodePrimitivesProvider + + StaticFileProviderFactory + + BlockReaderIdExt, Block = BlockTy, Receipt = ReceiptTy> + AccountReader + StateProviderFactory + EvmEnvProvider @@ -30,8 +32,9 @@ pub trait FullProvider: impl FullProvider for T where T: DatabaseProviderFactory - + StaticFileProviderFactory - + BlockReaderIdExt> + + NodePrimitivesProvider + + StaticFileProviderFactory + + BlockReaderIdExt, Block = BlockTy, Receipt = ReceiptTy> + AccountReader + StateProviderFactory + EvmEnvProvider diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index a772204d0c19..d82e97d1db79 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -13,7 +13,7 @@ mod header_sync_gap; pub use header_sync_gap::{HeaderSyncGap, HeaderSyncGapProvider}; mod state; -pub use state::{StateChangeWriter, StateWriter}; +pub use state::StateWriter; pub use reth_chainspec::ChainSpecProvider; diff --git a/crates/storage/provider/src/traits/state.rs 
b/crates/storage/provider/src/traits/state.rs index 3d62b1886e88..2c4ee2cfa8d3 100644 --- a/crates/storage/provider/src/traits/state.rs +++ b/crates/storage/provider/src/traits/state.rs @@ -6,21 +6,23 @@ use revm::db::{ states::{PlainStateReverts, StateChangeset}, OriginalValuesKnown, }; -use std::ops::RangeInclusive; -/// A helper trait for [`ExecutionOutcome`] to write state and receipts to storage. +use super::StorageLocation; + +/// A trait specifically for writing state changes or reverts pub trait StateWriter { - /// Write the data and receipts to the database or static files if `static_file_producer` is + /// Receipt type included into [`ExecutionOutcome`]. + type Receipt; + + /// Write the state and receipts to the database or static files if `static_file_producer` is /// `Some`. It should be `None` if there is any kind of pruning/filtering over the receipts. - fn write_to_storage( - &mut self, - execution_outcome: ExecutionOutcome, + fn write_state( + &self, + execution_outcome: ExecutionOutcome, is_value_known: OriginalValuesKnown, + write_receipts_to: StorageLocation, ) -> ProviderResult<()>; -} -/// A trait specifically for writing state changes or reverts -pub trait StateChangeWriter { /// Write state reverts to the database. /// /// NOTE: Reverts will delete all wiped storage from plain state. @@ -36,9 +38,19 @@ pub trait StateChangeWriter { /// Writes the hashed state changes to the database fn write_hashed_state(&self, hashed_state: &HashedPostStateSorted) -> ProviderResult<()>; - /// Remove the block range of state. - fn remove_state(&self, range: RangeInclusive) -> ProviderResult<()>; + /// Remove the block range of state above the given block. The state of the passed block is not + /// removed. + fn remove_state_above( + &self, + block: BlockNumber, + remove_receipts_from: StorageLocation, + ) -> ProviderResult<()>; - /// Take the block range of state, recreating the [`ExecutionOutcome`]. 
- fn take_state(&self, range: RangeInclusive) -> ProviderResult; + /// Take the block range of state, recreating the [`ExecutionOutcome`]. The state of the passed + /// block is not removed. + fn take_state_above( + &self, + block: BlockNumber, + remove_receipts_from: StorageLocation, + ) -> ProviderResult>; } diff --git a/crates/storage/provider/src/traits/static_file_provider.rs b/crates/storage/provider/src/traits/static_file_provider.rs index d465121fb46c..9daab7e5a8f9 100644 --- a/crates/storage/provider/src/traits/static_file_provider.rs +++ b/crates/storage/provider/src/traits/static_file_provider.rs @@ -1,12 +1,9 @@ -use reth_node_types::NodePrimitives; +use reth_storage_api::NodePrimitivesProvider; use crate::providers::StaticFileProvider; /// Static file provider factory. -pub trait StaticFileProviderFactory { - /// The network primitives type [`StaticFileProvider`] is using. - type Primitives: NodePrimitives; - +pub trait StaticFileProviderFactory: NodePrimitivesProvider { /// Create new instance of static file provider. 
fn static_file_provider(&self) -> StaticFileProvider; } diff --git a/crates/storage/provider/src/writer/database.rs b/crates/storage/provider/src/writer/database.rs deleted file mode 100644 index 1436fb8a6ab9..000000000000 --- a/crates/storage/provider/src/writer/database.rs +++ /dev/null @@ -1,29 +0,0 @@ -use alloy_primitives::{BlockNumber, TxNumber}; -use reth_db::{ - cursor::{DbCursorRO, DbCursorRW}, - tables, -}; -use reth_errors::ProviderResult; -use reth_primitives::Receipt; -use reth_storage_api::ReceiptWriter; - -pub(crate) struct DatabaseWriter<'a, W>(pub(crate) &'a mut W); - -impl ReceiptWriter for DatabaseWriter<'_, W> -where - W: DbCursorRO + DbCursorRW, -{ - fn append_block_receipts( - &mut self, - first_tx_index: TxNumber, - _: BlockNumber, - receipts: Vec>, - ) -> ProviderResult<()> { - for (tx_idx, receipt) in receipts.into_iter().enumerate() { - if let Some(receipt) = receipt { - self.0.append(first_tx_index + tx_idx as u64, receipt)?; - } - } - Ok(()) - } -} diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index 88034d82794c..459b5bcad972 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -1,38 +1,19 @@ use crate::{ - providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter as SfWriter}, - writer::static_file::StaticFileWriter, - BlockExecutionWriter, BlockWriter, HistoryWriter, StateChangeWriter, StateWriter, - StaticFileProviderFactory, StorageLocation, TrieWriter, + providers::{StaticFileProvider, StaticFileWriter as SfWriter}, + BlockExecutionWriter, BlockWriter, HistoryWriter, StateWriter, StaticFileProviderFactory, + StorageLocation, TrieWriter, }; -use alloy_consensus::Header; -use alloy_primitives::{BlockNumber, B256, U256}; +use alloy_consensus::BlockHeader; use reth_chain_state::ExecutedBlock; -use reth_db::{ - cursor::DbCursorRO, - models::CompactU256, - tables, - transaction::{DbTx, DbTxMut}, -}; -use 
reth_errors::{ProviderError, ProviderResult}; -use reth_execution_types::ExecutionOutcome; -use reth_primitives::{BlockBody, SealedBlock, StaticFileSegment}; -use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{ - DBProvider, HeaderProvider, ReceiptWriter, StageCheckpointWriter, TransactionsProviderExt, -}; +use reth_db::transaction::{DbTx, DbTxMut}; +use reth_errors::ProviderResult; +use reth_primitives::{NodePrimitives, StaticFileSegment}; +use reth_primitives_traits::SignedTransaction; +use reth_storage_api::{DBProvider, StageCheckpointWriter, TransactionsProviderExt}; use reth_storage_errors::writer::UnifiedStorageWriterError; use revm::db::OriginalValuesKnown; -use std::{borrow::Borrow, sync::Arc}; -use tracing::{debug, instrument}; - -mod database; -mod static_file; -use database::DatabaseWriter; - -enum StorageType { - Database(C), - StaticFile(S), -} +use std::sync::Arc; +use tracing::debug; /// [`UnifiedStorageWriter`] is responsible for managing the writing to storage with both database /// and static file providers. @@ -85,14 +66,6 @@ impl<'a, ProviderDB, ProviderSF> UnifiedStorageWriter<'a, ProviderDB, ProviderSF self.static_file.as_ref().expect("should exist") } - /// Returns a mutable reference to the static file instance. - /// - /// # Panics - /// If the static file instance is not set. - fn static_file_mut(&mut self) -> &mut ProviderSF { - self.static_file.as_mut().expect("should exist") - } - /// Ensures that the static file instance is set. /// /// # Returns @@ -148,10 +121,10 @@ impl UnifiedStorageWriter<'_, (), ()> { impl UnifiedStorageWriter<'_, ProviderDB, &StaticFileProvider> where ProviderDB: DBProvider - + BlockWriter + + BlockWriter + TransactionsProviderExt - + StateChangeWriter + TrieWriter + + StateWriter + HistoryWriter + StageCheckpointWriter + BlockExecutionWriter @@ -159,7 +132,11 @@ where + StaticFileProviderFactory, { /// Writes executed blocks and receipts to storage. 
- pub fn save_blocks(&self, blocks: &[ExecutedBlock]) -> ProviderResult<()> { + pub fn save_blocks(&self, blocks: Vec>) -> ProviderResult<()> + where + N: NodePrimitives, + ProviderDB: BlockWriter + StateWriter, + { if blocks.is_empty() { debug!(target: "provider::storage_writer", "Attempted to write empty block range"); return Ok(()) @@ -167,23 +144,14 @@ where // NOTE: checked non-empty above let first_block = blocks.first().unwrap().block(); - let last_block = blocks.last().unwrap().block().clone(); - let first_number = first_block.number; - let last_block_number = last_block.number; - debug!(target: "provider::storage_writer", block_count = %blocks.len(), "Writing blocks and execution data to storage"); + let last_block = blocks.last().unwrap().block(); + let first_number = first_block.number(); + let last_block_number = last_block.number(); - // Only write receipts to static files if there is no receipt pruning configured. - let mut state_writer = if self.database().prune_modes_ref().has_receipts_pruning() { - UnifiedStorageWriter::from_database(self.database()) - } else { - UnifiedStorageWriter::from( - self.database(), - self.static_file().get_writer(first_block.number, StaticFileSegment::Receipts)?, - ) - }; + debug!(target: "provider::storage_writer", block_count = %blocks.len(), "Writing blocks and execution data to storage"); - // TODO: remove all the clones and do performant / batched writes for each type of object + // TODO: Do performant / batched writes for each type of object // instead of a loop over all blocks, // meaning: // * blocks @@ -192,24 +160,24 @@ where // * trie updates (cannot naively extend, need helper) // * indices (already done basically) // Insert the blocks - for block in blocks { - let sealed_block = - block.block().clone().try_with_senders_unchecked(block.senders().clone()).unwrap(); + for ExecutedBlock { block, senders, execution_output, hashed_state, trie } in blocks { + let sealed_block = Arc::unwrap_or_clone(block) + 
.try_with_senders_unchecked(Arc::unwrap_or_clone(senders)) + .unwrap(); self.database().insert_block(sealed_block, StorageLocation::Both)?; - self.save_header_and_transactions(block.block.clone())?; // Write state and changesets to the database. // Must be written after blocks because of the receipt lookup. - let execution_outcome = block.execution_outcome().clone(); - state_writer.write_to_storage(execution_outcome, OriginalValuesKnown::No)?; + self.database().write_state( + Arc::unwrap_or_clone(execution_output), + OriginalValuesKnown::No, + StorageLocation::StaticFiles, + )?; // insert hashes and intermediate merkle nodes - { - let trie_updates = block.trie_updates().clone(); - let hashed_state = block.hashed_state(); - self.database().write_hashed_state(&hashed_state.clone().into_sorted())?; - self.database().write_trie_updates(&trie_updates)?; - } + self.database() + .write_hashed_state(&Arc::unwrap_or_clone(hashed_state).into_sorted())?; + self.database().write_trie_updates(&trie)?; } // update history indices @@ -223,58 +191,20 @@ where Ok(()) } - /// Writes the header & transactions to static files, and updates their respective checkpoints - /// on database. 
- #[instrument(level = "trace", skip_all, fields(block = ?block.num_hash()) target = "storage")] - fn save_header_and_transactions(&self, block: Arc) -> ProviderResult<()> { - debug!(target: "provider::storage_writer", "Writing headers and transactions."); - - { - let header_writer = - self.static_file().get_writer(block.number, StaticFileSegment::Headers)?; - let mut storage_writer = UnifiedStorageWriter::from(self.database(), header_writer); - let td = storage_writer.append_headers_from_blocks( - block.header().number, - std::iter::once(&(block.header(), block.hash())), - )?; - - debug!(target: "provider::storage_writer", block_num=block.number, "Updating transaction metadata after writing"); - self.database() - .tx_ref() - .put::(block.number, CompactU256(td))?; - self.database() - .save_stage_checkpoint(StageId::Headers, StageCheckpoint::new(block.number))?; - } - - self.database() - .save_stage_checkpoint(StageId::Bodies, StageCheckpoint::new(block.number))?; - - Ok(()) - } - /// Removes all block, transaction and receipt data above the given block number from the /// database and static files. This is exclusive, i.e., it only removes blocks above /// `block_number`, and does not remove `block_number`. 
pub fn remove_blocks_above(&self, block_number: u64) -> ProviderResult<()> { + // IMPORTANT: we use `block_number+1` to make sure we remove only what is ABOVE the block + debug!(target: "provider::storage_writer", ?block_number, "Removing blocks from database above block_number"); + self.database().remove_block_and_execution_above(block_number, StorageLocation::Both)?; + // Get highest static file block for the total block range let highest_static_file_block = self .static_file() .get_highest_static_file_block(StaticFileSegment::Headers) .expect("todo: error handling, headers should exist"); - // Get the total txs for the block range, so we have the correct number of columns for - // receipts and transactions - // IMPORTANT: we use `block_number+1` to make sure we remove only what is ABOVE the block - let tx_range = self - .database() - .transaction_range_by_block_range(block_number + 1..=highest_static_file_block)?; - // We are using end + 1 - start here because the returned range is inclusive. - let total_txs = (tx_range.end() + 1).saturating_sub(*tx_range.start()); - - // IMPORTANT: we use `block_number+1` to make sure we remove only what is ABOVE the block - debug!(target: "provider::storage_writer", ?block_number, "Removing blocks from database above block_number"); - self.database().remove_block_and_execution_above(block_number, StorageLocation::Both)?; - // IMPORTANT: we use `highest_static_file_block.saturating_sub(block_number)` to make sure // we remove only what is ABOVE the block. // @@ -285,187 +215,6 @@ where .get_writer(block_number, StaticFileSegment::Headers)? .prune_headers(highest_static_file_block.saturating_sub(block_number))?; - if !self.database().prune_modes_ref().has_receipts_pruning() { - self.static_file() - .get_writer(block_number, StaticFileSegment::Receipts)? 
- .prune_receipts(total_txs, block_number)?; - } - - Ok(()) - } -} - -impl - UnifiedStorageWriter<'_, ProviderDB, StaticFileProviderRWRefMut<'_, ProviderDB::Primitives>> -where - ProviderDB: DBProvider + HeaderProvider + StaticFileProviderFactory, -{ - /// Ensures that the static file writer is set and of the right [`StaticFileSegment`] variant. - /// - /// # Returns - /// - `Ok(())` if the static file writer is set. - /// - `Err(StorageWriterError::MissingStaticFileWriter)` if the static file instance is not set. - fn ensure_static_file_segment( - &self, - segment: StaticFileSegment, - ) -> Result<(), UnifiedStorageWriterError> { - match &self.static_file { - Some(writer) => { - if writer.user_header().segment() == segment { - Ok(()) - } else { - Err(UnifiedStorageWriterError::IncorrectStaticFileWriter( - writer.user_header().segment(), - segment, - )) - } - } - None => Err(UnifiedStorageWriterError::MissingStaticFileWriter), - } - } - - /// Appends headers to static files, using the - /// [`HeaderTerminalDifficulties`](tables::HeaderTerminalDifficulties) table to determine the - /// total difficulty of the parent block during header insertion. - /// - /// NOTE: The static file writer used to construct this [`UnifiedStorageWriter`] MUST be a - /// writer for the Headers segment. - pub fn append_headers_from_blocks( - &mut self, - initial_block_number: BlockNumber, - headers: impl Iterator, - ) -> ProviderResult - where - I: Borrow<(H, B256)>, - H: Borrow
, - { - self.ensure_static_file_segment(StaticFileSegment::Headers)?; - - let mut td = self - .database() - .header_td_by_number(initial_block_number)? - .ok_or(ProviderError::TotalDifficultyNotFound(initial_block_number))?; - - for pair in headers { - let (header, hash) = pair.borrow(); - let header = header.borrow(); - td += header.difficulty; - self.static_file_mut().append_header(header, td, hash)?; - } - - Ok(td) - } -} - -impl - UnifiedStorageWriter<'_, ProviderDB, StaticFileProviderRWRefMut<'_, ProviderDB::Primitives>> -where - ProviderDB: DBProvider + HeaderProvider + StaticFileProviderFactory, -{ - /// Appends receipts block by block. - /// - /// ATTENTION: If called from [`UnifiedStorageWriter`] without a static file producer, it will - /// always write them to database. Otherwise, it will look into the pruning configuration to - /// decide. - /// - /// NOTE: The static file writer used to construct this [`UnifiedStorageWriter`] MUST be a - /// writer for the Receipts segment. - /// - /// # Parameters - /// - `initial_block_number`: The starting block number. - /// - `blocks`: An iterator over blocks, each block having a vector of optional receipts. If - /// `receipt` is `None`, it has been pruned. - pub fn append_receipts_from_blocks( - &mut self, - initial_block_number: BlockNumber, - blocks: impl Iterator>>, - ) -> ProviderResult<()> { - let mut bodies_cursor = - self.database().tx_ref().cursor_read::()?; - - // We write receipts to database in two situations: - // * If we are in live sync. In this case, `UnifiedStorageWriter` is built without a static - // file writer. - // * If there is any kind of receipt pruning - let mut storage_type = if self.static_file.is_none() || - self.database().prune_modes_ref().has_receipts_pruning() - { - StorageType::Database(self.database().tx_ref().cursor_write::()?) 
- } else { - self.ensure_static_file_segment(StaticFileSegment::Receipts)?; - StorageType::StaticFile(self.static_file_mut()) - }; - - let mut last_tx_idx = None; - for (idx, receipts) in blocks.enumerate() { - let block_number = initial_block_number + idx as u64; - - let mut first_tx_index = - bodies_cursor.seek_exact(block_number)?.map(|(_, indices)| indices.first_tx_num()); - - // If there are no indices, that means there have been no transactions - // - // So instead of returning an error, use zero - if block_number == initial_block_number && first_tx_index.is_none() { - first_tx_index = Some(0); - } - - let first_tx_index = first_tx_index - .or(last_tx_idx) - .ok_or(ProviderError::BlockBodyIndicesNotFound(block_number))?; - - // update for empty blocks - last_tx_idx = Some(first_tx_index); - - match &mut storage_type { - StorageType::Database(cursor) => { - DatabaseWriter(cursor).append_block_receipts( - first_tx_index, - block_number, - receipts, - )?; - } - StorageType::StaticFile(sf) => { - StaticFileWriter(*sf).append_block_receipts( - first_tx_index, - block_number, - receipts, - )?; - } - }; - } - - Ok(()) - } -} - -impl StateWriter - for UnifiedStorageWriter<'_, ProviderDB, StaticFileProviderRWRefMut<'_, ProviderDB::Primitives>> -where - ProviderDB: DBProvider - + StateChangeWriter - + HeaderProvider - + StaticFileProviderFactory, -{ - /// Write the data and receipts to the database or static files if `static_file_producer` is - /// `Some`. It should be `None` if there is any kind of pruning/filtering over the receipts. 
- fn write_to_storage( - &mut self, - execution_outcome: ExecutionOutcome, - is_value_known: OriginalValuesKnown, - ) -> ProviderResult<()> { - let (plain_state, reverts) = - execution_outcome.bundle.to_plain_state_and_reverts(is_value_known); - - self.database().write_state_reverts(reverts, execution_outcome.first_block)?; - - self.append_receipts_from_blocks( - execution_outcome.first_block, - execution_outcome.receipts.into_iter(), - )?; - - self.database().write_state_changes(plain_state)?; - Ok(()) } } @@ -485,6 +234,7 @@ mod tests { models::{AccountBeforeTx, BlockNumberAddress}, transaction::{DbTx, DbTxMut}, }; + use reth_execution_types::ExecutionOutcome; use reth_primitives::{Account, Receipt, Receipts, StorageEntry}; use reth_storage_api::DatabaseProviderFactory; use reth_trie::{ @@ -751,9 +501,8 @@ mod tests { let outcome = ExecutionOutcome::new(state.take_bundle().into(), Receipts::default(), 1, Vec::new()); - let mut writer = UnifiedStorageWriter::from_database(&provider); - writer - .write_to_storage(outcome, OriginalValuesKnown::Yes) + provider + .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); // Check plain storage state @@ -852,9 +601,8 @@ mod tests { state.merge_transitions(BundleRetention::Reverts); let outcome = ExecutionOutcome::new(state.take_bundle().into(), Receipts::default(), 2, Vec::new()); - let mut writer = UnifiedStorageWriter::from_database(&provider); - writer - .write_to_storage(outcome, OriginalValuesKnown::Yes) + provider + .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); assert_eq!( @@ -924,9 +672,8 @@ mod tests { 0, Vec::new(), ); - let mut writer = UnifiedStorageWriter::from_database(&provider); - writer - .write_to_storage(outcome, OriginalValuesKnown::Yes) + provider + .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to 
DB"); let mut state = State::builder().with_bundle_update().build(); @@ -1073,9 +820,8 @@ mod tests { let outcome: ExecutionOutcome = ExecutionOutcome::new(bundle, Receipts::default(), 1, Vec::new()); - let mut writer = UnifiedStorageWriter::from_database(&provider); - writer - .write_to_storage(outcome, OriginalValuesKnown::Yes) + provider + .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); let mut storage_changeset_cursor = provider @@ -1243,9 +989,8 @@ mod tests { 0, Vec::new(), ); - let mut writer = UnifiedStorageWriter::from_database(&provider); - writer - .write_to_storage(outcome, OriginalValuesKnown::Yes) + provider + .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); let mut state = State::builder().with_bundle_update().build(); @@ -1291,9 +1036,8 @@ mod tests { state.merge_transitions(BundleRetention::Reverts); let outcome = ExecutionOutcome::new(state.take_bundle().into(), Receipts::default(), 1, Vec::new()); - let mut writer = UnifiedStorageWriter::from_database(&provider); - writer - .write_to_storage(outcome, OriginalValuesKnown::Yes) + provider + .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); let mut storage_changeset_cursor = provider diff --git a/crates/storage/provider/src/writer/static_file.rs b/crates/storage/provider/src/writer/static_file.rs deleted file mode 100644 index f7227d21ef34..000000000000 --- a/crates/storage/provider/src/writer/static_file.rs +++ /dev/null @@ -1,30 +0,0 @@ -use crate::providers::StaticFileProviderRWRefMut; -use alloy_primitives::{BlockNumber, TxNumber}; -use reth_errors::ProviderResult; -use reth_node_types::NodePrimitives; -use reth_primitives::Receipt; -use reth_storage_api::ReceiptWriter; - -pub(crate) struct StaticFileWriter<'a, W>(pub(crate) &'a mut W); - -impl ReceiptWriter for 
StaticFileWriter<'_, StaticFileProviderRWRefMut<'_, N>> { - fn append_block_receipts( - &mut self, - first_tx_index: TxNumber, - block_number: BlockNumber, - receipts: Vec>, - ) -> ProviderResult<()> { - // Increment block on static file header. - self.0.increment_block(block_number)?; - let receipts = receipts.iter().enumerate().map(|(tx_idx, receipt)| { - Ok(( - first_tx_index + tx_idx as u64, - receipt - .as_ref() - .expect("receipt should not be filtered when saving to static files."), - )) - }); - self.0.append_receipts(receipts)?; - Ok(()) - } -} diff --git a/crates/storage/storage-api/Cargo.toml b/crates/storage/storage-api/Cargo.toml index c059eb0d6e9b..ba2ccf1b1573 100644 --- a/crates/storage/storage-api/Cargo.toml +++ b/crates/storage/storage-api/Cargo.toml @@ -23,6 +23,7 @@ reth-prune-types.workspace = true reth-stages-types.workspace = true reth-storage-errors.workspace = true reth-trie.workspace = true +reth-trie-db.workspace = true reth-db.workspace = true # ethereum diff --git a/crates/storage/storage-api/src/block.rs b/crates/storage/storage-api/src/block.rs index 37c7857f1c25..204e9027da28 100644 --- a/crates/storage/storage-api/src/block.rs +++ b/crates/storage/storage-api/src/block.rs @@ -6,9 +6,7 @@ use alloy_consensus::Header; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; use alloy_primitives::{BlockNumber, B256}; use reth_db_models::StoredBlockBodyIndices; -use reth_primitives::{ - Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, -}; +use reth_primitives::{BlockWithSenders, SealedBlockFor, SealedBlockWithSenders, SealedHeader}; use reth_storage_errors::provider::ProviderResult; use std::ops::RangeInclusive; @@ -47,7 +45,6 @@ impl BlockSource { /// /// If not requested otherwise, implementers of this trait should prioritize fetching blocks from /// the database. 
-#[auto_impl::auto_impl(&, Arc)] pub trait BlockReader: BlockNumReader + HeaderProvider @@ -57,32 +54,46 @@ pub trait BlockReader: + Send + Sync { + /// The block type this provider reads. + type Block: reth_primitives_traits::Block< + Body: reth_primitives_traits::BlockBody, + >; + /// Tries to find in the given block source. /// /// Note: this only operates on the hash because the number might be ambiguous. /// /// Returns `None` if block is not found. - fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult>; + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + ) -> ProviderResult>; /// Returns the block with given id from the database. /// /// Returns `None` if block is not found. - fn block(&self, id: BlockHashOrNumber) -> ProviderResult>; + fn block(&self, id: BlockHashOrNumber) -> ProviderResult>; /// Returns the pending block if available /// - /// Note: This returns a [SealedBlock] because it's expected that this is sealed by the provider - /// and the caller does not know the hash. - fn pending_block(&self) -> ProviderResult>; + /// Note: This returns a [`SealedBlockFor`] because it's expected that this is sealed by the + /// provider and the caller does not know the hash. + fn pending_block(&self) -> ProviderResult>>; /// Returns the pending block if available /// - /// Note: This returns a [SealedBlockWithSenders] because it's expected that this is sealed by + /// Note: This returns a [`SealedBlockWithSenders`] because it's expected that this is sealed by /// the provider and the caller does not know the hash. - fn pending_block_with_senders(&self) -> ProviderResult>; + fn pending_block_with_senders( + &self, + ) -> ProviderResult>>; /// Returns the pending block and receipts if available. 
- fn pending_block_and_receipts(&self) -> ProviderResult)>>; + #[allow(clippy::type_complexity)] + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>>; /// Returns the ommers/uncle headers of the given block from the database. /// @@ -92,14 +103,14 @@ pub trait BlockReader: /// Returns the block with matching hash from the database. /// /// Returns `None` if block is not found. - fn block_by_hash(&self, hash: B256) -> ProviderResult> { + fn block_by_hash(&self, hash: B256) -> ProviderResult> { self.block(hash.into()) } /// Returns the block with matching number from database. /// /// Returns `None` if block is not found. - fn block_by_number(&self, num: u64) -> ProviderResult> { + fn block_by_number(&self, num: u64) -> ProviderResult> { self.block(num.into()) } @@ -117,7 +128,7 @@ pub trait BlockReader: &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult>; + ) -> ProviderResult>>; /// Returns the sealed block with senders with matching number or hash from database. /// @@ -128,26 +139,164 @@ pub trait BlockReader: &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult>; + ) -> ProviderResult>>; /// Returns all blocks in the given inclusive range. /// /// Note: returns only available blocks - fn block_range(&self, range: RangeInclusive) -> ProviderResult>; + fn block_range(&self, range: RangeInclusive) -> ProviderResult>; /// Returns a range of blocks from the database, along with the senders of each /// transaction in the blocks. fn block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult>; + ) -> ProviderResult>>; /// Returns a range of sealed blocks from the database, along with the senders of each /// transaction in the blocks. 
fn sealed_block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult>; + ) -> ProviderResult>>; +} + +impl BlockReader for std::sync::Arc { + type Block = T::Block; + + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + ) -> ProviderResult> { + T::find_block_by_hash(self, hash, source) + } + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + T::block(self, id) + } + fn pending_block(&self) -> ProviderResult>> { + T::pending_block(self) + } + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { + T::pending_block_with_senders(self) + } + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { + T::pending_block_and_receipts(self) + } + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { + T::ommers(self, id) + } + fn block_by_hash(&self, hash: B256) -> ProviderResult> { + T::block_by_hash(self, hash) + } + fn block_by_number(&self, num: u64) -> ProviderResult> { + T::block_by_number(self, num) + } + fn block_body_indices(&self, num: u64) -> ProviderResult> { + T::block_body_indices(self, num) + } + fn block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult>> { + T::block_with_senders(self, id, transaction_kind) + } + fn sealed_block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult>> { + T::sealed_block_with_senders(self, id, transaction_kind) + } + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { + T::block_range(self, range) + } + fn block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult>> { + T::block_with_senders_range(self, range) + } + fn sealed_block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult>> { + T::sealed_block_with_senders_range(self, range) + } +} + +impl BlockReader for &T { + type Block = T::Block; + + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + 
) -> ProviderResult> { + T::find_block_by_hash(self, hash, source) + } + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + T::block(self, id) + } + fn pending_block(&self) -> ProviderResult>> { + T::pending_block(self) + } + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { + T::pending_block_with_senders(self) + } + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { + T::pending_block_and_receipts(self) + } + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { + T::ommers(self, id) + } + fn block_by_hash(&self, hash: B256) -> ProviderResult> { + T::block_by_hash(self, hash) + } + fn block_by_number(&self, num: u64) -> ProviderResult> { + T::block_by_number(self, num) + } + fn block_body_indices(&self, num: u64) -> ProviderResult> { + T::block_body_indices(self, num) + } + fn block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult>> { + T::block_with_senders(self, id, transaction_kind) + } + fn sealed_block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult>> { + T::sealed_block_with_senders(self, id, transaction_kind) + } + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { + T::block_range(self, range) + } + fn block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult>> { + T::block_with_senders_range(self, range) + } + fn sealed_block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult>> { + T::sealed_block_with_senders_range(self, range) + } } /// Trait extension for `BlockReader`, for types that implement `BlockId` conversion. @@ -160,12 +309,11 @@ pub trait BlockReader: /// so this trait can only be implemented for types that implement `BlockIdReader`. 
The /// `BlockIdReader` methods should be used to resolve `BlockId`s to block numbers or hashes, and /// retrieving the block should be done using the type's `BlockReader` methods. -#[auto_impl::auto_impl(&, Arc)] pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { /// Returns the block with matching tag from the database /// /// Returns `None` if block is not found. - fn block_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { + fn block_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { self.convert_block_number(id)?.map_or_else(|| Ok(None), |num| self.block(num.into())) } @@ -204,7 +352,7 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { /// Returns the block with the matching [`BlockId`] from the database. /// /// Returns `None` if block is not found. - fn block_by_id(&self, id: BlockId) -> ProviderResult>; + fn block_by_id(&self, id: BlockId) -> ProviderResult>; /// Returns the block with senders with matching [`BlockId`]. 
/// @@ -215,7 +363,7 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { &self, id: BlockId, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { match id { BlockId::Hash(hash) => { self.block_with_senders(hash.block_hash.into(), transaction_kind) diff --git a/crates/storage/storage-api/src/chain.rs b/crates/storage/storage-api/src/chain.rs index d5228bdddf72..9b9c24c68633 100644 --- a/crates/storage/storage-api/src/chain.rs +++ b/crates/storage/storage-api/src/chain.rs @@ -1,10 +1,11 @@ use crate::DBProvider; use alloy_primitives::BlockNumber; +use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; use reth_db::{ - cursor::DbCursorRW, + cursor::{DbCursorRO, DbCursorRW}, models::{StoredBlockOmmers, StoredBlockWithdrawals}, tables, - transaction::DbTxMut, + transaction::{DbTx, DbTxMut}, DbTxUnwindExt, }; use reth_primitives_traits::{Block, BlockBody, FullNodePrimitives}; @@ -41,6 +42,39 @@ impl ChainStorageWriter = + (&'a ::Header, Vec<<::Body as BlockBody>::Transaction>); + +/// Trait that implements how block bodies are read from the storage. +/// +/// Note: Within the current abstraction, transactions persistence is handled separately, thus this +/// trait is provided with transactions read beforehand and is expected to construct the block body +/// from those transactions and additional data read from elsewhere. +#[auto_impl::auto_impl(&, Arc)] +pub trait BlockBodyReader { + /// The block type. + type Block: Block; + + /// Receives a list of block headers along with block transactions and returns the block bodies. + fn read_block_bodies( + &self, + provider: &Provider, + inputs: Vec>, + ) -> ProviderResult::Body>>; +} + +/// Trait that implements how chain-specific types are read from storage. +pub trait ChainStorageReader: + BlockBodyReader +{ +} +impl ChainStorageReader for T where + T: BlockBodyReader +{ +} + /// Ethereum storage implementation. 
#[derive(Debug, Default, Clone, Copy)] pub struct EthStorage; @@ -89,3 +123,47 @@ where Ok(()) } } + +impl BlockBodyReader for EthStorage +where + Provider: DBProvider + ChainSpecProvider, +{ + type Block = reth_primitives::Block; + + fn read_block_bodies( + &self, + provider: &Provider, + inputs: Vec>, + ) -> ProviderResult::Body>> { + // TODO: Ideally storage should hold its own copy of chain spec + let chain_spec = provider.chain_spec(); + + let mut ommers_cursor = provider.tx_ref().cursor_read::()?; + let mut withdrawals_cursor = provider.tx_ref().cursor_read::()?; + + let mut bodies = Vec::with_capacity(inputs.len()); + + for (header, transactions) in inputs { + // If we are past shanghai, then all blocks should have a withdrawal list, + // even if empty + let withdrawals = if chain_spec.is_shanghai_active_at_timestamp(header.timestamp) { + withdrawals_cursor + .seek_exact(header.number)? + .map(|(_, w)| w.withdrawals) + .unwrap_or_default() + .into() + } else { + None + }; + let ommers = if chain_spec.final_paris_total_difficulty(header.number).is_some() { + Vec::new() + } else { + ommers_cursor.seek_exact(header.number)?.map(|(_, o)| o.ommers).unwrap_or_default() + }; + + bodies.push(reth_primitives::BlockBody { transactions, ommers, withdrawals }); + } + + Ok(bodies) + } +} diff --git a/crates/storage/storage-api/src/hashing.rs b/crates/storage/storage-api/src/hashing.rs index c6958aa4d644..7cd30a82510c 100644 --- a/crates/storage/storage-api/src/hashing.rs +++ b/crates/storage/storage-api/src/hashing.rs @@ -1,10 +1,10 @@ -use alloy_primitives::{Address, BlockNumber, B256}; +use alloy_primitives::{map::HashMap, Address, BlockNumber, B256}; use auto_impl::auto_impl; use reth_db::models::{AccountBeforeTx, BlockNumberAddress}; use reth_primitives::{Account, StorageEntry}; use reth_storage_errors::provider::ProviderResult; use std::{ - collections::{BTreeMap, BTreeSet, HashMap}, + collections::{BTreeMap, BTreeSet}, ops::{RangeBounds, RangeInclusive}, }; diff 
--git a/crates/storage/storage-api/src/lib.rs b/crates/storage/storage-api/src/lib.rs index de09e66f1281..4c5d2ab02e7d 100644 --- a/crates/storage/storage-api/src/lib.rs +++ b/crates/storage/storage-api/src/lib.rs @@ -70,3 +70,6 @@ pub use stats::*; mod legacy; pub use legacy::*; + +mod primitives; +pub use primitives::*; diff --git a/crates/storage/storage-api/src/primitives.rs b/crates/storage/storage-api/src/primitives.rs new file mode 100644 index 000000000000..ae2a72e6e531 --- /dev/null +++ b/crates/storage/storage-api/src/primitives.rs @@ -0,0 +1,8 @@ +use reth_primitives::NodePrimitives; + +/// Provider implementation that knows configured [`NodePrimitives`]. +#[auto_impl::auto_impl(&, Arc, Box)] +pub trait NodePrimitivesProvider { + /// The node primitive types. + type Primitives: NodePrimitives; +} diff --git a/crates/storage/storage-api/src/receipts.rs b/crates/storage/storage-api/src/receipts.rs index 06c6103ee9bb..67257cce67ce 100644 --- a/crates/storage/storage-api/src/receipts.rs +++ b/crates/storage/storage-api/src/receipts.rs @@ -1,33 +1,38 @@ use crate::BlockIdReader; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; -use alloy_primitives::{BlockNumber, TxHash, TxNumber}; -use reth_primitives::Receipt; +use alloy_primitives::{TxHash, TxNumber}; use reth_storage_errors::provider::ProviderResult; use std::ops::RangeBounds; -/// Client trait for fetching [Receipt] data . +/// Client trait for fetching receipt data. #[auto_impl::auto_impl(&, Arc)] pub trait ReceiptProvider: Send + Sync { + /// The receipt type. + type Receipt: Send + Sync; + /// Get receipt by transaction number /// /// Returns `None` if the transaction is not found. - fn receipt(&self, id: TxNumber) -> ProviderResult>; + fn receipt(&self, id: TxNumber) -> ProviderResult>; /// Get receipt by transaction hash. /// /// Returns `None` if the transaction is not found. 
- fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult>; + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult>; /// Get receipts by block num or hash. /// /// Returns `None` if the block is not found. - fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>>; + fn receipts_by_block( + &self, + block: BlockHashOrNumber, + ) -> ProviderResult>>; /// Get receipts by tx range. fn receipts_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult>; + ) -> ProviderResult>; } /// Trait extension for `ReceiptProvider`, for types that implement `BlockId` conversion. @@ -40,10 +45,9 @@ pub trait ReceiptProvider: Send + Sync { /// so this trait can only be implemented for types that implement `BlockIdReader`. The /// `BlockIdReader` methods should be used to resolve `BlockId`s to block numbers or hashes, and /// retrieving the receipts should be done using the type's `ReceiptProvider` methods. -#[auto_impl::auto_impl(&, Arc)] pub trait ReceiptProviderIdExt: ReceiptProvider + BlockIdReader { /// Get receipt by block id - fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { + fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { let id = match block { BlockId::Hash(hash) => BlockHashOrNumber::Hash(hash.block_hash), BlockId::Number(num_tag) => { @@ -64,24 +68,7 @@ pub trait ReceiptProviderIdExt: ReceiptProvider + BlockIdReader { fn receipts_by_number_or_tag( &self, number_or_tag: BlockNumberOrTag, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.receipts_by_block_id(number_or_tag.into()) } } - -/// Writer trait for writing [`Receipt`] data. -pub trait ReceiptWriter { - /// Appends receipts for a block. - /// - /// # Parameters - /// - `first_tx_index`: The transaction number of the first receipt in the block. - /// - `block_number`: The block number to which the receipts belong. - /// - `receipts`: A vector of optional receipts in the block. If `None`, it means they were - /// pruned. 
- fn append_block_receipts( - &mut self, - first_tx_index: TxNumber, - block_number: BlockNumber, - receipts: Vec>, - ) -> ProviderResult<()>; -} diff --git a/crates/storage/storage-api/src/state.rs b/crates/storage/storage-api/src/state.rs index 3174489fc4ac..0cb26d307434 100644 --- a/crates/storage/storage-api/src/state.rs +++ b/crates/storage/storage-api/src/state.rs @@ -8,6 +8,7 @@ use alloy_primitives::{Address, BlockHash, BlockNumber, StorageKey, StorageValue use auto_impl::auto_impl; use reth_primitives::Bytecode; use reth_storage_errors::provider::ProviderResult; +use reth_trie_db::StateCommitment; /// Type alias of boxed [`StateProvider`]. pub type StateProviderBox = Box; @@ -81,6 +82,12 @@ pub trait StateProvider: } } +/// Trait implemented for database providers that can provide the [`StateCommitment`] type. +pub trait StateCommitmentProvider { + /// The [`StateCommitment`] type that can be used to perform state commitment operations. + type StateCommitment: StateCommitment; +} + /// Trait implemented for database providers that can be converted into a historical state provider. pub trait TryIntoHistoricalStateProvider { /// Returns a historical [`StateProvider`] indexed by the given historic block number. diff --git a/crates/storage/storage-api/src/transactions.rs b/crates/storage/storage-api/src/transactions.rs index ca2bcaeb4690..3bb20b7e161a 100644 --- a/crates/storage/storage-api/src/transactions.rs +++ b/crates/storage/storage-api/src/transactions.rs @@ -1,4 +1,4 @@ -use crate::{BlockNumReader, BlockReader}; +use crate::{BlockNumReader, BlockReader, ReceiptProvider}; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{Address, BlockNumber, TxHash, TxNumber}; use reth_primitives::TransactionMeta; @@ -84,6 +84,9 @@ pub trait TransactionsProvider: BlockNumReader + Send + Sync { /// A helper type alias to access [`TransactionsProvider::Transaction`]. pub type ProviderTx

=

::Transaction; +/// A helper type alias to access [`ReceiptProvider::Receipt`]. +pub type ProviderReceipt

=

::Receipt; + /// Client trait for fetching additional transactions related data. #[auto_impl::auto_impl(&, Arc)] pub trait TransactionsProviderExt: BlockReader + Send + Sync { diff --git a/crates/storage/storage-api/src/trie.rs b/crates/storage/storage-api/src/trie.rs index c8f12da07167..ee1ca1de1800 100644 --- a/crates/storage/storage-api/src/trie.rs +++ b/crates/storage/storage-api/src/trie.rs @@ -5,7 +5,8 @@ use alloy_primitives::{ use reth_storage_errors::provider::ProviderResult; use reth_trie::{ updates::{StorageTrieUpdates, TrieUpdates}, - AccountProof, HashedPostState, HashedStorage, MultiProof, StorageProof, TrieInput, + AccountProof, HashedPostState, HashedStorage, MultiProof, StorageMultiProof, StorageProof, + TrieInput, }; /// A type that can compute the state root of a given post state. @@ -56,6 +57,14 @@ pub trait StorageRootProvider: Send + Sync { slot: B256, hashed_storage: HashedStorage, ) -> ProviderResult; + + /// Returns the storage multiproof for target slots. + fn storage_multiproof( + &self, + address: Address, + slots: &[B256], + hashed_storage: HashedStorage, + ) -> ProviderResult; } /// A type that can generate state proof on top of a given post state. @@ -105,7 +114,7 @@ pub trait StorageTrieWriter: Send + Sync { /// Returns the number of entries modified. fn write_storage_trie_updates( &self, - storage_tries: &std::collections::HashMap, + storage_tries: &HashMap, ) -> ProviderResult; /// Writes storage trie updates for the given hashed address. 
diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index 7c0f34765591..214633188167 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -52,7 +52,6 @@ bitflags.workspace = true auto_impl.workspace = true smallvec.workspace = true - # testing rand = { workspace = true, optional = true } paste = { workspace = true, optional = true } diff --git a/crates/transaction-pool/src/blobstore/tracker.rs b/crates/transaction-pool/src/blobstore/tracker.rs index 0f48c89a4995..3fdcbe8b4eae 100644 --- a/crates/transaction-pool/src/blobstore/tracker.rs +++ b/crates/transaction-pool/src/blobstore/tracker.rs @@ -1,7 +1,9 @@ //! Support for maintaining the blob pool. +use alloy_eips::eip2718::Encodable2718; use alloy_primitives::{BlockNumber, B256}; use reth_execution_types::ChainBlocks; +use reth_primitives_traits::{Block, BlockBody, SignedTransaction, TxType}; use std::collections::BTreeMap; /// The type that is used to track canonical blob transactions. @@ -37,13 +39,17 @@ impl BlobStoreCanonTracker { /// /// Note: In case this is a chain that's part of a reorg, this replaces previously tracked /// blocks. 
- pub fn add_new_chain_blocks(&mut self, blocks: &ChainBlocks<'_>) { + pub fn add_new_chain_blocks(&mut self, blocks: &ChainBlocks<'_, B>) + where + B: Block>, + { let blob_txs = blocks.iter().map(|(num, block)| { let iter = block .body .transactions() - .filter(|tx| tx.transaction.is_eip4844()) - .map(|tx| tx.hash()); + .iter() + .filter(|tx| tx.tx_type().is_eip4844()) + .map(|tx| tx.trie_hash()); (*num, iter) }); self.add_blocks(blob_txs); diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 3194ebba6f8b..1c383e8edf01 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -156,7 +156,6 @@ use alloy_primitives::{Address, TxHash, B256, U256}; use aquamarine as _; use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; -use reth_primitives::PooledTransactionsElement; use reth_storage_api::StateProviderFactory; use std::{collections::HashSet, sync::Arc}; use tokio::sync::mpsc::Receiver; @@ -416,11 +415,25 @@ where &self, tx_hashes: Vec, limit: GetPooledTransactionLimit, - ) -> Vec { + ) -> Vec<<::Transaction as PoolTransaction>::Pooled> { self.pool.get_pooled_transaction_elements(tx_hashes, limit) } - fn get_pooled_transaction_element(&self, tx_hash: TxHash) -> Option { + fn get_pooled_transactions_as

( + &self, + tx_hashes: Vec, + limit: GetPooledTransactionLimit, + ) -> Vec

+ where + ::Pooled: Into

, + { + self.pool.get_pooled_transactions_as(tx_hashes, limit) + } + + fn get_pooled_transaction_element( + &self, + tx_hash: TxHash, + ) -> Option<<::Transaction as PoolTransaction>::Pooled> { self.pool.get_pooled_transaction_element(tx_hash) } @@ -441,6 +454,13 @@ where self.pool.pending_transactions() } + fn pending_transactions_max( + &self, + max: usize, + ) -> Vec>> { + self.pool.pending_transactions_max(max) + } + fn queued_transactions(&self) -> Vec>> { self.pool.queued_transactions() } diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 47e70e914331..02f218d4b098 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -21,6 +21,7 @@ use reth_primitives::{ PooledTransactionsElementEcRecovered, SealedHeader, TransactionSigned, TransactionSignedEcRecovered, }; +use reth_primitives_traits::SignedTransaction; use reth_storage_api::{errors::provider::ProviderError, BlockReaderIdExt, StateProviderFactory}; use reth_tasks::TaskSpawner; use std::{ @@ -317,7 +318,7 @@ pub async fn maintain_transaction_pool( // find all transactions that were mined in the old chain but not in the new chain let pruned_old_transactions = old_blocks .transactions_ecrecovered() - .filter(|tx| !new_mined_transactions.contains(tx.hash_ref())) + .filter(|tx| !new_mined_transactions.contains(tx.tx_hash())) .filter_map(|tx| { if tx.is_eip4844() { // reorged blobs no longer include the blob, which is necessary for diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index cf2270978abe..3a068d3a5936 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -13,8 +13,8 @@ use crate::{ validate::ValidTransaction, AllPoolTransactions, AllTransactionsEvents, BestTransactions, BlockInfo, EthPoolTransaction, EthPooledTransaction, NewTransactionEvent, PoolResult, PoolSize, PoolTransaction, - PooledTransactionsElement, 
PropagatedTransactions, TransactionEvents, TransactionOrigin, - TransactionPool, TransactionValidationOutcome, TransactionValidator, ValidPoolTransaction, + PropagatedTransactions, TransactionEvents, TransactionOrigin, TransactionPool, + TransactionValidationOutcome, TransactionValidator, ValidPoolTransaction, }; use alloy_eips::{ eip1559::ETHEREUM_BLOCK_GAS_LIMIT, @@ -135,14 +135,25 @@ impl TransactionPool for NoopTransactionPool { &self, _tx_hashes: Vec, _limit: GetPooledTransactionLimit, - ) -> Vec { + ) -> Vec<::Pooled> { + vec![] + } + + fn get_pooled_transactions_as( + &self, + _tx_hashes: Vec, + _limit: GetPooledTransactionLimit, + ) -> Vec + where + ::Pooled: Into, + { vec![] } fn get_pooled_transaction_element( &self, _tx_hash: TxHash, - ) -> Option { + ) -> Option<::Pooled> { None } @@ -163,6 +174,13 @@ impl TransactionPool for NoopTransactionPool { vec![] } + fn pending_transactions_max( + &self, + _max: usize, + ) -> Vec>> { + vec![] + } + fn queued_transactions(&self) -> Vec>> { vec![] } diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index 171faccf7c2a..a4c91aae7268 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -1,4 +1,5 @@ use crate::{ + error::{Eip4844PoolTransactionError, InvalidPoolTransactionError}, identifier::{SenderId, TransactionId}, pool::pending::PendingTransaction, PoolTransaction, TransactionOrdering, ValidPoolTransaction, @@ -6,7 +7,7 @@ use crate::{ use alloy_primitives::Address; use core::fmt; use reth_payload_util::PayloadTransactions; -use reth_primitives::TransactionSignedEcRecovered; +use reth_primitives::{InvalidTransactionError, TransactionSignedEcRecovered}; use std::{ collections::{BTreeMap, BTreeSet, HashSet, VecDeque}, sync::Arc, @@ -27,8 +28,8 @@ pub(crate) struct BestTransactionsWithFees { } impl crate::traits::BestTransactions for BestTransactionsWithFees { - fn mark_invalid(&mut self, tx: &Self::Item) { - 
BestTransactions::mark_invalid(&mut self.best, tx) + fn mark_invalid(&mut self, tx: &Self::Item, kind: InvalidPoolTransactionError) { + BestTransactions::mark_invalid(&mut self.best, tx, kind) } fn no_updates(&mut self) { @@ -60,7 +61,11 @@ impl Iterator for BestTransactionsWithFees { { return Some(best); } - crate::traits::BestTransactions::mark_invalid(self, &best); + crate::traits::BestTransactions::mark_invalid( + self, + &best, + InvalidPoolTransactionError::Underpriced, + ); } } } @@ -95,7 +100,11 @@ pub(crate) struct BestTransactions { impl BestTransactions { /// Mark the transaction and it's descendants as invalid. - pub(crate) fn mark_invalid(&mut self, tx: &Arc>) { + pub(crate) fn mark_invalid( + &mut self, + tx: &Arc>, + _kind: InvalidPoolTransactionError, + ) { self.invalid.insert(tx.sender_id()); } @@ -154,8 +163,8 @@ impl BestTransactions { } impl crate::traits::BestTransactions for BestTransactions { - fn mark_invalid(&mut self, tx: &Self::Item) { - Self::mark_invalid(self, tx) + fn mark_invalid(&mut self, tx: &Self::Item, kind: InvalidPoolTransactionError) { + Self::mark_invalid(self, tx, kind) } fn no_updates(&mut self) { @@ -199,7 +208,12 @@ impl Iterator for BestTransactions { if self.skip_blobs && best.transaction.transaction.is_eip4844() { // blobs should be skipped, marking them as invalid will ensure that no dependent // transactions are returned - self.mark_invalid(&best.transaction) + self.mark_invalid( + &best.transaction, + InvalidPoolTransactionError::Eip4844( + Eip4844PoolTransactionError::NoEip4844Blobs, + ), + ) } else { return Some(best.transaction) } @@ -280,7 +294,10 @@ where if (self.predicate)(&best) { return Some(best) } - self.best.mark_invalid(&best); + self.best.mark_invalid( + &best, + InvalidPoolTransactionError::Consensus(InvalidTransactionError::TxTypeNotSupported), + ); } } } @@ -290,8 +307,8 @@ where I: crate::traits::BestTransactions, P: FnMut(&::Item) -> bool + Send, { - fn mark_invalid(&mut self, tx: &Self::Item) { - 
crate::traits::BestTransactions::mark_invalid(&mut self.best, tx) + fn mark_invalid(&mut self, tx: &Self::Item, kind: InvalidPoolTransactionError) { + crate::traits::BestTransactions::mark_invalid(&mut self.best, tx, kind) } fn no_updates(&mut self) { @@ -379,8 +396,8 @@ where I: crate::traits::BestTransactions>>, T: PoolTransaction, { - fn mark_invalid(&mut self, tx: &Self::Item) { - self.inner.mark_invalid(tx) + fn mark_invalid(&mut self, tx: &Self::Item, kind: InvalidPoolTransactionError) { + self.inner.mark_invalid(tx, kind) } fn no_updates(&mut self) { @@ -450,7 +467,10 @@ mod tests { // mark the first tx as invalid let invalid = best.independent.iter().next().unwrap(); - best.mark_invalid(&invalid.transaction.clone()); + best.mark_invalid( + &invalid.transaction.clone(), + InvalidPoolTransactionError::Consensus(InvalidTransactionError::TxTypeNotSupported), + ); // iterator is empty assert!(best.next().is_none()); @@ -475,7 +495,11 @@ mod tests { > = Box::new(pool.best()); let tx = Iterator::next(&mut best).unwrap(); - crate::traits::BestTransactions::mark_invalid(&mut *best, &tx); + crate::traits::BestTransactions::mark_invalid( + &mut *best, + &tx, + InvalidPoolTransactionError::Consensus(InvalidTransactionError::TxTypeNotSupported), + ); assert!(Iterator::next(&mut best).is_none()); } diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 1a23bf3e07ce..b5391b6e8d73 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -88,7 +88,6 @@ use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; use alloy_eips::eip4844::BlobTransactionSidecar; -use reth_primitives::PooledTransactionsElement; use std::{ collections::{HashMap, HashSet}, fmt, @@ -340,14 +339,27 @@ where } } - /// Returns converted [`PooledTransactionsElement`] for the given transaction hashes. + /// Returns pooled transactions for the given transaction hashes. 
pub(crate) fn get_pooled_transaction_elements( &self, tx_hashes: Vec, limit: GetPooledTransactionLimit, - ) -> Vec + ) -> Vec<<::Transaction as PoolTransaction>::Pooled> where ::Transaction: EthPoolTransaction, + { + self.get_pooled_transactions_as(tx_hashes, limit) + } + + /// Returns pooled transactions for the given transaction hashes as the requested type. + pub(crate) fn get_pooled_transactions_as

( + &self, + tx_hashes: Vec, + limit: GetPooledTransactionLimit, + ) -> Vec

+ where + ::Transaction: EthPoolTransaction, + <::Transaction as PoolTransaction>::Pooled: Into

, { let transactions = self.get_all(tx_hashes); let mut elements = Vec::with_capacity(transactions.len()); @@ -369,15 +381,15 @@ where elements } - /// Returns converted [`PooledTransactionsElement`] for the given transaction hash. + /// Returns converted pooled transaction for the given transaction hash. pub(crate) fn get_pooled_transaction_element( &self, tx_hash: TxHash, - ) -> Option + ) -> Option<<::Transaction as PoolTransaction>::Pooled> where ::Transaction: EthPoolTransaction, { - self.get(&tx_hash).and_then(|tx| self.to_pooled_transaction(tx).map(Into::into)) + self.get(&tx_hash).and_then(|tx| self.to_pooled_transaction(tx)) } /// Updates the entire pool after a new block was executed. @@ -678,6 +690,14 @@ where self.get_pool_data().best_transactions_with_attributes(best_transactions_attributes) } + /// Returns only the first `max` transactions in the pending pool. + pub(crate) fn pending_transactions_max( + &self, + max: usize, + ) -> Vec>> { + self.get_pool_data().pending_transactions_iter().take(max).collect() + } + /// Returns all transactions from the pending sub-pool pub(crate) fn pending_transactions(&self) -> Vec>> { self.get_pool_data().pending_transactions() diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 9d19105b5dab..b5fc0db5204d 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -23,6 +23,7 @@ use reth_primitives::{ PooledTransactionsElementEcRecovered, SealedBlock, Transaction, TransactionSigned, TransactionSignedEcRecovered, }; +use reth_primitives_traits::SignedTransaction; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use std::{ @@ -231,17 +232,41 @@ pub trait TransactionPool: Send + Sync + Clone { &self, tx_hashes: Vec, limit: GetPooledTransactionLimit, - ) -> Vec; + ) -> Vec<::Pooled>; - /// Returns converted [PooledTransactionsElement] for the given transaction hash. 
+ /// Returns the pooled transaction variant for the given transaction hash as the requested type. + fn get_pooled_transactions_as( + &self, + tx_hashes: Vec, + limit: GetPooledTransactionLimit, + ) -> Vec + where + ::Pooled: Into; + + /// Returns the pooled transaction variant for the given transaction hash. /// /// This adheres to the expected behavior of /// [`GetPooledTransactions`](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#getpooledtransactions-0x09): /// /// If the transaction is a blob transaction, the sidecar will be included. /// + /// It is expected that this variant represents the valid p2p format for full transactions. + /// E.g. for EIP-4844 transactions this is the consensus transaction format with the blob + /// sidecar. + /// /// Consumer: P2P - fn get_pooled_transaction_element(&self, tx_hash: TxHash) -> Option; + fn get_pooled_transaction_element( + &self, + tx_hash: TxHash, + ) -> Option<::Pooled>; + + /// Returns the pooled transaction variant for the given transaction hash as the requested type. + fn get_pooled_transaction_as(&self, tx_hash: TxHash) -> Option + where + ::Pooled: Into, + { + self.get_pooled_transaction_element(tx_hash).map(Into::into) + } /// Returns an iterator that yields transactions that are ready for block production. /// @@ -270,6 +295,15 @@ pub trait TransactionPool: Send + Sync + Clone { /// Consumer: RPC fn pending_transactions(&self) -> Vec>>; + /// Returns first `max` transactions that can be included in the next block. + /// See + /// + /// Consumer: Block production + fn pending_transactions_max( + &self, + max: usize, + ) -> Vec>>; + /// Returns all transactions that can be included in _future_ blocks. /// /// This and [Self::pending_transactions] are mutually exclusive. @@ -772,7 +806,7 @@ pub trait BestTransactions: Iterator + Send { /// Implementers must ensure all subsequent transaction _don't_ depend on this transaction. 
/// In other words, this must remove the given transaction _and_ drain all transaction that /// depend on it. - fn mark_invalid(&mut self, transaction: &Self::Item); + fn mark_invalid(&mut self, transaction: &Self::Item, kind: InvalidPoolTransactionError); /// An iterator may be able to receive additional pending transactions that weren't present it /// the pool when it was created. @@ -834,8 +868,8 @@ impl BestTransactions for Box where T: BestTransactions + ?Sized, { - fn mark_invalid(&mut self, transaction: &Self::Item) { - (**self).mark_invalid(transaction); + fn mark_invalid(&mut self, transaction: &Self::Item, kind: InvalidPoolTransactionError) { + (**self).mark_invalid(transaction, kind) } fn no_updates(&mut self) { @@ -853,7 +887,7 @@ where /// A no-op implementation that yields no transactions. impl BestTransactions for std::iter::Empty { - fn mark_invalid(&mut self, _tx: &T) {} + fn mark_invalid(&mut self, _tx: &T, _kind: InvalidPoolTransactionError) {} fn no_updates(&mut self) {} @@ -938,7 +972,7 @@ pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { type Consensus: From + TryInto; /// Associated type representing the recovered pooled variant of the transaction. - type Pooled: Into; + type Pooled: Encodable2718 + Into; /// Define a method to convert from the `Consensus` type to `Self` fn try_from_consensus(tx: Self::Consensus) -> Result { @@ -1211,7 +1245,7 @@ impl PoolTransaction for EthPooledTransaction { /// Returns hash of the transaction. fn hash(&self) -> &TxHash { - self.transaction.hash_ref() + self.transaction.tx_hash() } /// Returns the Sender of the transaction. diff --git a/crates/trie/common/Cargo.toml b/crates/trie/common/Cargo.toml index 2aa9ad114620..bd31e80cf70d 100644 --- a/crates/trie/common/Cargo.toml +++ b/crates/trie/common/Cargo.toml @@ -12,24 +12,29 @@ description = "Commonly used types for trie usage in reth." 
workspace = true [dependencies] -reth-primitives-traits = { workspace = true, features = ["serde"] } -reth-codecs.workspace = true - +# alloy alloy-primitives.workspace = true alloy-rlp = { workspace = true, features = ["arrayvec"] } -alloy-trie = { workspace = true, features = ["serde"] } +alloy-trie.workspace = true alloy-consensus.workspace = true alloy-genesis.workspace = true +reth-primitives-traits.workspace = true +reth-codecs.workspace = true + # revm-primitives scroll re-export revm-primitives = { package = "reth-scroll-revm", path = "../../scroll/revm" } reth-scroll-primitives = { workspace = true, optional = true } bytes.workspace = true derive_more.workspace = true -serde.workspace = true itertools.workspace = true -nybbles = { workspace = true, features = ["serde", "rlp"] } +nybbles = { workspace = true, features = ["rlp"] } + +# `serde` feature +serde = { workspace = true, optional = true } + +serde_with = { workspace = true, optional = true } # `test-utils` feature hash-db = { version = "=0.15.2", optional = true } @@ -37,33 +42,57 @@ plain_hasher = { version = "0.2", optional = true } arbitrary = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] +reth-primitives-traits = { workspace = true, features = ["serde"] } alloy-primitives = { workspace = true, features = ["getrandom"] } +alloy-trie = { workspace = true, features = ["arbitrary", "serde"] } +hash-db = "=0.15.2" +plain_hasher = "0.2" arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-arbitrary-interop.workspace = true -hash-db = "=0.15.2" -plain_hasher = "0.2" +criterion.workspace = true +bincode.workspace = true +serde.workspace = true +serde_json.workspace = true +serde_with.workspace = true reth-scroll-primitives.workspace = true [features] +serde = [ + "dep:serde", + "bytes/serde", + "nybbles/serde", + "alloy-primitives/serde", + "alloy-consensus/serde", + "alloy-trie/serde", + "revm-primitives/serde", + 
"reth-primitives-traits/serde", + "reth-codecs/serde" +] +serde-bincode-compat = [ + "serde", + "reth-primitives-traits/serde-bincode-compat", + "alloy-consensus/serde-bincode-compat", + "dep:serde_with" +] test-utils = [ - "dep:plain_hasher", - "dep:hash-db", - "arbitrary", - "reth-primitives-traits/test-utils", - "reth-codecs/test-utils", + "dep:plain_hasher", + "dep:hash-db", + "arbitrary", + "reth-primitives-traits/test-utils", + "reth-codecs/test-utils", "revm-primitives/test-utils" ] arbitrary = [ - "alloy-trie/arbitrary", - "dep:arbitrary", - "reth-primitives-traits/arbitrary", - "alloy-consensus/arbitrary", - "alloy-primitives/arbitrary", - "nybbles/arbitrary", - "revm-primitives/arbitrary", - "reth-codecs/arbitrary", + "alloy-trie/arbitrary", + "dep:arbitrary", + "reth-primitives-traits/arbitrary", + "alloy-consensus/arbitrary", + "alloy-primitives/arbitrary", + "nybbles/arbitrary", + "revm-primitives/arbitrary", + "reth-codecs/arbitrary", "reth-scroll-primitives?/arbitrary" ] scroll = [ @@ -71,3 +100,7 @@ scroll = [ "dep:reth-scroll-primitives", "revm-primitives/scroll" ] + +[[bench]] +name = "prefix_set" +harness = false diff --git a/crates/trie/trie/benches/prefix_set.rs b/crates/trie/common/benches/prefix_set.rs similarity index 99% rename from crates/trie/trie/benches/prefix_set.rs rename to crates/trie/common/benches/prefix_set.rs index cae08d129f68..b61d58e02729 100644 --- a/crates/trie/trie/benches/prefix_set.rs +++ b/crates/trie/common/benches/prefix_set.rs @@ -7,7 +7,7 @@ use proptest::{ strategy::ValueTree, test_runner::{basic_result_cache, TestRunner}, }; -use reth_trie::{ +use reth_trie_common::{ prefix_set::{PrefixSet, PrefixSetMut}, Nibbles, }; diff --git a/crates/trie/trie/src/constants.rs b/crates/trie/common/src/constants.rs similarity index 94% rename from crates/trie/trie/src/constants.rs rename to crates/trie/common/src/constants.rs index 7354290d9596..471b8bd9dcc6 100644 --- a/crates/trie/trie/src/constants.rs +++ 
b/crates/trie/common/src/constants.rs @@ -5,9 +5,9 @@ pub const TRIE_ACCOUNT_RLP_MAX_SIZE: usize = 110; #[cfg(test)] mod tests { use super::*; + use crate::TrieAccount; use alloy_primitives::{B256, U256}; use alloy_rlp::Encodable; - use reth_trie_common::TrieAccount; #[test] fn account_rlp_max_size() { diff --git a/crates/trie/common/src/hash_builder/state.rs b/crates/trie/common/src/hash_builder/state.rs index c5cae21a1a3d..ec6b102d44ec 100644 --- a/crates/trie/common/src/hash_builder/state.rs +++ b/crates/trie/common/src/hash_builder/state.rs @@ -3,11 +3,11 @@ use alloy_trie::{hash_builder::HashBuilderValue, nodes::RlpNode, HashBuilder}; use bytes::Buf; use nybbles::Nibbles; use reth_codecs::Compact; -use serde::{Deserialize, Serialize}; /// The hash builder state for storing in the database. /// Check the `reth-trie` crate for more info on hash builder. -#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Default)] +#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( feature = "arbitrary", derive(arbitrary::Arbitrary), diff --git a/crates/trie/common/src/lib.rs b/crates/trie/common/src/lib.rs index 7645ebd3a1cb..6647de67811c 100644 --- a/crates/trie/common/src/lib.rs +++ b/crates/trie/common/src/lib.rs @@ -11,6 +11,10 @@ /// The implementation of hash builder. pub mod hash_builder; +/// Constants related to the trie computation. +mod constants; +pub use constants::*; + mod account; pub use account::TrieAccount; @@ -26,6 +30,10 @@ pub use storage::StorageTrieEntry; mod subnode; pub use subnode::StoredSubNode; +/// The implementation of a container for storing intermediate changes to a trie. +/// The container indicates when the trie has been modified. +pub mod prefix_set; + mod proofs; #[cfg(any(test, feature = "test-utils"))] pub use proofs::triehash; @@ -33,4 +41,19 @@ pub use proofs::*; pub mod root; +/// Buffer for trie updates. 
+pub mod updates; + +/// Bincode-compatible serde implementations for trie types. +/// +/// `bincode` crate allows for more efficient serialization of trie types, because it allows +/// non-string map keys. +/// +/// Read more: +#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] +pub mod serde_bincode_compat { + pub use super::updates::serde_bincode_compat as updates; +} + +/// Re-export pub use alloy_trie::{nodes::*, proof, BranchNodeCompact, HashBuilder, TrieMask, EMPTY_ROOT_HASH}; diff --git a/crates/trie/common/src/nibbles.rs b/crates/trie/common/src/nibbles.rs index cf94f135f54b..2d4e34b3e3bf 100644 --- a/crates/trie/common/src/nibbles.rs +++ b/crates/trie/common/src/nibbles.rs @@ -1,24 +1,12 @@ use bytes::Buf; use derive_more::Deref; use reth_codecs::Compact; -use serde::{Deserialize, Serialize}; pub use nybbles::Nibbles; /// The representation of nibbles of the merkle trie stored in the database. -#[derive( - Clone, - Debug, - Default, - PartialEq, - Eq, - PartialOrd, - Ord, - Hash, - Serialize, - Deserialize, - derive_more::Index, -)] +#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, derive_more::Index)] +#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(feature = "test-utils", derive(arbitrary::Arbitrary))] pub struct StoredNibbles(pub Nibbles); @@ -74,7 +62,8 @@ impl Compact for StoredNibbles { } /// The representation of nibbles of the merkle trie stored in the database. 
-#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, PartialOrd, Ord, Hash, Deref)] +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Deref)] +#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(feature = "test-utils", derive(arbitrary::Arbitrary))] pub struct StoredNibblesSubKey(pub Nibbles); @@ -120,3 +109,97 @@ impl Compact for StoredNibblesSubKey { (Self(Nibbles::from_nibbles_unchecked(&buf[..len])), &buf[65..]) } } + +#[cfg(test)] +mod tests { + use super::*; + use bytes::BytesMut; + + #[test] + fn test_stored_nibbles_from_nibbles() { + let nibbles = Nibbles::from_nibbles_unchecked(vec![0x12, 0x34, 0x56]); + let stored = StoredNibbles::from(nibbles.clone()); + assert_eq!(stored.0, nibbles); + } + + #[test] + fn test_stored_nibbles_from_vec() { + let bytes = vec![0x12, 0x34, 0x56]; + let stored = StoredNibbles::from(bytes.clone()); + assert_eq!(stored.0.as_slice(), bytes.as_slice()); + } + + #[test] + fn test_stored_nibbles_equality() { + let bytes = vec![0x12, 0x34]; + let stored = StoredNibbles::from(bytes.clone()); + assert_eq!(stored, *bytes.as_slice()); + } + + #[test] + fn test_stored_nibbles_partial_cmp() { + let stored = StoredNibbles::from(vec![0x12, 0x34]); + let other = vec![0x12, 0x35]; + assert!(stored < *other.as_slice()); + } + + #[test] + fn test_stored_nibbles_to_compact() { + let stored = StoredNibbles::from(vec![0x12, 0x34]); + let mut buf = BytesMut::with_capacity(10); + let len = stored.to_compact(&mut buf); + assert_eq!(len, 2); + assert_eq!(buf, &vec![0x12, 0x34][..]); + } + + #[test] + fn test_stored_nibbles_from_compact() { + let buf = vec![0x12, 0x34, 0x56]; + let (stored, remaining) = StoredNibbles::from_compact(&buf, 2); + assert_eq!(stored.0.as_slice(), &[0x12, 0x34]); + assert_eq!(remaining, &[0x56]); + } + + #[test] + fn test_stored_nibbles_subkey_from_nibbles() { + let nibbles = Nibbles::from_nibbles_unchecked(vec![0x12, 0x34]); + let subkey = 
StoredNibblesSubKey::from(nibbles.clone()); + assert_eq!(subkey.0, nibbles); + } + + #[test] + fn test_stored_nibbles_subkey_to_compact() { + let subkey = StoredNibblesSubKey::from(vec![0x12, 0x34]); + let mut buf = BytesMut::with_capacity(65); + let len = subkey.to_compact(&mut buf); + assert_eq!(len, 65); + assert_eq!(buf[..2], [0x12, 0x34]); + assert_eq!(buf[64], 2); // Length byte + } + + #[test] + fn test_stored_nibbles_subkey_from_compact() { + let mut buf = vec![0x12, 0x34]; + buf.resize(65, 0); + buf[64] = 2; + let (subkey, remaining) = StoredNibblesSubKey::from_compact(&buf, 65); + assert_eq!(subkey.0.as_slice(), &[0x12, 0x34]); + assert_eq!(remaining, &[] as &[u8]); + } + + #[test] + fn test_serialization_stored_nibbles() { + let stored = StoredNibbles::from(vec![0x12, 0x34]); + let serialized = serde_json::to_string(&stored).unwrap(); + let deserialized: StoredNibbles = serde_json::from_str(&serialized).unwrap(); + assert_eq!(stored, deserialized); + } + + #[test] + fn test_serialization_stored_nibbles_subkey() { + let subkey = StoredNibblesSubKey::from(vec![0x12, 0x34]); + let serialized = serde_json::to_string(&subkey).unwrap(); + let deserialized: StoredNibblesSubKey = serde_json::from_str(&serialized).unwrap(); + assert_eq!(subkey, deserialized); + } +} diff --git a/crates/trie/trie/src/prefix_set.rs b/crates/trie/common/src/prefix_set.rs similarity index 97% rename from crates/trie/trie/src/prefix_set.rs rename to crates/trie/common/src/prefix_set.rs index d904ef38fdd5..2536a41ff0c0 100644 --- a/crates/trie/trie/src/prefix_set.rs +++ b/crates/trie/common/src/prefix_set.rs @@ -1,9 +1,9 @@ use crate::Nibbles; -use alloy_primitives::B256; -use std::{ - collections::{HashMap, HashSet}, - sync::Arc, +use alloy_primitives::{ + map::{HashMap, HashSet}, + B256, }; +use std::sync::Arc; /// Collection of mutable prefix sets. 
#[derive(Clone, Default, Debug)] @@ -73,7 +73,7 @@ pub struct TriePrefixSets { /// # Examples /// /// ``` -/// use reth_trie::{prefix_set::PrefixSetMut, Nibbles}; +/// use reth_trie_common::{prefix_set::PrefixSetMut, Nibbles}; /// /// let mut prefix_set_mut = PrefixSetMut::default(); /// prefix_set_mut.insert(Nibbles::from_nibbles_unchecked(&[0xa, 0xb])); @@ -211,8 +211,8 @@ impl PrefixSet { } impl<'a> IntoIterator for &'a PrefixSet { - type Item = &'a reth_trie_common::Nibbles; - type IntoIter = std::slice::Iter<'a, reth_trie_common::Nibbles>; + type Item = &'a Nibbles; + type IntoIter = std::slice::Iter<'a, Nibbles>; fn into_iter(self) -> Self::IntoIter { self.iter() } diff --git a/crates/trie/common/src/proofs.rs b/crates/trie/common/src/proofs.rs index e1bd85dde3da..d86156d2e77c 100644 --- a/crates/trie/common/src/proofs.rs +++ b/crates/trie/common/src/proofs.rs @@ -2,7 +2,11 @@ use crate::{Nibbles, TrieAccount}; use alloy_consensus::constants::KECCAK_EMPTY; -use alloy_primitives::{keccak256, Address, Bytes, B256, U256}; +use alloy_primitives::{ + keccak256, + map::{hash_map, HashMap}, + Address, Bytes, B256, U256, +}; use alloy_rlp::{encode_fixed_size, Decodable, EMPTY_STRING_CODE}; use alloy_trie::{ nodes::TrieNode, @@ -11,8 +15,6 @@ use alloy_trie::{ }; use itertools::Itertools; use reth_primitives_traits::Account; -use serde::{Deserialize, Serialize}; -use std::collections::{hash_map, HashMap}; /// The state multiproof of target accounts and multiproofs of their storage tries. /// Multiproof is effectively a state subtrie that only contains the nodes @@ -26,6 +28,31 @@ pub struct MultiProof { } impl MultiProof { + /// Return the account proof nodes for the given account path. + pub fn account_proof_nodes(&self, path: &Nibbles) -> Vec<(Nibbles, Bytes)> { + self.account_subtree.matching_nodes_sorted(path) + } + + /// Return the storage proof nodes for the given storage slots of the account path. 
+ pub fn storage_proof_nodes( + &self, + hashed_address: B256, + slots: impl IntoIterator, + ) -> Vec<(B256, Vec<(Nibbles, Bytes)>)> { + self.storages + .get(&hashed_address) + .map(|storage_mp| { + slots + .into_iter() + .map(|slot| { + let nibbles = Nibbles::unpack(slot); + (slot, storage_mp.subtree.matching_nodes_sorted(&nibbles)) + }) + .collect() + }) + .unwrap_or_default() + } + /// Construct the account proof from the multiproof. pub fn account_proof( &self, @@ -37,10 +64,9 @@ impl MultiProof { // Retrieve the account proof. let proof = self - .account_subtree - .matching_nodes_iter(&nibbles) - .sorted_by(|a, b| a.0.cmp(b.0)) - .map(|(_, node)| node.clone()) + .account_proof_nodes(&nibbles) + .into_iter() + .map(|(_, node)| node) .collect::>(); // Inspect the last node in the proof. If it's a leaf node with matching suffix, @@ -152,8 +178,9 @@ impl StorageMultiProof { } /// The merkle proof with the relevant account info. -#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] +#[derive(Clone, PartialEq, Eq, Debug)] +#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(any(test, feature = "serde"), serde(rename_all = "camelCase"))] pub struct AccountProof { /// The address associated with the account. pub address: Address, @@ -208,7 +235,8 @@ impl AccountProof { } /// The merkle proof of the storage entry. -#[derive(Clone, PartialEq, Eq, Default, Debug, Serialize, Deserialize)] +#[derive(Clone, PartialEq, Eq, Default, Debug)] +#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] pub struct StorageProof { /// The raw storage key. 
pub key: B256, diff --git a/crates/trie/common/src/storage.rs b/crates/trie/common/src/storage.rs index b61abb116888..cf2945d9101a 100644 --- a/crates/trie/common/src/storage.rs +++ b/crates/trie/common/src/storage.rs @@ -1,9 +1,9 @@ use super::{BranchNodeCompact, StoredNibblesSubKey}; use reth_codecs::Compact; -use serde::{Deserialize, Serialize}; /// Account storage trie node. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, PartialOrd, Ord)] +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] +#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] pub struct StorageTrieEntry { /// The nibbles of the intermediate node pub nibbles: StoredNibblesSubKey, diff --git a/crates/trie/trie/src/updates.rs b/crates/trie/common/src/updates.rs similarity index 87% rename from crates/trie/trie/src/updates.rs rename to crates/trie/common/src/updates.rs index 6d1bcab63d8f..6f80eb16553e 100644 --- a/crates/trie/trie/src/updates.rs +++ b/crates/trie/common/src/updates.rs @@ -1,16 +1,21 @@ -use crate::{walker::TrieWalker, BranchNodeCompact, HashBuilder, Nibbles}; -use alloy_primitives::B256; -use std::collections::{HashMap, HashSet}; +use crate::{BranchNodeCompact, HashBuilder, Nibbles}; +use alloy_primitives::{ + map::{HashMap, HashSet}, + B256, +}; /// The aggregation of trie updates. #[derive(PartialEq, Eq, Clone, Default, Debug)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] pub struct TrieUpdates { - #[cfg_attr(feature = "serde", serde(with = "serde_nibbles_map"))] - pub(crate) account_nodes: HashMap, - #[cfg_attr(feature = "serde", serde(with = "serde_nibbles_set"))] - pub(crate) removed_nodes: HashSet, - pub(crate) storage_tries: HashMap, + /// Collection of updated intermediate account nodes indexed by full path. 
+ #[cfg_attr(any(test, feature = "serde"), serde(with = "serde_nibbles_map"))] + pub account_nodes: HashMap, + /// Collection of removed intermediate account nodes indexed by full path. + #[cfg_attr(any(test, feature = "serde"), serde(with = "serde_nibbles_set"))] + pub removed_nodes: HashSet, + /// Collection of updated storage tries indexed by the hashed address. + pub storage_tries: HashMap, } impl TrieUpdates { @@ -75,20 +80,19 @@ impl TrieUpdates { } /// Finalize state trie updates. - pub fn finalize( + pub fn finalize( &mut self, - walker: TrieWalker, hash_builder: HashBuilder, + removed_keys: HashSet, destroyed_accounts: HashSet, ) { - // Retrieve deleted keys from trie walker. - let (_, removed_node_keys) = walker.split(); - self.removed_nodes.extend(exclude_empty(removed_node_keys)); - // Retrieve updated nodes from hash builder. let (_, updated_nodes) = hash_builder.split(); self.account_nodes.extend(exclude_empty_from_pair(updated_nodes)); + // Add deleted node paths. + self.removed_nodes.extend(exclude_empty(removed_keys)); + // Add deleted storage tries for destroyed accounts. for destroyed in destroyed_accounts { self.storage_tries.entry(destroyed).or_default().set_deleted(true); @@ -110,16 +114,16 @@ impl TrieUpdates { /// Trie updates for storage trie of a single account. #[derive(PartialEq, Eq, Clone, Default, Debug)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] pub struct StorageTrieUpdates { /// Flag indicating whether the trie was deleted. - pub(crate) is_deleted: bool, + pub is_deleted: bool, /// Collection of updated storage trie nodes. - #[cfg_attr(feature = "serde", serde(with = "serde_nibbles_map"))] - pub(crate) storage_nodes: HashMap, + #[cfg_attr(any(test, feature = "serde"), serde(with = "serde_nibbles_map"))] + pub storage_nodes: HashMap, /// Collection of removed storage trie nodes. 
- #[cfg_attr(feature = "serde", serde(with = "serde_nibbles_set"))] - pub(crate) removed_nodes: HashSet, + #[cfg_attr(any(test, feature = "serde"), serde(with = "serde_nibbles_set"))] + pub removed_nodes: HashSet, } #[cfg(feature = "test-utils")] @@ -198,14 +202,13 @@ impl StorageTrieUpdates { } /// Finalize storage trie updates for by taking updates from walker and hash builder. - pub fn finalize(&mut self, walker: TrieWalker, hash_builder: HashBuilder) { - // Retrieve deleted keys from trie walker. - let (_, removed_keys) = walker.split(); - self.removed_nodes.extend(exclude_empty(removed_keys)); - + pub fn finalize(&mut self, hash_builder: HashBuilder, removed_keys: HashSet) { // Retrieve updated nodes from hash builder. let (_, updated_nodes) = hash_builder.split(); self.storage_nodes.extend(exclude_empty_from_pair(updated_nodes)); + + // Add deleted node paths. + self.removed_nodes.extend(exclude_empty(removed_keys)); } /// Convert storage trie updates into [`StorageTrieUpdatesSorted`]. @@ -224,11 +227,10 @@ impl StorageTrieUpdates { /// hex-encoded packed representation. /// /// This also sorts the set before serializing. -#[cfg(feature = "serde")] +#[cfg(any(test, feature = "serde"))] mod serde_nibbles_set { - use std::collections::HashSet; - - use reth_trie_common::Nibbles; + use crate::Nibbles; + use alloy_primitives::map::HashSet; use serde::{de::Error, Deserialize, Deserializer, Serialize, Serializer}; pub(super) fn serialize(map: &HashSet, serializer: S) -> Result @@ -261,17 +263,16 @@ mod serde_nibbles_set { /// hex-encoded packed representation. /// /// This also sorts the map's keys before encoding and serializing. 
-#[cfg(feature = "serde")] +#[cfg(any(test, feature = "serde"))] mod serde_nibbles_map { - use std::{collections::HashMap, marker::PhantomData}; - - use alloy_primitives::hex; - use reth_trie_common::Nibbles; + use crate::Nibbles; + use alloy_primitives::{hex, map::HashMap}; use serde::{ de::{Error, MapAccess, Visitor}, ser::SerializeMap, Deserialize, Deserializer, Serialize, Serializer, }; + use std::marker::PhantomData; pub(super) fn serialize( map: &HashMap, @@ -315,7 +316,10 @@ mod serde_nibbles_map { where A: MapAccess<'de>, { - let mut result = HashMap::with_capacity(map.size_hint().unwrap_or(0)); + let mut result = HashMap::with_capacity_and_hasher( + map.size_hint().unwrap_or(0), + Default::default(), + ); while let Some((key, value)) = map.next_entry::()? { let decoded_key = @@ -337,9 +341,13 @@ mod serde_nibbles_map { /// Sorted trie updates used for lookups and insertions. #[derive(PartialEq, Eq, Clone, Default, Debug)] pub struct TrieUpdatesSorted { - pub(crate) account_nodes: Vec<(Nibbles, BranchNodeCompact)>, - pub(crate) removed_nodes: HashSet, - pub(crate) storage_tries: HashMap, + /// Sorted collection of updated state nodes with corresponding paths. + pub account_nodes: Vec<(Nibbles, BranchNodeCompact)>, + /// The set of removed state node keys. + pub removed_nodes: HashSet, + /// Storage tries storage stored by hashed address of the account + /// the trie belongs to. + pub storage_tries: HashMap, } impl TrieUpdatesSorted { @@ -362,9 +370,12 @@ impl TrieUpdatesSorted { /// Sorted trie updates used for lookups and insertions. #[derive(PartialEq, Eq, Clone, Default, Debug)] pub struct StorageTrieUpdatesSorted { - pub(crate) is_deleted: bool, - pub(crate) storage_nodes: Vec<(Nibbles, BranchNodeCompact)>, - pub(crate) removed_nodes: HashSet, + /// Flag indicating whether the trie has been deleted/wiped. + pub is_deleted: bool, + /// Sorted collection of updated storage nodes with corresponding paths. 
+ pub storage_nodes: Vec<(Nibbles, BranchNodeCompact)>, + /// The set of removed storage node keys. + pub removed_nodes: HashSet, } impl StorageTrieUpdatesSorted { @@ -397,23 +408,22 @@ fn exclude_empty_from_pair( } /// Bincode-compatible trie updates type serde implementations. -#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] +#[cfg(feature = "serde-bincode-compat")] pub mod serde_bincode_compat { - use std::{ - borrow::Cow, - collections::{HashMap, HashSet}, + use crate::{BranchNodeCompact, Nibbles}; + use alloy_primitives::{ + map::{HashMap, HashSet}, + B256, }; - - use alloy_primitives::B256; - use reth_trie_common::{BranchNodeCompact, Nibbles}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; + use std::borrow::Cow; /// Bincode-compatible [`super::TrieUpdates`] serde implementation. /// /// Intended to use with the [`serde_with::serde_as`] macro in the following way: /// ```rust - /// use reth_trie::{serde_bincode_compat, updates::TrieUpdates}; + /// use reth_trie_common::{serde_bincode_compat, updates::TrieUpdates}; /// use serde::{Deserialize, Serialize}; /// use serde_with::serde_as; /// @@ -477,7 +487,7 @@ pub mod serde_bincode_compat { /// /// Intended to use with the [`serde_with::serde_as`] macro in the following way: /// ```rust - /// use reth_trie::{serde_bincode_compat, updates::StorageTrieUpdates}; + /// use reth_trie_common::{serde_bincode_compat, updates::StorageTrieUpdates}; /// use serde::{Deserialize, Serialize}; /// use serde_with::serde_as; /// @@ -538,12 +548,12 @@ pub mod serde_bincode_compat { #[cfg(test)] mod tests { - use crate::updates::StorageTrieUpdates; - - use super::super::{serde_bincode_compat, TrieUpdates}; - + use crate::{ + serde_bincode_compat, + updates::{StorageTrieUpdates, TrieUpdates}, + BranchNodeCompact, Nibbles, + }; use alloy_primitives::B256; - use reth_trie_common::{BranchNodeCompact, Nibbles}; use serde::{Deserialize, Serialize}; use 
serde_with::serde_as; @@ -552,7 +562,7 @@ pub mod serde_bincode_compat { #[serde_as] #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] struct Data { - #[serde_as(as = "serde_bincode_compat::TrieUpdates")] + #[serde_as(as = "serde_bincode_compat::updates::TrieUpdates")] trie_updates: TrieUpdates, } @@ -585,7 +595,7 @@ pub mod serde_bincode_compat { #[serde_as] #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] struct Data { - #[serde_as(as = "serde_bincode_compat::StorageTrieUpdates")] + #[serde_as(as = "serde_bincode_compat::updates::StorageTrieUpdates")] trie_updates: StorageTrieUpdates, } diff --git a/crates/trie/db/Cargo.toml b/crates/trie/db/Cargo.toml index 533a49cb48cb..e6ae8f1774e3 100644 --- a/crates/trie/db/Cargo.toml +++ b/crates/trie/db/Cargo.toml @@ -18,7 +18,6 @@ reth-execution-errors.workspace = true reth-db.workspace = true reth-db-api.workspace = true reth-storage-errors.workspace = true -reth-trie-common.workspace = true reth-trie.workspace = true revm.workspace = true @@ -70,16 +69,18 @@ similar-asserts.workspace = true metrics = ["reth-metrics", "reth-trie/metrics", "dep:metrics"] serde = [ "dep:serde", - "reth-provider/serde", - "reth-trie/serde", + "similar-asserts/serde", + "revm/serde", "alloy-consensus/serde", "alloy-primitives/serde", - "revm/serde", - "similar-asserts/serde", + "reth-trie/serde", + "reth-trie-common/serde", + "reth-provider/serde", "reth-scroll-primitives/serde" ] test-utils = [ "triehash", + "revm/test-utils", "reth-trie-common/test-utils", "reth-chainspec/test-utils", "reth-primitives/test-utils", @@ -87,7 +88,6 @@ test-utils = [ "reth-db-api/test-utils", "reth-provider/test-utils", "reth-trie/test-utils", - "revm/test-utils" ] scroll = [ "reth-db/scroll", diff --git a/crates/trie/db/src/prefix_set.rs b/crates/trie/db/src/prefix_set.rs index cd50503bc703..ac8c3b05304c 100644 --- a/crates/trie/db/src/prefix_set.rs +++ b/crates/trie/db/src/prefix_set.rs @@ -8,8 +8,10 @@ use reth_db_api::{ DatabaseError, }; use 
reth_primitives::StorageEntry; -use reth_trie::prefix_set::{PrefixSetMut, TriePrefixSets}; -use reth_trie_common::Nibbles; +use reth_trie::{ + prefix_set::{PrefixSetMut, TriePrefixSets}, + Nibbles, +}; use std::{ collections::{HashMap, HashSet}, ops::RangeInclusive, diff --git a/crates/trie/db/src/proof.rs b/crates/trie/db/src/proof.rs index 9bf08fe136f7..99c87bf05ebf 100644 --- a/crates/trie/db/src/proof.rs +++ b/crates/trie/db/src/proof.rs @@ -10,9 +10,8 @@ use reth_trie::{ hashed_cursor::HashedPostStateCursorFactory, proof::{Proof, StorageProof}, trie_cursor::InMemoryTrieCursorFactory, - HashedPostStateSorted, HashedStorage, MultiProof, TrieInput, + AccountProof, HashedPostStateSorted, HashedStorage, MultiProof, StorageMultiProof, TrieInput, }; -use reth_trie_common::AccountProof; /// Extends [`Proof`] with operations specific for working with a database transaction. pub trait DatabaseProof<'a, TX> { @@ -96,7 +95,15 @@ pub trait DatabaseStorageProof<'a, TX> { address: Address, slot: B256, storage: HashedStorage, - ) -> Result; + ) -> Result; + + /// Generates the storage multiproof for target slots based on [`TrieInput`]. 
+ fn overlay_storage_multiproof( + tx: &'a TX, + address: Address, + slots: &[B256], + storage: HashedStorage, + ) -> Result; } impl<'a, TX: DbTx> DatabaseStorageProof<'a, TX> @@ -111,12 +118,12 @@ impl<'a, TX: DbTx> DatabaseStorageProof<'a, TX> address: Address, slot: B256, storage: HashedStorage, - ) -> Result { + ) -> Result { let hashed_address = keccak256(address); let prefix_set = storage.construct_prefix_set(); let state_sorted = HashedPostStateSorted::new( Default::default(), - HashMap::from([(hashed_address, storage.into_sorted())]), + HashMap::from_iter([(hashed_address, storage.into_sorted())]), ); Self::from_tx(tx, address) .with_hashed_cursor_factory(HashedPostStateCursorFactory::new( @@ -126,4 +133,26 @@ impl<'a, TX: DbTx> DatabaseStorageProof<'a, TX> .with_prefix_set_mut(prefix_set) .storage_proof(slot) } + + fn overlay_storage_multiproof( + tx: &'a TX, + address: Address, + slots: &[B256], + storage: HashedStorage, + ) -> Result { + let hashed_address = keccak256(address); + let targets = slots.iter().map(keccak256).collect(); + let prefix_set = storage.construct_prefix_set(); + let state_sorted = HashedPostStateSorted::new( + Default::default(), + HashMap::from_iter([(hashed_address, storage.into_sorted())]), + ); + Self::from_tx(tx, address) + .with_hashed_cursor_factory(HashedPostStateCursorFactory::new( + DatabaseHashedCursorFactory::new(tx), + &state_sorted, + )) + .with_prefix_set_mut(prefix_set) + .storage_multiproof(targets) + } } diff --git a/crates/trie/db/src/trie_cursor.rs b/crates/trie/db/src/trie_cursor.rs index bfded342ba04..b364e9a86f14 100644 --- a/crates/trie/db/src/trie_cursor.rs +++ b/crates/trie/db/src/trie_cursor.rs @@ -11,9 +11,8 @@ use reth_storage_errors::db::DatabaseError; use reth_trie::{ trie_cursor::{TrieCursor, TrieCursorFactory}, updates::StorageTrieUpdates, - BranchNodeCompact, Nibbles, StoredNibbles, StoredNibblesSubKey, + BranchNodeCompact, Nibbles, StorageTrieEntry, StoredNibbles, StoredNibblesSubKey, }; -use 
reth_trie_common::StorageTrieEntry; /// Wrapper struct for database transaction implementing trie cursor factory trait. #[derive(Debug)] diff --git a/crates/trie/db/tests/proof.rs b/crates/trie/db/tests/proof.rs index e03be1167479..c2e86d77025b 100644 --- a/crates/trie/db/tests/proof.rs +++ b/crates/trie/db/tests/proof.rs @@ -6,8 +6,7 @@ use alloy_rlp::EMPTY_STRING_CODE; use reth_chainspec::{Chain, ChainSpec, HOLESKY, MAINNET}; use reth_primitives::Account; use reth_provider::test_utils::{create_test_provider_factory, insert_genesis}; -use reth_trie::{proof::Proof, Nibbles}; -use reth_trie_common::{AccountProof, StorageProof}; +use reth_trie::{proof::Proof, AccountProof, Nibbles, StorageProof}; use reth_trie_db::DatabaseProof; use std::{ str::FromStr, diff --git a/crates/trie/db/tests/trie.rs b/crates/trie/db/tests/trie.rs index 133dd4556a38..a0652107ca6a 100644 --- a/crates/trie/db/tests/trie.rs +++ b/crates/trie/db/tests/trie.rs @@ -1,13 +1,14 @@ #![allow(missing_docs)] use alloy_consensus::EMPTY_ROOT_HASH; -use alloy_primitives::{hex_literal::hex, keccak256, Address, B256, U256}; +use alloy_primitives::{hex_literal::hex, keccak256, map::HashMap, Address, B256, U256}; +use alloy_rlp::Encodable; use proptest::{prelude::ProptestConfig, proptest}; use proptest_arbitrary_interop::arb; use reth_db::{tables, test_utils::TempDatabase, DatabaseEnv}; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}, - transaction::DbTxMut, + transaction::{DbTx, DbTxMut}, }; use reth_primitives::{Account, StorageEntry}; use reth_provider::{ @@ -15,25 +16,15 @@ use reth_provider::{ StorageTrieWriter, TrieWriter, }; use reth_trie::{ - prefix_set::PrefixSetMut, + prefix_set::{PrefixSetMut, TriePrefixSets}, test_utils::{state_root, state_root_prehashed, storage_root, storage_root_prehashed}, - BranchNodeCompact, StateRoot, StorageRoot, TrieMask, + triehash::KeccakHasher, + updates::StorageTrieUpdates, + BranchNodeCompact, HashBuilder, IntermediateStateRootState, Nibbles, 
StateRoot, + StateRootProgress, StorageRoot, TrieAccount, TrieMask, }; -use reth_trie_common::triehash::KeccakHasher; use reth_trie_db::{DatabaseStateRoot, DatabaseStorageRoot}; -use std::{ - collections::{BTreeMap, HashMap}, - ops::Mul, - str::FromStr, - sync::Arc, -}; - -use alloy_rlp::Encodable; -use reth_db_api::transaction::DbTx; -use reth_trie::{ - prefix_set::TriePrefixSets, updates::StorageTrieUpdates, HashBuilder, - IntermediateStateRootState, Nibbles, StateRootProgress, TrieAccount, -}; +use std::{collections::BTreeMap, ops::Mul, str::FromStr, sync::Arc}; fn insert_account( tx: &impl DbTxMut, diff --git a/crates/trie/db/tests/walker.rs b/crates/trie/db/tests/walker.rs index dd4bcd6da8fc..06355ff6d489 100644 --- a/crates/trie/db/tests/walker.rs +++ b/crates/trie/db/tests/walker.rs @@ -5,9 +5,9 @@ use reth_db::tables; use reth_db_api::{cursor::DbCursorRW, transaction::DbTxMut}; use reth_provider::test_utils::create_test_provider_factory; use reth_trie::{ - prefix_set::PrefixSetMut, trie_cursor::TrieCursor, walker::TrieWalker, StorageTrieEntry, + prefix_set::PrefixSetMut, trie_cursor::TrieCursor, walker::TrieWalker, BranchNodeCompact, + Nibbles, StorageTrieEntry, }; -use reth_trie_common::{BranchNodeCompact, Nibbles}; use reth_trie_db::{DatabaseAccountTrieCursor, DatabaseStorageTrieCursor}; #[test] diff --git a/crates/trie/db/tests/witness.rs b/crates/trie/db/tests/witness.rs index 8e00472b4738..385f6269f394 100644 --- a/crates/trie/db/tests/witness.rs +++ b/crates/trie/db/tests/witness.rs @@ -27,7 +27,7 @@ fn includes_empty_node_preimage() { assert_eq!( TrieWitness::from_tx(provider.tx_ref()) .compute(HashedPostState { - accounts: HashMap::from([(hashed_address, Some(Account::default()))]), + accounts: HashMap::from_iter([(hashed_address, Some(Account::default()))]), storages: HashMap::default(), }) .unwrap(), @@ -44,8 +44,8 @@ fn includes_empty_node_preimage() { let witness = TrieWitness::from_tx(provider.tx_ref()) .compute(HashedPostState { - accounts: 
HashMap::from([(hashed_address, Some(Account::default()))]), - storages: HashMap::from([( + accounts: HashMap::from_iter([(hashed_address, Some(Account::default()))]), + storages: HashMap::from_iter([( hashed_address, HashedStorage::from_iter(false, [(hashed_slot, U256::from(1))]), )]), @@ -80,12 +80,16 @@ fn includes_nodes_for_destroyed_storage_nodes() { .multiproof(HashMap::from_iter([(hashed_address, HashSet::from_iter([hashed_slot]))])) .unwrap(); - let witness = TrieWitness::from_tx(provider.tx_ref()) - .compute(HashedPostState { - accounts: HashMap::from([(hashed_address, Some(Account::default()))]), - storages: HashMap::from([(hashed_address, HashedStorage::from_iter(true, []))]), // destroyed - }) - .unwrap(); + let witness = + TrieWitness::from_tx(provider.tx_ref()) + .compute(HashedPostState { + accounts: HashMap::from_iter([(hashed_address, Some(Account::default()))]), + storages: HashMap::from_iter([( + hashed_address, + HashedStorage::from_iter(true, []), + )]), // destroyed + }) + .unwrap(); assert!(witness.contains_key(&state_root)); for node in multiproof.account_subtree.values() { assert_eq!(witness.get(&keccak256(node)), Some(node)); @@ -126,8 +130,8 @@ fn correctly_decodes_branch_node_values() { let witness = TrieWitness::from_tx(provider.tx_ref()) .compute(HashedPostState { - accounts: HashMap::from([(hashed_address, Some(Account::default()))]), - storages: HashMap::from([( + accounts: HashMap::from_iter([(hashed_address, Some(Account::default()))]), + storages: HashMap::from_iter([( hashed_address, HashedStorage::from_iter( false, diff --git a/crates/trie/parallel/benches/root.rs b/crates/trie/parallel/benches/root.rs index eb5b6575b9f7..a9300efa9b0d 100644 --- a/crates/trie/parallel/benches/root.rs +++ b/crates/trie/parallel/benches/root.rs @@ -5,8 +5,7 @@ use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner}; use proptest_arbitrary_interop::arb; use reth_primitives::Account; use reth_provider::{ - 
providers::ConsistentDbView, test_utils::create_test_provider_factory, StateChangeWriter, - TrieWriter, + providers::ConsistentDbView, test_utils::create_test_provider_factory, StateWriter, TrieWriter, }; use reth_trie::{ hashed_cursor::HashedPostStateCursorFactory, HashedPostState, HashedStorage, StateRoot, diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index dcb1a0231dd1..f285079f2526 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -33,7 +33,7 @@ pub struct ParallelProof { /// Consistent view of the database. view: ConsistentDbView, /// Trie input. - input: TrieInput, + input: Arc, /// Parallel state root metrics. #[cfg(feature = "metrics")] metrics: ParallelStateRootMetrics, @@ -41,7 +41,7 @@ pub struct ParallelProof { impl ParallelProof { /// Create new state proof generator. - pub fn new(view: ConsistentDbView, input: TrieInput) -> Self { + pub fn new(view: ConsistentDbView, input: Arc) -> Self { Self { view, input, @@ -62,8 +62,8 @@ where ) -> Result { let mut tracker = ParallelTrieTracker::default(); - let trie_nodes_sorted = Arc::new(self.input.nodes.into_sorted()); - let hashed_state_sorted = Arc::new(self.input.state.into_sorted()); + let trie_nodes_sorted = self.input.nodes.clone().into_sorted(); + let hashed_state_sorted = self.input.state.clone().into_sorted(); // Extend prefix sets with targets let mut prefix_sets = self.input.prefix_sets.clone(); diff --git a/crates/trie/parallel/src/root.rs b/crates/trie/parallel/src/root.rs index 7a316d8b15fb..8d2b18f5e111 100644 --- a/crates/trie/parallel/src/root.rs +++ b/crates/trie/parallel/src/root.rs @@ -4,6 +4,7 @@ use crate::{stats::ParallelTrieTracker, storage_root_targets::StorageRootTargets use alloy_primitives::B256; use alloy_rlp::{BufMut, Encodable}; use itertools::Itertools; +use reth_db::DatabaseError; use reth_execution_errors::StorageRootError; use reth_provider::{ providers::ConsistentDbView, BlockReader, DBProvider, 
DatabaseProviderFactory, ProviderError, @@ -193,11 +194,8 @@ where let root = hash_builder.root(); - trie_updates.finalize( - account_node_iter.walker, - hash_builder, - prefix_sets.destroyed_accounts, - ); + let removed_keys = account_node_iter.walker.take_removed_keys(); + trie_updates.finalize(hash_builder, removed_keys, prefix_sets.destroyed_accounts); let stats = tracker.finish(); @@ -228,6 +226,9 @@ pub enum ParallelStateRootError { /// Provider error. #[error(transparent)] Provider(#[from] ProviderError), + /// Other unspecified error. + #[error("{_0}")] + Other(String), } impl From for ProviderError { @@ -237,6 +238,7 @@ impl From for ProviderError { ParallelStateRootError::StorageRoot(StorageRootError::Database(error)) => { Self::Database(error) } + ParallelStateRootError::Other(other) => Self::Database(DatabaseError::Other(other)), } } } diff --git a/crates/trie/sparse/Cargo.toml b/crates/trie/sparse/Cargo.toml index 3301975961e2..efd68020ccd7 100644 --- a/crates/trie/sparse/Cargo.toml +++ b/crates/trie/sparse/Cargo.toml @@ -14,9 +14,9 @@ workspace = true [dependencies] # reth -reth-tracing.workspace = true +reth-primitives-traits.workspace = true reth-trie-common.workspace = true -reth-trie.workspace = true +reth-tracing.workspace = true # alloy alloy-primitives.workspace = true @@ -32,6 +32,7 @@ reth-testing-utils.workspace = true reth-trie = { workspace = true, features = ["test-utils"] } reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] } +arbitrary.workspace = true assert_matches.workspace = true criterion.workspace = true itertools.workspace = true diff --git a/crates/trie/sparse/benches/root.rs b/crates/trie/sparse/benches/root.rs index 30ce566fb5f6..d8d210c1b19d 100644 --- a/crates/trie/sparse/benches/root.rs +++ b/crates/trie/sparse/benches/root.rs @@ -146,7 +146,7 @@ pub fn calculate_root_from_leaves_repeated(c: &mut Criterion) { hb.root(); if storage_updates.peek().is_some() { - 
trie_updates.finalize(node_iter.walker, hb); + trie_updates.finalize(hb, node_iter.walker.take_removed_keys()); } } }, diff --git a/crates/trie/sparse/src/errors.rs b/crates/trie/sparse/src/errors.rs index 506b206fdd79..a38a92395d9b 100644 --- a/crates/trie/sparse/src/errors.rs +++ b/crates/trie/sparse/src/errors.rs @@ -1,7 +1,7 @@ //! Errors for sparse trie. use alloy_primitives::{Bytes, B256}; -use reth_trie::Nibbles; +use reth_trie_common::Nibbles; use thiserror::Error; use crate::SparseNode; diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 0b0db1401150..549a86733f87 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -1,22 +1,43 @@ -use std::iter::Peekable; - -use crate::{SparseStateTrieError, SparseStateTrieResult, SparseTrie}; +use crate::{ + RevealedSparseTrie, SparseStateTrieError, SparseStateTrieResult, SparseTrie, SparseTrieError, +}; use alloy_primitives::{ map::{HashMap, HashSet}, Bytes, B256, }; -use alloy_rlp::Decodable; -use reth_trie::{Nibbles, TrieNode}; +use alloy_rlp::{Decodable, Encodable}; +use reth_primitives_traits::Account; +use reth_trie_common::{ + updates::{StorageTrieUpdates, TrieUpdates}, + MultiProof, Nibbles, TrieAccount, TrieNode, EMPTY_ROOT_HASH, TRIE_ACCOUNT_RLP_MAX_SIZE, +}; +use std::iter::Peekable; /// Sparse state trie representing lazy-loaded Ethereum state trie. -#[derive(Default, Debug)] +#[derive(Debug)] pub struct SparseStateTrie { /// Sparse account trie. - pub(crate) state: SparseTrie, + state: SparseTrie, /// Sparse storage tries. - pub(crate) storages: HashMap, + storages: HashMap, /// Collection of revealed account and storage keys. - pub(crate) revealed: HashMap>, + revealed: HashMap>, + /// Flag indicating whether trie updates should be retained. + retain_updates: bool, + /// Reusable buffer for RLP encoding of trie accounts. 
+ account_rlp_buf: Vec, +} + +impl Default for SparseStateTrie { + fn default() -> Self { + Self { + state: Default::default(), + storages: Default::default(), + revealed: Default::default(), + retain_updates: false, + account_rlp_buf: Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE), + } + } } impl SparseStateTrie { @@ -25,6 +46,12 @@ impl SparseStateTrie { Self { state, ..Default::default() } } + /// Set the retention of branch node updates and deletions. + pub const fn with_updates(mut self, retain_updates: bool) -> Self { + self.retain_updates = retain_updates; + self + } + /// Returns `true` if account was already revealed. pub fn is_account_revealed(&self, account: &B256) -> bool { self.revealed.contains_key(account) @@ -35,6 +62,11 @@ impl SparseStateTrie { self.revealed.get(account).is_some_and(|slots| slots.contains(slot)) } + /// Returns mutable reference to storage sparse trie if it was revealed. + pub fn storage_trie_mut(&mut self, account: &B256) -> Option<&mut RevealedSparseTrie> { + self.storages.get_mut(account).and_then(|e| e.as_revealed_mut()) + } + /// Reveal unknown trie paths from provided leaf path and its proof for the account. /// NOTE: This method does not extensively validate the proof. pub fn reveal_account( @@ -42,16 +74,16 @@ impl SparseStateTrie { account: B256, proof: impl IntoIterator, ) -> SparseStateTrieResult<()> { - if self.revealed.contains_key(&account) { + if self.is_account_revealed(&account) { return Ok(()); } let mut proof = proof.into_iter().peekable(); - let Some(root_node) = self.validate_proof(&mut proof)? else { return Ok(()) }; + let Some(root_node) = self.validate_root_node(&mut proof)? else { return Ok(()) }; // Reveal root node if it wasn't already. - let trie = self.state.reveal_root(root_node)?; + let trie = self.state.reveal_root(root_node, self.retain_updates)?; // Reveal the remaining proof nodes. 
for (path, bytes) in proof { @@ -73,16 +105,20 @@ impl SparseStateTrie { slot: B256, proof: impl IntoIterator, ) -> SparseStateTrieResult<()> { - if self.revealed.get(&account).is_some_and(|v| v.contains(&slot)) { + if self.is_storage_slot_revealed(&account, &slot) { return Ok(()); } let mut proof = proof.into_iter().peekable(); - let Some(root_node) = self.validate_proof(&mut proof)? else { return Ok(()) }; + let Some(root_node) = self.validate_root_node(&mut proof)? else { return Ok(()) }; // Reveal root node if it wasn't already. - let trie = self.storages.entry(account).or_default().reveal_root(root_node)?; + let trie = self + .storages + .entry(account) + .or_default() + .reveal_root(root_node, self.retain_updates)?; // Reveal the remaining proof nodes. for (path, bytes) in proof { @@ -96,8 +132,56 @@ impl SparseStateTrie { Ok(()) } + /// Reveal unknown trie paths from multiproof and the list of included accounts and slots. + /// NOTE: This method does not extensively validate the proof. + pub fn reveal_multiproof( + &mut self, + targets: HashMap>, + multiproof: MultiProof, + ) -> SparseStateTrieResult<()> { + let account_subtree = multiproof.account_subtree.into_nodes_sorted(); + let mut account_nodes = account_subtree.into_iter().peekable(); + + if let Some(root_node) = self.validate_root_node(&mut account_nodes)? { + // Reveal root node if it wasn't already. + let trie = self.state.reveal_root(root_node, self.retain_updates)?; + + // Reveal the remaining proof nodes. + for (path, bytes) in account_nodes { + let node = TrieNode::decode(&mut &bytes[..])?; + trie.reveal_node(path, node)?; + } + } + + for (account, storage_subtree) in multiproof.storages { + let storage_subtree = storage_subtree.subtree.into_nodes_sorted(); + let mut storage_nodes = storage_subtree.into_iter().peekable(); + + if let Some(root_node) = self.validate_root_node(&mut storage_nodes)? { + // Reveal root node if it wasn't already. 
+ let trie = self + .storages + .entry(account) + .or_default() + .reveal_root(root_node, self.retain_updates)?; + + // Reveal the remaining proof nodes. + for (path, bytes) in storage_nodes { + let node = TrieNode::decode(&mut &bytes[..])?; + trie.reveal_node(path, node)?; + } + } + } + + for (account, slots) in targets { + self.revealed.entry(account).or_default().extend(slots); + } + + Ok(()) + } + /// Validates the root node of the proof and returns it if it exists and is valid. - fn validate_proof>( + fn validate_root_node>( &self, proof: &mut Peekable, ) -> SparseStateTrieResult> { @@ -118,30 +202,139 @@ impl SparseStateTrie { Ok(Some(root_node)) } - /// Update the leaf node. - pub fn update_leaf(&mut self, path: Nibbles, value: Vec) -> SparseStateTrieResult<()> { + /// Update or remove trie account based on new account info. This method will either recompute + /// the storage root based on update storage trie or look it up from existing leaf value. + /// + /// If the new account info and storage trie are empty, the account leaf will be removed. + pub fn update_account(&mut self, address: B256, account: Account) -> SparseStateTrieResult<()> { + let nibbles = Nibbles::unpack(address); + let storage_root = if let Some(storage_trie) = self.storages.get_mut(&address) { + storage_trie.root().ok_or(SparseTrieError::Blind)? + } else if self.revealed.contains_key(&address) { + let state = self.state.as_revealed_mut().ok_or(SparseTrieError::Blind)?; + // The account was revealed, either... + if let Some(value) = state.get_leaf_value(&nibbles) { + // ..it exists and we should take it's current storage root or... + TrieAccount::decode(&mut &value[..])?.storage_root + } else { + // ...the account is newly created and the storage trie is empty. 
+ EMPTY_ROOT_HASH + } + } else { + return Err(SparseTrieError::Blind.into()) + }; + + if account.is_empty() && storage_root == EMPTY_ROOT_HASH { + self.remove_account_leaf(&nibbles) + } else { + self.account_rlp_buf.clear(); + TrieAccount::from((account, storage_root)).encode(&mut self.account_rlp_buf); + self.update_account_leaf(nibbles, self.account_rlp_buf.clone()) + } + } + + /// Update the account leaf node. + pub fn update_account_leaf( + &mut self, + path: Nibbles, + value: Vec, + ) -> SparseStateTrieResult<()> { self.state.update_leaf(path, value)?; Ok(()) } - /// Returns sparse trie root if the trie has been revealed. - pub fn root(&mut self) -> Option { - self.state.root() + /// Remove the account leaf node. + pub fn remove_account_leaf(&mut self, path: &Nibbles) -> SparseStateTrieResult<()> { + self.state.remove_leaf(path)?; + Ok(()) + } + + /// Update the leaf node of a storage trie at the provided address. + pub fn update_storage_leaf( + &mut self, + address: B256, + slot: Nibbles, + value: Vec, + ) -> SparseStateTrieResult<()> { + self.storages.entry(address).or_default().update_leaf(slot, value)?; + Ok(()) + } + + /// Update the leaf node of a storage trie at the provided address. + pub fn remove_storage_leaf( + &mut self, + address: B256, + slot: &Nibbles, + ) -> SparseStateTrieResult<()> { + self.storages.entry(address).or_default().remove_leaf(slot)?; + Ok(()) + } + + /// Wipe the storage trie at the provided address. + pub fn wipe_storage(&mut self, address: B256) -> SparseStateTrieResult<()> { + if let Some(trie) = self.storages.get_mut(&address) { + trie.wipe()?; + } + Ok(()) + } + + /// Calculates the hashes of the nodes below the provided level. + pub fn calculate_below_level(&mut self, level: usize) { + self.state.calculate_below_level(level); } /// Returns storage sparse trie root if the trie has been revealed. 
pub fn storage_root(&mut self, account: B256) -> Option { self.storages.get_mut(&account).and_then(|trie| trie.root()) } + + /// Returns sparse trie root if the trie has been revealed. + pub fn root(&mut self) -> Option { + self.state.root() + } + + /// Returns [`TrieUpdates`] by taking the updates from the revealed sparse tries. + /// + /// Returns `None` if the accounts trie is not revealed. + pub fn take_trie_updates(&mut self) -> Option { + self.state.as_revealed_mut().map(|state| { + let updates = state.take_updates(); + TrieUpdates { + account_nodes: updates.updated_nodes, + removed_nodes: updates.removed_nodes, + storage_tries: self + .storages + .iter_mut() + .map(|(address, trie)| { + let trie = trie.as_revealed_mut().unwrap(); + let updates = trie.take_updates(); + let updates = StorageTrieUpdates { + is_deleted: updates.wiped, + storage_nodes: updates.updated_nodes, + removed_nodes: updates.removed_nodes, + }; + (*address, updates) + }) + .filter(|(_, updates)| !updates.is_empty()) + .collect(), + } + }) + } } #[cfg(test)] mod tests { use super::*; - use alloy_primitives::Bytes; + use alloy_primitives::{b256, Bytes, U256}; use alloy_rlp::EMPTY_STRING_CODE; + use arbitrary::Arbitrary; use assert_matches::assert_matches; - use reth_trie::HashBuilder; + use rand::{rngs::StdRng, Rng, SeedableRng}; + use reth_primitives_traits::Account; + use reth_trie::{ + updates::StorageTrieUpdates, BranchNodeCompact, HashBuilder, TrieAccount, TrieMask, + EMPTY_ROOT_HASH, + }; use reth_trie_common::proof::ProofRetainer; #[test] @@ -149,7 +342,7 @@ mod tests { let sparse = SparseStateTrie::default(); let proof = [(Nibbles::from_nibbles([0x1]), Bytes::from([EMPTY_STRING_CODE]))]; assert_matches!( - sparse.validate_proof(&mut proof.into_iter().peekable()), + sparse.validate_root_node(&mut proof.into_iter().peekable()), Err(SparseStateTrieError::InvalidRootNode { .. 
}) ); } @@ -162,7 +355,7 @@ mod tests { (Nibbles::from_nibbles([0x1]), Bytes::new()), ]; assert_matches!( - sparse.validate_proof(&mut proof.into_iter().peekable()), + sparse.validate_root_node(&mut proof.into_iter().peekable()), Err(SparseStateTrieError::InvalidRootNode { .. }) ); } @@ -199,4 +392,139 @@ mod tests { HashMap::from_iter([(Default::default(), SparseTrie::revealed_empty())]) ); } + + #[test] + fn take_trie_updates() { + reth_tracing::init_test_tracing(); + + // let mut rng = generators::rng(); + let mut rng = StdRng::seed_from_u64(1); + + let mut bytes = [0u8; 1024]; + rng.fill(bytes.as_mut_slice()); + + let slot_1 = b256!("1000000000000000000000000000000000000000000000000000000000000000"); + let slot_path_1 = Nibbles::unpack(slot_1); + let value_1 = U256::from(rng.gen::()); + let slot_2 = b256!("1100000000000000000000000000000000000000000000000000000000000000"); + let slot_path_2 = Nibbles::unpack(slot_2); + let value_2 = U256::from(rng.gen::()); + let slot_3 = b256!("2000000000000000000000000000000000000000000000000000000000000000"); + let slot_path_3 = Nibbles::unpack(slot_3); + let value_3 = U256::from(rng.gen::()); + + let mut storage_hash_builder = + HashBuilder::default().with_proof_retainer(ProofRetainer::from_iter([ + slot_path_1.clone(), + slot_path_2.clone(), + ])); + storage_hash_builder.add_leaf(slot_path_1.clone(), &alloy_rlp::encode_fixed_size(&value_1)); + storage_hash_builder.add_leaf(slot_path_2.clone(), &alloy_rlp::encode_fixed_size(&value_2)); + + let storage_root = storage_hash_builder.root(); + let proof_nodes = storage_hash_builder.take_proof_nodes(); + let storage_proof_1 = proof_nodes.matching_nodes_sorted(&slot_path_1); + let storage_proof_2 = proof_nodes.matching_nodes_sorted(&slot_path_2); + + let address_1 = b256!("1000000000000000000000000000000000000000000000000000000000000000"); + let address_path_1 = Nibbles::unpack(address_1); + let account_1 = Account::arbitrary(&mut arbitrary::Unstructured::new(&bytes)).unwrap(); + 
let mut trie_account_1 = TrieAccount::from((account_1, storage_root)); + let address_2 = b256!("1100000000000000000000000000000000000000000000000000000000000000"); + let address_path_2 = Nibbles::unpack(address_2); + let account_2 = Account::arbitrary(&mut arbitrary::Unstructured::new(&bytes)).unwrap(); + let mut trie_account_2 = TrieAccount::from((account_2, EMPTY_ROOT_HASH)); + + let mut hash_builder = + HashBuilder::default().with_proof_retainer(ProofRetainer::from_iter([ + address_path_1.clone(), + address_path_2.clone(), + ])); + hash_builder.add_leaf(address_path_1.clone(), &alloy_rlp::encode(trie_account_1)); + hash_builder.add_leaf(address_path_2.clone(), &alloy_rlp::encode(trie_account_2)); + + let root = hash_builder.root(); + let proof_nodes = hash_builder.take_proof_nodes(); + let proof_1 = proof_nodes.matching_nodes_sorted(&address_path_1); + let proof_2 = proof_nodes.matching_nodes_sorted(&address_path_2); + + let mut sparse = SparseStateTrie::default().with_updates(true); + sparse.reveal_account(address_1, proof_1).unwrap(); + sparse.reveal_account(address_2, proof_2).unwrap(); + sparse.reveal_storage_slot(address_1, slot_1, storage_proof_1.clone()).unwrap(); + sparse.reveal_storage_slot(address_1, slot_2, storage_proof_2.clone()).unwrap(); + sparse.reveal_storage_slot(address_2, slot_1, storage_proof_1).unwrap(); + sparse.reveal_storage_slot(address_2, slot_2, storage_proof_2).unwrap(); + + assert_eq!(sparse.root(), Some(root)); + + let address_3 = b256!("2000000000000000000000000000000000000000000000000000000000000000"); + let address_path_3 = Nibbles::unpack(address_3); + let account_3 = Account { nonce: account_1.nonce + 1, ..account_1 }; + let trie_account_3 = TrieAccount::from((account_3, EMPTY_ROOT_HASH)); + + sparse.update_account_leaf(address_path_3, alloy_rlp::encode(trie_account_3)).unwrap(); + + sparse.update_storage_leaf(address_1, slot_path_3, alloy_rlp::encode(value_3)).unwrap(); + trie_account_1.storage_root = 
sparse.storage_root(address_1).unwrap(); + sparse.update_account_leaf(address_path_1, alloy_rlp::encode(trie_account_1)).unwrap(); + + sparse.wipe_storage(address_2).unwrap(); + trie_account_2.storage_root = sparse.storage_root(address_2).unwrap(); + sparse.update_account_leaf(address_path_2, alloy_rlp::encode(trie_account_2)).unwrap(); + + sparse.root(); + + let sparse_updates = sparse.take_trie_updates().unwrap(); + // TODO(alexey): assert against real state root calculation updates + pretty_assertions::assert_eq!( + sparse_updates, + TrieUpdates { + account_nodes: HashMap::from_iter([ + ( + Nibbles::default(), + BranchNodeCompact { + state_mask: TrieMask::new(0b110), + tree_mask: TrieMask::new(0b000), + hash_mask: TrieMask::new(0b010), + hashes: vec![b256!( + "4c4ffbda3569fcf2c24ea2000b4cec86ef8b92cbf9ff415db43184c0f75a212e" + )], + root_hash: Some(b256!( + "60944bd29458529c3065d19f63c6e3d5269596fd3b04ca2e7b318912dc89ca4c" + )) + }, + ), + ]), + storage_tries: HashMap::from_iter([ + ( + b256!("1000000000000000000000000000000000000000000000000000000000000000"), + StorageTrieUpdates { + is_deleted: false, + storage_nodes: HashMap::from_iter([( + Nibbles::default(), + BranchNodeCompact { + state_mask: TrieMask::new(0b110), + tree_mask: TrieMask::new(0b000), + hash_mask: TrieMask::new(0b010), + hashes: vec![b256!("5bc8b4fdf51839c1e18b8d6a4bd3e2e52c9f641860f0e4d197b68c2679b0e436")], + root_hash: Some(b256!("c44abf1a9e1a92736ac479b20328e8d7998aa8838b6ef52620324c9ce85e3201")) + } + )]), + removed_nodes: HashSet::default() + } + ), + ( + b256!("1100000000000000000000000000000000000000000000000000000000000000"), + StorageTrieUpdates { + is_deleted: true, + storage_nodes: HashMap::default(), + removed_nodes: HashSet::default() + } + ) + ]), + removed_nodes: HashSet::default() + } + ); + } } diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index dff290271759..97446680df44 100644 --- a/crates/trie/sparse/src/trie.rs +++ 
b/crates/trie/sparse/src/trie.rs @@ -6,13 +6,10 @@ use alloy_primitives::{ }; use alloy_rlp::Decodable; use reth_tracing::tracing::debug; -use reth_trie::{ - prefix_set::{PrefixSet, PrefixSetMut}, - BranchNodeCompact, RlpNode, -}; use reth_trie_common::{ - BranchNodeRef, ExtensionNodeRef, LeafNodeRef, Nibbles, TrieMask, TrieNode, CHILD_INDEX_RANGE, - EMPTY_ROOT_HASH, + prefix_set::{PrefixSet, PrefixSetMut}, + BranchNodeCompact, BranchNodeRef, ExtensionNodeRef, LeafNodeRef, Nibbles, RlpNode, TrieMask, + TrieNode, CHILD_INDEX_RANGE, EMPTY_ROOT_HASH, }; use smallvec::SmallVec; use std::{borrow::Cow, fmt}; @@ -53,9 +50,13 @@ impl SparseTrie { /// # Returns /// /// Mutable reference to [`RevealedSparseTrie`]. - pub fn reveal_root(&mut self, root: TrieNode) -> SparseTrieResult<&mut RevealedSparseTrie> { + pub fn reveal_root( + &mut self, + root: TrieNode, + retain_updates: bool, + ) -> SparseTrieResult<&mut RevealedSparseTrie> { if self.is_blind() { - *self = Self::Revealed(Box::new(RevealedSparseTrie::from_root(root)?)) + *self = Self::Revealed(Box::new(RevealedSparseTrie::from_root(root, retain_updates)?)) } Ok(self.as_revealed_mut().unwrap()) } @@ -67,10 +68,29 @@ impl SparseTrie { Ok(()) } + /// Remove the leaf node. + pub fn remove_leaf(&mut self, path: &Nibbles) -> SparseTrieResult<()> { + let revealed = self.as_revealed_mut().ok_or(SparseTrieError::Blind)?; + revealed.remove_leaf(path)?; + Ok(()) + } + + /// Wipe the trie, removing all values and nodes, and replacing the root with an empty node. + pub fn wipe(&mut self) -> SparseTrieResult<()> { + let revealed = self.as_revealed_mut().ok_or(SparseTrieError::Blind)?; + revealed.wipe(); + Ok(()) + } + /// Calculates and returns the trie root if the trie has been revealed. pub fn root(&mut self) -> Option { Some(self.as_revealed_mut()?.root()) } + + /// Calculates the hashes of the nodes below the provided level. 
+ pub fn calculate_below_level(&mut self, level: usize) { + self.as_revealed_mut().unwrap().update_rlp_node_level(level); + } } /// The representation of revealed sparse trie. @@ -91,6 +111,7 @@ pub struct RevealedSparseTrie { prefix_set: PrefixSetMut, /// Reusable buffer for RLP encoding of nodes. rlp_buf: Vec, + /// Retained trie updates. updates: Option, } @@ -120,19 +141,20 @@ impl Default for RevealedSparseTrie { impl RevealedSparseTrie { /// Create new revealed sparse trie from the given root node. - pub fn from_root(node: TrieNode) -> SparseTrieResult { + pub fn from_root(node: TrieNode, retain_updates: bool) -> SparseTrieResult { let mut this = Self { nodes: HashMap::default(), values: HashMap::default(), prefix_set: PrefixSetMut::default(), rlp_buf: Vec::new(), updates: None, - }; + } + .with_updates(retain_updates); this.reveal_node(Nibbles::default(), node)?; Ok(this) } - /// Makes the sparse trie to store updated branch nodes. + /// Set the retention of branch node updates and deletions. pub fn with_updates(mut self, retain_updates: bool) -> Self { if retain_updates { self.updates = Some(SparseTrieUpdates::default()); @@ -145,6 +167,11 @@ impl RevealedSparseTrie { self.updates.as_ref().map_or(Cow::Owned(SparseTrieUpdates::default()), Cow::Borrowed) } + /// Returns a reference to the leaf value if present. + pub fn get_leaf_value(&self, path: &Nibbles) -> Option<&Vec> { + self.values.get(path) + } + /// Takes and returns the retained sparse node updates pub fn take_updates(&mut self) -> SparseTrieUpdates { self.updates.take().unwrap_or_default() @@ -152,7 +179,6 @@ impl RevealedSparseTrie { /// Reveal the trie node only if it was not known already. 
pub fn reveal_node(&mut self, path: Nibbles, node: TrieNode) -> SparseTrieResult<()> { - // TODO: revise all inserts to not overwrite existing entries match node { TrieNode::EmptyRoot => { debug_assert!(path.is_empty()); @@ -580,6 +606,14 @@ impl RevealedSparseTrie { Ok(nodes) } + /// Wipe the trie, removing all values and nodes, and replacing the root with an empty node. + pub fn wipe(&mut self) { + let updates_retained = self.updates.is_some(); + *self = Self::default(); + self.prefix_set = PrefixSetMut::all(); + self.updates = updates_retained.then(SparseTrieUpdates::wiped); + } + /// Return the root of the sparse trie. /// Updates all remaining dirty nodes before calculating the root. pub fn root(&mut self) -> B256 { @@ -773,8 +807,7 @@ impl RevealedSparseTrie { } // Set the hash mask. If a child node has a hash value AND is a - // branch node, set the hash mask - // and save the hash. + // branch node, set the hash mask and save the hash. let hash = child.as_hash().filter(|_| node_type.is_branch()); hash_mask_values.push(hash.is_some()); if let Some(hash) = hash { @@ -998,14 +1031,20 @@ impl RlpNodeBuffers { /// The aggregation of sparse trie updates. #[derive(Debug, Clone, Default, PartialEq, Eq)] pub struct SparseTrieUpdates { - updated_nodes: HashMap, - removed_nodes: HashSet, + pub(crate) updated_nodes: HashMap, + pub(crate) removed_nodes: HashSet, + pub(crate) wiped: bool, +} + +impl SparseTrieUpdates { + /// Create new wiped sparse trie updates. + pub fn wiped() -> Self { + Self { wiped: true, ..Default::default() } + } } #[cfg(test)] mod tests { - use std::collections::BTreeMap; - use super::*; use alloy_primitives::{map::HashSet, U256}; use alloy_rlp::Encodable; @@ -1027,6 +1066,7 @@ mod tests { proof::{ProofNodes, ProofRetainer}, HashBuilder, }; + use std::collections::BTreeMap; /// Pad nibbles to the length of a B256 hash with zeros on the left. 
fn pad_nibbles_left(nibbles: Nibbles) -> Nibbles { @@ -1560,7 +1600,7 @@ mod tests { TrieMask::new(0b11), )); - let mut sparse = RevealedSparseTrie::from_root(branch.clone()).unwrap(); + let mut sparse = RevealedSparseTrie::from_root(branch.clone(), false).unwrap(); // Reveal a branch node and one of its children // @@ -1722,6 +1762,7 @@ mod tests { .take_proof_nodes(); let mut sparse = RevealedSparseTrie::from_root( TrieNode::decode(&mut &proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + false, ) .unwrap(); @@ -1796,6 +1837,7 @@ mod tests { .take_proof_nodes(); let mut sparse = RevealedSparseTrie::from_root( TrieNode::decode(&mut &proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + false, ) .unwrap(); @@ -1866,6 +1908,7 @@ mod tests { .take_proof_nodes(); let mut sparse = RevealedSparseTrie::from_root( TrieNode::decode(&mut &proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + false, ) .unwrap(); @@ -1993,4 +2036,44 @@ mod tests { assert_eq!(sparse_root, hash_builder.root()); assert_eq!(sparse_updates.updated_nodes, hash_builder.updated_branch_nodes.take().unwrap()); } + + #[test] + fn sparse_trie_wipe() { + let mut sparse = RevealedSparseTrie::default().with_updates(true); + + let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); + + // Extension (Key = 5) – Level 0 + // └── Branch (Mask = 1011) – Level 1 + // ├── 0 -> Extension (Key = 23) – Level 2 + // │ └── Branch (Mask = 0101) – Level 3 + // │ ├── 1 -> Leaf (Key = 1, Path = 50231) – Level 4 + // │ └── 3 -> Leaf (Key = 3, Path = 50233) – Level 4 + // ├── 2 -> Leaf (Key = 013, Path = 52013) – Level 2 + // └── 3 -> Branch (Mask = 0101) – Level 2 + // ├── 1 -> Leaf (Key = 3102, Path = 53102) – Level 3 + // └── 3 -> Branch (Mask = 1010) – Level 3 + // ├── 0 -> Leaf (Key = 3302, Path = 53302) – Level 4 + // └── 2 -> Leaf (Key = 3320, Path = 53320) – Level 4 + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 
0x2, 0x3, 0x3]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]), value.clone()) + .unwrap(); + sparse.update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), value).unwrap(); + + sparse.wipe(); + + assert_eq!(sparse.root(), EMPTY_ROOT_HASH); + } } diff --git a/crates/trie/trie/Cargo.toml b/crates/trie/trie/Cargo.toml index 30ada617146d..9b681559541f 100644 --- a/crates/trie/trie/Cargo.toml +++ b/crates/trie/trie/Cargo.toml @@ -44,12 +44,6 @@ metrics = { workspace = true, optional = true } # `test-utils` feature triehash = { version = "0.8", optional = true } -# `serde` feature -serde = { workspace = true, optional = true } - -# `serde-bincode-compat` feature -serde_with = { workspace = true, optional = true } - [dev-dependencies] # reth reth-primitives = { workspace = true, features = ["test-utils", "arbitrary"] } @@ -63,29 +57,21 @@ proptest.workspace = true proptest-arbitrary-interop.workspace = true serde_json.workspace = true criterion.workspace = true -bincode.workspace = true [features] metrics = ["reth-metrics", "dep:metrics"] serde = [ - "dep:serde", - "alloy-consensus/serde", "alloy-primitives/serde", - "revm/serde", + "alloy-consensus/serde", "alloy-trie/serde", - "reth-primitives-traits/serde" -] -serde-bincode-compat = [ - "serde_with", - "reth-primitives/serde-bincode-compat", - "alloy-consensus/serde-bincode-compat", - "reth-primitives-traits/serde-bincode-compat" + "revm/serde", + "reth-trie-common/serde" ] test-utils = [ "triehash", - "reth-trie-common/test-utils", - "reth-primitives/test-utils", "revm/test-utils", + "reth-primitives/test-utils", + "reth-trie-common/test-utils", "reth-stages-types/test-utils", "reth-primitives-traits/test-utils" ] @@ -95,10 +81,6 @@ scroll = [ 
"reth-trie-common/scroll" ] -[[bench]] -name = "prefix_set" -harness = false - [[bench]] name = "hash_post_state" harness = false diff --git a/crates/trie/trie/src/hashed_cursor/post_state.rs b/crates/trie/trie/src/hashed_cursor/post_state.rs index 7521bb1b2bc5..e0689d450873 100644 --- a/crates/trie/trie/src/hashed_cursor/post_state.rs +++ b/crates/trie/trie/src/hashed_cursor/post_state.rs @@ -3,10 +3,9 @@ use crate::{ forward_cursor::ForwardInMemoryCursor, HashedAccountsSorted, HashedPostStateSorted, HashedStorageSorted, }; -use alloy_primitives::{B256, U256}; +use alloy_primitives::{map::HashSet, B256, U256}; use reth_primitives::Account; use reth_storage_errors::db::DatabaseError; -use std::collections::HashSet; /// The hashed cursor factory for the post state. #[derive(Clone, Debug)] diff --git a/crates/trie/trie/src/input.rs b/crates/trie/trie/src/input.rs index 18f9ada2f4ab..ea71558c2c1f 100644 --- a/crates/trie/trie/src/input.rs +++ b/crates/trie/trie/src/input.rs @@ -1,7 +1,7 @@ use crate::{prefix_set::TriePrefixSetsMut, updates::TrieUpdates, HashedPostState}; /// Inputs for trie-related computations. -#[derive(Default, Debug)] +#[derive(Default, Debug, Clone)] pub struct TrieInput { /// The collection of cached in-memory intermediate trie nodes that /// can be reused for computation. diff --git a/crates/trie/trie/src/lib.rs b/crates/trie/trie/src/lib.rs index 26bdc751124f..1e7eeb9b52b8 100644 --- a/crates/trie/trie/src/lib.rs +++ b/crates/trie/trie/src/lib.rs @@ -13,14 +13,6 @@ )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -/// Constants related to the trie computation. -mod constants; -pub use constants::*; - -/// The implementation of a container for storing intermediate changes to a trie. -/// The container indicates when the trie has been modified. -pub mod prefix_set; - /// The implementation of forward-only in-memory cursor. 
pub mod forward_cursor; @@ -54,9 +46,6 @@ pub mod witness; mod trie; pub use trie::{StateRoot, StorageRoot}; -/// Buffer for trie updates. -pub mod updates; - /// Utilities for state root checkpoint progress. mod progress; pub use progress::{IntermediateStateRootState, StateRootProgress}; @@ -67,17 +56,6 @@ pub mod stats; // re-export for convenience pub use reth_trie_common::*; -/// Bincode-compatible serde implementations for trie types. -/// -/// `bincode` crate allows for more efficient serialization of trie types, because it allows -/// non-string map keys. -/// -/// Read more: -#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] -pub mod serde_bincode_compat { - pub use super::updates::serde_bincode_compat as updates; -} - /// Trie calculation metrics. #[cfg(feature = "metrics")] pub mod metrics; diff --git a/crates/trie/trie/src/proof.rs b/crates/trie/trie/src/proof.rs index 895a3de153dc..34315416cb8d 100644 --- a/crates/trie/trie/src/proof.rs +++ b/crates/trie/trie/src/proof.rs @@ -103,7 +103,10 @@ where let retainer = targets.keys().map(Nibbles::unpack).collect(); let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); - let mut storages = HashMap::default(); + // Initialize all storage multiproofs as empty. + // Storage multiproofs for non empty tries will be overwritten if necessary. + let mut storages: HashMap<_, _> = + targets.keys().map(|key| (*key, StorageMultiProof::empty())).collect(); let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); let mut account_node_iter = TrieNodeIter::new(walker, hashed_account_cursor); while let Some(account_node) = account_node_iter.try_next()? { @@ -132,6 +135,8 @@ where account.encode(&mut account_rlp as &mut dyn BufMut); hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp); + + // Overwrite storage multiproof. 
storages.insert(hashed_address, storage_multiproof); } } diff --git a/crates/trie/trie/src/state.rs b/crates/trie/trie/src/state.rs index 0a69dcf9da80..d2536f41d51d 100644 --- a/crates/trie/trie/src/state.rs +++ b/crates/trie/trie/src/state.rs @@ -2,15 +2,16 @@ use crate::{ prefix_set::{PrefixSetMut, TriePrefixSetsMut}, Nibbles, }; -use alloy_primitives::{keccak256, Address, B256, U256}; +use alloy_primitives::{ + keccak256, + map::{hash_map, HashMap, HashSet}, + Address, B256, U256, +}; use itertools::Itertools; use rayon::prelude::{IntoParallelIterator, ParallelIterator}; use reth_primitives::Account; use revm::db::{states::CacheAccount, AccountStatus, BundleAccount}; -use std::{ - borrow::Cow, - collections::{hash_map, HashMap, HashSet}, -}; +use std::borrow::Cow; /// Representation of in-memory hashed state. #[derive(PartialEq, Eq, Clone, Default, Debug)] @@ -41,8 +42,8 @@ impl HashedPostState { }) .collect::, HashedStorage))>>(); - let mut accounts = HashMap::with_capacity(hashed.len()); - let mut storages = HashMap::with_capacity(hashed.len()); + let mut accounts = HashMap::with_capacity_and_hasher(hashed.len(), Default::default()); + let mut storages = HashMap::with_capacity_and_hasher(hashed.len(), Default::default()); for (address, (account, storage)) in hashed { accounts.insert(address, account); storages.insert(address, storage); @@ -69,8 +70,8 @@ impl HashedPostState { }) .collect::, HashedStorage))>>(); - let mut accounts = HashMap::with_capacity(hashed.len()); - let mut storages = HashMap::with_capacity(hashed.len()); + let mut accounts = HashMap::with_capacity_and_hasher(hashed.len(), Default::default()); + let mut storages = HashMap::with_capacity_and_hasher(hashed.len(), Default::default()); for (address, (account, storage)) in hashed { accounts.insert(address, account); storages.insert(address, storage); @@ -80,7 +81,10 @@ impl HashedPostState { /// Construct [`HashedPostState`] from a single [`HashedStorage`]. 
pub fn from_hashed_storage(hashed_address: B256, storage: HashedStorage) -> Self { - Self { accounts: HashMap::default(), storages: HashMap::from([(hashed_address, storage)]) } + Self { + accounts: HashMap::default(), + storages: HashMap::from_iter([(hashed_address, storage)]), + } } /// Set account entries on hashed state. @@ -122,7 +126,8 @@ impl HashedPostState { } // Populate storage prefix sets. - let mut storage_prefix_sets = HashMap::with_capacity(self.storages.len()); + let mut storage_prefix_sets = + HashMap::with_capacity_and_hasher(self.storages.len(), Default::default()); for (hashed_address, hashed_storage) in &self.storages { account_prefix_set.insert(Nibbles::unpack(hashed_address)); storage_prefix_sets.insert(*hashed_address, hashed_storage.construct_prefix_set()); diff --git a/crates/trie/trie/src/trie.rs b/crates/trie/trie/src/trie.rs index 74faf7bbc60f..28517b23e90f 100644 --- a/crates/trie/trie/src/trie.rs +++ b/crates/trie/trie/src/trie.rs @@ -258,11 +258,8 @@ where let root = hash_builder.root(); - trie_updates.finalize( - account_node_iter.walker, - hash_builder, - self.prefix_sets.destroyed_accounts, - ); + let removed_keys = account_node_iter.walker.take_removed_keys(); + trie_updates.finalize(hash_builder, removed_keys, self.prefix_sets.destroyed_accounts); let stats = tracker.finish(); @@ -434,7 +431,8 @@ where let root = hash_builder.root(); let mut trie_updates = StorageTrieUpdates::default(); - trie_updates.finalize(storage_node_iter.walker, hash_builder); + let removed_keys = storage_node_iter.walker.take_removed_keys(); + trie_updates.finalize(hash_builder, removed_keys); let stats = tracker.finish(); diff --git a/crates/trie/trie/src/trie_cursor/in_memory.rs b/crates/trie/trie/src/trie_cursor/in_memory.rs index 4a34fd31ad11..fa59b70d1fd9 100644 --- a/crates/trie/trie/src/trie_cursor/in_memory.rs +++ b/crates/trie/trie/src/trie_cursor/in_memory.rs @@ -3,10 +3,9 @@ use crate::{ forward_cursor::ForwardInMemoryCursor, 
updates::{StorageTrieUpdatesSorted, TrieUpdatesSorted}, }; -use alloy_primitives::B256; +use alloy_primitives::{map::HashSet, B256}; use reth_storage_errors::db::DatabaseError; use reth_trie_common::{BranchNodeCompact, Nibbles}; -use std::collections::HashSet; /// The trie cursor factory for the trie updates. #[derive(Debug, Clone)] diff --git a/crates/trie/trie/src/walker.rs b/crates/trie/trie/src/walker.rs index 774fa64a0efe..d1c5247966da 100644 --- a/crates/trie/trie/src/walker.rs +++ b/crates/trie/trie/src/walker.rs @@ -3,9 +3,8 @@ use crate::{ trie_cursor::{CursorSubNode, TrieCursor}, BranchNodeCompact, Nibbles, }; -use alloy_primitives::B256; +use alloy_primitives::{map::HashSet, B256}; use reth_storage_errors::db::DatabaseError; -use std::collections::HashSet; #[cfg(feature = "metrics")] use crate::metrics::WalkerMetrics; @@ -58,8 +57,13 @@ impl TrieWalker { /// Split the walker into stack and trie updates. pub fn split(mut self) -> (Vec, HashSet) { - let keys = self.removed_keys.take(); - (self.stack, keys.unwrap_or_default()) + let keys = self.take_removed_keys(); + (self.stack, keys) + } + + /// Take removed keys from the walker. + pub fn take_removed_keys(&mut self) -> HashSet { + self.removed_keys.take().unwrap_or_default() } /// Prints the current stack of trie nodes. 
diff --git a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs index d2077edafff4..5ab851191843 100644 --- a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs +++ b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs @@ -98,6 +98,7 @@ where fn process_block(&mut self, block: &SealedBlockWithSenders) { let txs: Vec<_> = block .transactions() + .iter() .filter(|tx| tx.is_eip4844()) .map(|tx| (tx.clone(), tx.blob_versioned_hashes().unwrap().len())) .collect(); @@ -191,6 +192,7 @@ where for (_, block) in old.blocks().iter() { let txs: Vec = block .transactions() + .iter() .filter(|tx: &&reth::primitives::TransactionSigned| { tx.is_eip4844() }) diff --git a/examples/custom-beacon-withdrawals/src/main.rs b/examples/custom-beacon-withdrawals/src/main.rs index 3b2301024379..ad0ae4bb2634 100644 --- a/examples/custom-beacon-withdrawals/src/main.rs +++ b/examples/custom-beacon-withdrawals/src/main.rs @@ -18,10 +18,7 @@ use reth::{ providers::ProviderError, revm::{ interpreter::Host, - primitives::{ - address, Address, BlockEnv, Bytes, CfgEnvWithHandlerCfg, Env, EnvWithHandlerCfg, - TransactTo, TxEnv, U256, - }, + primitives::{address, Address, Bytes, Env, EnvWithHandlerCfg, TransactTo, TxEnv, U256}, shared::BundleState, Database, DatabaseCommit, Evm, State, }, @@ -142,10 +139,7 @@ where header: &alloy_consensus::Header, total_difficulty: U256, ) -> EnvWithHandlerCfg { - let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); - let mut block_env = BlockEnv::default(); - self.evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, header, total_difficulty); - + let (cfg, block_env) = self.evm_config.cfg_and_block_env(header, total_difficulty); EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) } } diff --git a/examples/custom-dev-node/src/main.rs b/examples/custom-dev-node/src/main.rs index f6cabf47a744..ffe6e002b94c 100644 --- 
a/examples/custom-dev-node/src/main.rs +++ b/examples/custom-dev-node/src/main.rs @@ -53,7 +53,7 @@ async fn main() -> eyre::Result<()> { let head = notifications.next().await.unwrap(); - let tx = head.tip().transactions().next().unwrap(); + let tx = &head.tip().transactions()[0]; assert_eq!(tx.hash(), hash); println!("mined transaction: {hash}"); Ok(()) diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index 16a096305a51..c769e5bd5385 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -20,11 +20,6 @@ #![cfg_attr(feature = "scroll", allow(unused_crate_dependencies))] #![cfg(not(feature = "scroll"))] -use std::{convert::Infallible, sync::Arc}; - -use serde::{Deserialize, Serialize}; -use thiserror::Error; - use alloy_eips::eip4895::Withdrawals; use alloy_genesis::Genesis; use alloy_primitives::{Address, B256}; @@ -36,7 +31,7 @@ use alloy_rpc_types::{ Withdrawal, }; use reth::{ - api::PayloadTypes, + api::{InvalidPayloadAttributesError, PayloadTypes}, builder::{ components::{ComponentsBuilder, PayloadServiceBuilder}, node::{NodeTypes, NodeTypesWithEngine}, @@ -45,9 +40,13 @@ use reth::{ PayloadBuilderConfig, }, network::NetworkHandle, - primitives::EthPrimitives, + payload::ExecutionPayloadValidator, + primitives::{Block, EthPrimitives, SealedBlockFor}, providers::{CanonStateSubscriptions, EthStorage, StateProviderFactory}, - rpc::eth::EthApi, + rpc::{ + eth::EthApi, + types::engine::{ExecutionPayload, ExecutionPayloadSidecar, PayloadError}, + }, tasks::TaskManager, transaction_pool::TransactionPool, }; @@ -75,6 +74,9 @@ use reth_payload_builder::{ }; use reth_tracing::{RethTracer, Tracer}; use reth_trie_db::MerklePatriciaTrie; +use serde::{Deserialize, Serialize}; +use std::{convert::Infallible, sync::Arc}; +use thiserror::Error; /// A custom payload attributes type. 
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] @@ -174,19 +176,34 @@ impl EngineTypes for CustomEngineTypes { /// Custom engine validator #[derive(Debug, Clone)] pub struct CustomEngineValidator { - chain_spec: Arc, + inner: ExecutionPayloadValidator, +} + +impl CustomEngineValidator { + /// Instantiates a new validator. + pub const fn new(chain_spec: Arc) -> Self { + Self { inner: ExecutionPayloadValidator::new(chain_spec) } + } + + /// Returns the chain spec used by the validator. + #[inline] + fn chain_spec(&self) -> &ChainSpec { + self.inner.chain_spec() + } } impl EngineValidator for CustomEngineValidator where T: EngineTypes, { + type Block = Block; + fn validate_version_specific_fields( &self, version: EngineApiMessageVersion, payload_or_attrs: PayloadOrAttributes<'_, T::PayloadAttributes>, ) -> Result<(), EngineObjectValidationError> { - validate_version_specific_fields(&self.chain_spec, version, payload_or_attrs) + validate_version_specific_fields(self.chain_spec(), version, payload_or_attrs) } fn ensure_well_formed_attributes( @@ -194,7 +211,7 @@ where version: EngineApiMessageVersion, attributes: &T::PayloadAttributes, ) -> Result<(), EngineObjectValidationError> { - validate_version_specific_fields(&self.chain_spec, version, attributes.into())?; + validate_version_specific_fields(self.chain_spec(), version, attributes.into())?; // custom validation logic - ensure that the custom field is not zero if attributes.custom == 0 { @@ -205,6 +222,23 @@ where Ok(()) } + + fn ensure_well_formed_payload( + &self, + payload: ExecutionPayload, + sidecar: ExecutionPayloadSidecar, + ) -> Result, PayloadError> { + self.inner.ensure_well_formed_payload(payload, sidecar) + } + + fn validate_payload_attributes_against_header( + &self, + _attr: &::PayloadAttributes, + _header: &::Header, + ) -> Result<(), InvalidPayloadAttributesError> { + // skip default timestamp validation + Ok(()) + } } /// Custom engine validator builder @@ -221,7 +255,7 @@ where type 
Validator = CustomEngineValidator; async fn build(self, ctx: &AddOnsContext<'_, N>) -> eyre::Result { - Ok(CustomEngineValidator { chain_spec: ctx.config.chain.clone() }) + Ok(CustomEngineValidator::new(ctx.config.chain.clone())) } } @@ -303,7 +337,11 @@ pub struct CustomPayloadServiceBuilder; impl PayloadServiceBuilder for CustomPayloadServiceBuilder where Node: FullNodeTypes< - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine< + Engine = CustomEngineTypes, + ChainSpec = ChainSpec, + Primitives = EthPrimitives, + >, >, Pool: TransactionPool + Unpin + 'static, { diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index 5eeb3ddea006..4958cb44ce29 100644 --- a/examples/custom-evm/src/main.rs +++ b/examples/custom-evm/src/main.rs @@ -36,7 +36,7 @@ use reth_node_ethereum::{ node::{EthereumAddOns, EthereumPayloadBuilder}, BasicBlockExecutorProvider, EthExecutionStrategyFactory, EthereumNode, }; -use reth_primitives::TransactionSigned; +use reth_primitives::{EthPrimitives, TransactionSigned}; use reth_tracing::{RethTracer, Tracer}; use std::{convert::Infallible, sync::Arc}; @@ -184,7 +184,7 @@ pub struct MyPayloadBuilder { impl PayloadServiceBuilder for MyPayloadBuilder where - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine, Node: FullNodeTypes, Pool: TransactionPool + Unpin + 'static, Types::Engine: PayloadTypes< diff --git a/examples/custom-node-components/src/main.rs b/examples/custom-node-components/src/main.rs index 7bdcc00f0415..80cba50a8438 100644 --- a/examples/custom-node-components/src/main.rs +++ b/examples/custom-node-components/src/main.rs @@ -10,6 +10,7 @@ use reth::{ builder::{components::PoolBuilder, BuilderContext, FullNodeTypes}, chainspec::ChainSpec, cli::Cli, + primitives::EthPrimitives, providers::CanonStateSubscriptions, transaction_pool::{ blobstore::InMemoryBlobStore, EthTransactionPool, TransactionValidationTaskExecutor, @@ -50,7 +51,7 @@ pub struct CustomPoolBuilder { /// This will be used to build 
the transaction pool and its maintenance tasks during launch. impl PoolBuilder for CustomPoolBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes>, { type Pool = EthTransactionPool; diff --git a/examples/custom-payload-builder/src/generator.rs b/examples/custom-payload-builder/src/generator.rs index 2e264d017a3b..da48a0754f9c 100644 --- a/examples/custom-payload-builder/src/generator.rs +++ b/examples/custom-payload-builder/src/generator.rs @@ -9,7 +9,7 @@ use reth::{ use reth_basic_payload_builder::{BasicPayloadJobGeneratorConfig, PayloadBuilder, PayloadConfig}; use reth_node_api::PayloadBuilderAttributes; use reth_payload_builder::{PayloadBuilderError, PayloadJobGenerator}; -use reth_primitives::SealedHeader; +use reth_primitives::{BlockExt, SealedHeader}; use std::sync::Arc; /// The generator type that creates new jobs that builds empty blocks. @@ -48,7 +48,11 @@ impl EmptyBlockPayloadJobGenerator PayloadJobGenerator for EmptyBlockPayloadJobGenerator where - Client: StateProviderFactory + BlockReaderIdExt + Clone + Unpin + 'static, + Client: StateProviderFactory + + BlockReaderIdExt + + Clone + + Unpin + + 'static, Pool: TransactionPool + Unpin + 'static, Tasks: TaskSpawner + Clone + Unpin + 'static, Builder: PayloadBuilder + Unpin + 'static, diff --git a/examples/custom-payload-builder/src/main.rs b/examples/custom-payload-builder/src/main.rs index 54026655fe76..81ecf29270e7 100644 --- a/examples/custom-payload-builder/src/main.rs +++ b/examples/custom-payload-builder/src/main.rs @@ -27,6 +27,7 @@ use reth_chainspec::ChainSpec; use reth_node_api::NodeTypesWithEngine; use reth_node_ethereum::{node::EthereumAddOns, EthEngineTypes, EthEvmConfig, EthereumNode}; use reth_payload_builder::PayloadBuilderService; +use reth_primitives::EthPrimitives; pub mod generator; pub mod job; @@ -37,7 +38,13 @@ pub struct CustomPayloadBuilder; impl PayloadServiceBuilder for CustomPayloadBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes< + Types: 
NodeTypesWithEngine< + Engine = EthEngineTypes, + ChainSpec = ChainSpec, + Primitives = EthPrimitives, + >, + >, Pool: TransactionPool + Unpin + 'static, { async fn spawn_payload_service( diff --git a/examples/db-access/src/main.rs b/examples/db-access/src/main.rs index 988e75b60de7..7bc89b2a8f46 100644 --- a/examples/db-access/src/main.rs +++ b/examples/db-access/src/main.rs @@ -8,7 +8,7 @@ use reth_chainspec::ChainSpecBuilder; use reth_db::{open_db_read_only, DatabaseEnv}; use reth_node_ethereum::EthereumNode; use reth_node_types::NodeTypesWithDBAdapter; -use reth_primitives::{SealedHeader, TransactionSigned}; +use reth_primitives::{BlockExt, SealedHeader, TransactionSigned}; use reth_provider::{ providers::StaticFileProvider, AccountReader, BlockReader, BlockSource, HeaderProvider, ProviderFactory, ReceiptProvider, StateProvider, TransactionsProvider, @@ -123,7 +123,10 @@ fn txs_provider_example } /// The `BlockReader` allows querying the headers-related tables. -fn block_provider_example(provider: T, number: u64) -> eyre::Result<()> { +fn block_provider_example>( + provider: T, + number: u64, +) -> eyre::Result<()> { // Can query a block by number let block = provider.block(number.into())?.ok_or(eyre::eyre!("block num not found"))?; assert_eq!(block.number, number); @@ -167,7 +170,9 @@ fn block_provider_example(provider: T, number: u64) -> eyre::Res /// The `ReceiptProvider` allows querying the receipts tables. 
fn receipts_provider_example< - T: ReceiptProvider + TransactionsProvider + HeaderProvider, + T: ReceiptProvider + + TransactionsProvider + + HeaderProvider, >( provider: T, ) -> eyre::Result<()> { diff --git a/examples/rpc-db/src/myrpc_ext.rs b/examples/rpc-db/src/myrpc_ext.rs index e38b6fc24d37..6cc7a4142f5f 100644 --- a/examples/rpc-db/src/myrpc_ext.rs +++ b/examples/rpc-db/src/myrpc_ext.rs @@ -22,7 +22,7 @@ pub struct MyRpcExt { impl MyRpcExtApiServer for MyRpcExt where - Provider: BlockReaderIdExt + 'static, + Provider: BlockReaderIdExt + 'static, { /// Showcasing how to implement a custom rpc method /// using the provider. diff --git a/testing/testing-utils/Cargo.toml b/testing/testing-utils/Cargo.toml index f108fd9654e9..654c526be895 100644 --- a/testing/testing-utils/Cargo.toml +++ b/testing/testing-utils/Cargo.toml @@ -24,6 +24,7 @@ secp256k1 = { workspace = true, features = ["rand"] } [dev-dependencies] alloy-eips.workspace = true +reth-primitives-traits .workspace = true [features] scroll = ["reth-primitives/scroll"] diff --git a/testing/testing-utils/src/generators.rs b/testing/testing-utils/src/generators.rs index 8cf679153353..ae45f0b6fc08 100644 --- a/testing/testing-utils/src/generators.rs +++ b/testing/testing-utils/src/generators.rs @@ -462,6 +462,7 @@ mod tests { use alloy_eips::eip2930::AccessList; use alloy_primitives::{hex, PrimitiveSignature as Signature}; use reth_primitives::public_key_to_address; + use reth_primitives_traits::SignedTransaction; use std::str::FromStr; #[test]