diff --git a/bin/rundler/src/cli/builder.rs b/bin/rundler/src/cli/builder.rs index b8ebb6182..de832dfdc 100644 --- a/bin/rundler/src/cli/builder.rs +++ b/bin/rundler/src/cli/builder.rs @@ -333,6 +333,9 @@ impl BuilderArgs { let sender_args = self.sender_args(&chain_spec, &rpc_url)?; + let da_gas_tracking_enabled = + super::lint_da_gas_tracking(common.da_gas_tracking_enabled, &chain_spec); + Ok(BuilderTaskArgs { entry_points, chain_spec, @@ -353,6 +356,7 @@ impl BuilderArgs { max_cancellation_fee_increases: self.max_cancellation_fee_increases, max_replacement_underpriced_blocks: self.max_replacement_underpriced_blocks, remote_address, + da_gas_tracking_enabled, }) } diff --git a/bin/rundler/src/cli/mod.rs b/bin/rundler/src/cli/mod.rs index c84055873..ab064b60c 100644 --- a/bin/rundler/src/cli/mod.rs +++ b/bin/rundler/src/cli/mod.rs @@ -39,7 +39,7 @@ use rundler_sim::{ EstimationSettings, PrecheckSettings, PriorityFeeMode, SimulationSettings, MIN_CALL_GAS_LIMIT, }; use rundler_types::{ - chain::ChainSpec, v0_6::UserOperation as UserOperationV0_6, + chain::ChainSpec, da::DAGasOracleContractType, v0_6::UserOperation as UserOperationV0_6, v0_7::UserOperation as UserOperationV0_7, }; @@ -328,6 +328,14 @@ pub struct CommonArgs { global = true )] pub num_builders_v0_7: u64, + + #[arg( + long = "da_gas_tracking_enabled", + name = "da_gas_tracking_enabled", + env = "DA_GAS_TRACKING_ENABLED", + default_value = "false" + )] + pub da_gas_tracking_enabled: bool, } const SIMULATION_GAS_OVERHEAD: u64 = 100_000; @@ -582,3 +590,21 @@ pub fn construct_providers( ep_v0_7, }) } + +fn lint_da_gas_tracking(da_gas_tracking_enabled: bool, chain_spec: &ChainSpec) -> bool { + if !da_gas_tracking_enabled { + return false; + } + + if !chain_spec.da_pre_verification_gas { + tracing::warn!("DA tracking is disabled because DA pre-verification gas is not enabled"); + false + } else if !(chain_spec.da_gas_oracle_contract_type == DAGasOracleContractType::CachedNitro + || chain_spec.da_gas_oracle_contract_type == DAGasOracleContractType::LocalBedrock) + { + tracing::warn!("DA tracking is disabled because DA gas oracle contract type {:?} does not support caching", chain_spec.da_gas_oracle_contract_type); + false + } else { + true + } +} diff --git a/bin/rundler/src/cli/pool.rs b/bin/rundler/src/cli/pool.rs index 107441c6a..a90167355 100644 --- a/bin/rundler/src/cli/pool.rs +++ b/bin/rundler/src/cli/pool.rs @@ -201,7 +201,9 @@ impl PoolArgs { }; tracing::info!("Mempool channel configs: {:?}", mempool_channel_configs); - let chain_id = chain_spec.id; + let da_gas_tracking_enabled = + super::lint_da_gas_tracking(common.da_gas_tracking_enabled, &chain_spec); + let pool_config_base = PoolConfig { // update per entry point entry_point: Address::ZERO, @@ -209,7 +211,7 @@ impl PoolArgs { num_shards: 0, mempool_channel_configs: HashMap::new(), // Base config - chain_id, + chain_spec: chain_spec.clone(), same_sender_mempool_count: self.same_sender_mempool_count, min_replacement_fee_increase_percentage: self.min_replacement_fee_increase_percentage, max_size_of_pool_bytes: self.max_size_in_bytes, @@ -223,6 +225,7 @@ impl PoolArgs { paymaster_cache_length: self.paymaster_cache_length, reputation_tracking_enabled: self.reputation_tracking_enabled, drop_min_num_blocks: self.drop_min_num_blocks, + da_gas_tracking_enabled, }; let mut pool_configs = vec![]; diff --git a/crates/builder/src/bundle_proposer.rs b/crates/builder/src/bundle_proposer.rs index 7f834c4bc..b6dee4f97 100644 --- a/crates/builder/src/bundle_proposer.rs +++ 
b/crates/builder/src/bundle_proposer.rs @@ -32,11 +32,12 @@ use rundler_provider::{ BundleHandler, DAGasProvider, EntryPoint, EvmProvider, HandleOpsOut, SignatureAggregator, }; use rundler_sim::{ - gas, ExpectedStorage, FeeEstimator, PriorityFeeMode, SimulationError, SimulationResult, - Simulator, ViolationError, + ExpectedStorage, FeeEstimator, PriorityFeeMode, SimulationError, SimulationResult, Simulator, + ViolationError, }; use rundler_types::{ chain::ChainSpec, + da::DAGasBlockData, pool::{Pool, PoolOperation, SimulationViolation}, Entity, EntityInfo, EntityInfos, EntityType, EntityUpdate, EntityUpdateType, GasFees, Timestamp, UserOperation, UserOperationVariant, UserOpsPerAggregator, BUNDLE_BYTE_OVERHEAD, @@ -44,7 +45,7 @@ use rundler_types::{ }; use rundler_utils::{emit::WithEntryPoint, math}; use tokio::{sync::broadcast, try_join}; -use tracing::{error, info, warn}; +use tracing::{debug, error, info, warn}; use crate::emit::{BuilderEvent, ConditionNotMetReason, OpRejectionReason, SkipReason}; @@ -155,6 +156,7 @@ pub(crate) struct Settings { pub(crate) beneficiary: Address, pub(crate) bundle_priority_fee_overhead_percent: u32, pub(crate) priority_fee_mode: PriorityFeeMode, + pub(crate) da_gas_tracking_enabled: bool, } #[async_trait] @@ -200,7 +202,7 @@ where return Err(BundleProposerError::NoOperationsInitially); } - tracing::debug!("Starting bundle proposal with {} ops", ops.len()); + debug!("Starting bundle proposal with {} ops", ops.len()); // (0) Determine fees required for ops to be included in a bundle // if replacing, just require bundle fees increase chances of unsticking @@ -214,10 +216,30 @@ where .filter_map(|op| op.uo.paymaster()) .collect::>(); + let da_block_data = if self.settings.da_gas_tracking_enabled { + match self.entry_point.block_data(block_hash.into()).await { + Ok(block_data) => Some(block_data), + Err(e) => { + error!("Failed to get block data for block hash {block_hash:?}, falling back to async da gas calculations: {e:?}"); + None + } + } + } else { + None + }; + // (1) Filter out ops that don't pay enough to be included let fee_futs = ops .into_iter() - .map(|op| self.check_fees(op, block_hash, base_fee, required_op_fees)) + .map(|op| { + self.check_fees( + op, + block_hash, + da_block_data.as_ref(), + base_fee, + required_op_fees, + ) + }) .collect::>(); let ops = future::join_all(fee_futs) .await @@ -233,7 +255,7 @@ where // (2) Limit the amount of operations for simulation let (ops, gas_limit) = self.limit_user_operations_for_simulation(ops); - tracing::debug!( + debug!( "Bundle proposal after gas limit had {} ops and {:?} gas limit", ops.len(), gas_limit @@ -340,12 +362,14 @@ where // Check fees for a single user op. Returns None if the op should be skipped. // // Filters on: - // - insufficient gas fees - // - insufficient pre-verification gas + // - Insufficient gas fees + // - Insufficient pre-verification gas. The initial PVG check is done when the block is updated in the mempool. However, + // that check uses the initial gas fee estimate, whereas this check uses the gas fees specifically from this bundle. 
     async fn check_fees(
         &self,
         op: PoolOperation,
         block_hash: B256,
+        da_block_data: Option<&DAGasBlockData>,
         base_fee: u128,
         required_op_fees: GasFees,
     ) -> Option<PoolOperation> {
@@ -369,32 +393,52 @@ where
             return None;
         }
 
-        // Check if the pvg is enough
-        let required_pvg = match gas::calc_required_pre_verification_gas(
-            &self.settings.chain_spec,
-            &self.entry_point,
-            op.uo.as_ref(),
-            block_hash.into(),
-            base_fee,
-        )
-        .await
-        {
-            Ok(pvg) => pvg,
-            Err(e) => {
-                error!("Failed to calculate required pre-verification gas for op: {e:?}, skipping");
-                self.emit(BuilderEvent::skipped_op(
-                    self.builder_index,
-                    op_hash,
-                    SkipReason::Other {
-                        reason: Arc::new(format!(
-                            "Failed to calculate required pre-verification gas for op: {e:?}, skipping"
-                        )),
-                    },
-                ));
-                return None;
+        if !self.settings.chain_spec.da_pre_verification_gas {
+            // Skip the PVG check if no DA pre-verification gas, as this is checked on entry to the mempool.
+            return Some(op);
+        }
+
+        let required_da_gas = if self.settings.da_gas_tracking_enabled && da_block_data.is_some() {
+            let da_block_data = da_block_data.unwrap();
+            self.entry_point.calc_da_gas_sync(
+                &op.da_gas_data,
+                da_block_data,
+                required_op_fees.max_fee_per_gas,
+            )
+        } else {
+            match self
+                .entry_point
+                .calc_da_gas(
+                    op.uo.clone().into(),
+                    block_hash.into(),
+                    required_op_fees.max_fee_per_gas,
+                )
+                .await
+            {
+                Ok((required_da_gas, _, _)) => required_da_gas,
+                Err(e) => {
+                    error!(
+                        "Failed to calculate required pre-verification gas for op: {e:?}, skipping"
+                    );
+                    self.emit(BuilderEvent::skipped_op(
+                        self.builder_index,
+                        op_hash,
+                        SkipReason::Other {
+                            reason: Arc::new(format!(
+                                "Failed to calculate required pre-verification gas for op: {e:?}, skipping"
+                            )),
+                        },
+                    ));
+                    return None;
+                }
             }
         };
+
+        // This assumes a bundle size of 1
+        let required_pvg =
+            op.uo
+                .required_pre_verification_gas(&self.settings.chain_spec, 1, required_da_gas);
+
         if op.uo.pre_verification_gas() < required_pvg {
             self.emit(BuilderEvent::skipped_op(
                 self.builder_index,
                 op_hash,
@@ -1645,49 +1689,52 @@ mod tests {
         assert!(bundle.rejected_ops.is_empty());
     }
 
-    #[tokio::test]
-    async fn test_drops_but_not_rejects_op_with_too_low_pvg() {
-        let base_fee = 1000;
-        let max_priority_fee_per_gas = 50;
-        let mut op1 = op_with_sender_and_fees(address(1), 1054, 55);
-        op1.pre_verification_gas = 0; // Should be dropped but not rejected
-        let op2 = op_with_sender_and_fees(address(2), 1055, 55);
-        let bundle = mock_make_bundle(
-            vec![
-                MockOp {
-                    op: op1.clone(),
-                    simulation_result: Box::new(|| Ok(SimulationResult::default())),
-                },
-                MockOp {
-                    op: op2.clone(),
-                    simulation_result: Box::new(|| Ok(SimulationResult::default())),
-                },
-            ],
-            vec![],
-            vec![HandleOpsOut::Success],
-            vec![],
-            base_fee,
-            max_priority_fee_per_gas,
-            false,
-            ExpectedStorage::default(),
-        )
-        .await;
-        assert_eq!(
-            bundle.gas_fees,
-            GasFees {
-                max_fee_per_gas: 1050,
-                max_priority_fee_per_gas: 50,
-            }
-        );
-        assert_eq!(
-            bundle.ops_per_aggregator,
-            vec![UserOpsPerAggregator {
-                user_ops: vec![op2],
-                ..Default::default()
-            }],
-        );
-        assert!(bundle.rejected_ops.is_empty());
-    }
+    // TODO(danc): This test is no longer valid as we only recheck PVG for
+    // chains with external DA. We should add tests for that, but it will require adding
+    // mock calls to the DA provider.
+ // #[tokio::test] + // async fn test_drops_but_not_rejects_op_with_too_low_pvg() { + // let base_fee = 1000; + // let max_priority_fee_per_gas = 50; + // let mut op1 = op_with_sender_and_fees(address(1), 1054, 55); + // op1.pre_verification_gas = 0; // Should be dropped but not rejected + // let op2 = op_with_sender_and_fees(address(2), 1055, 55); + // let bundle = mock_make_bundle( + // vec![ + // MockOp { + // op: op1.clone(), + // simulation_result: Box::new(|| Ok(SimulationResult::default())), + // }, + // MockOp { + // op: op2.clone(), + // simulation_result: Box::new(|| Ok(SimulationResult::default())), + // }, + // ], + // vec![], + // vec![HandleOpsOut::Success], + // vec![], + // base_fee, + // max_priority_fee_per_gas, + // false, + // ExpectedStorage::default(), + // ) + // .await; + // assert_eq!( + // bundle.gas_fees, + // GasFees { + // max_fee_per_gas: 1050, + // max_priority_fee_per_gas: 50, + // } + // ); + // assert_eq!( + // bundle.ops_per_aggregator, + // vec![UserOpsPerAggregator { + // user_ops: vec![op2], + // ..Default::default() + // }], + // ); + // assert!(bundle.rejected_ops.is_empty()); + // } #[tokio::test] async fn test_aggregators() { @@ -2279,6 +2326,7 @@ mod tests { valid_time_range: ValidTimeRange::default(), entity_infos: EntityInfos::default(), aggregator: None, + da_gas_data: Default::default(), }) .collect(); @@ -2374,6 +2422,7 @@ mod tests { beneficiary, priority_fee_mode: PriorityFeeMode::PriorityFeeIncreasePercent(0), bundle_priority_fee_overhead_percent: 0, + da_gas_tracking_enabled: false, }, event_sender, ); diff --git a/crates/builder/src/bundle_sender.rs b/crates/builder/src/bundle_sender.rs index b29c90300..a7e47a053 100644 --- a/crates/builder/src/bundle_sender.rs +++ b/crates/builder/src/bundle_sender.rs @@ -1018,7 +1018,7 @@ impl Trigger for BundleSenderTrigger { a = self.bundle_action_receiver.recv() => { match a { Some(BundleSenderAction::ChangeMode(mode)) => { - debug!("changing bundling mode to {mode:?}"); + info!("changing bundling mode to {mode:?}"); self.bundling_mode = mode; continue; }, diff --git a/crates/builder/src/task.rs b/crates/builder/src/task.rs index 5181e6b0b..3ac494e23 100644 --- a/crates/builder/src/task.rs +++ b/crates/builder/src/task.rs @@ -87,6 +87,8 @@ pub struct Args { pub remote_address: Option, /// Entry points to start builders for pub entry_points: Vec, + /// Enable DA tracking + pub da_gas_tracking_enabled: bool, } /// Builder settings for an entrypoint @@ -360,6 +362,7 @@ where beneficiary, priority_fee_mode: self.args.priority_fee_mode, bundle_priority_fee_overhead_percent: self.args.bundle_priority_fee_overhead_percent, + da_gas_tracking_enabled: self.args.da_gas_tracking_enabled, }; let transaction_sender = self diff --git a/crates/pool/Cargo.toml b/crates/pool/Cargo.toml index 854ce9cf2..72d8d9ba0 100644 --- a/crates/pool/Cargo.toml +++ b/crates/pool/Cargo.toml @@ -47,6 +47,7 @@ mockall.workspace = true reth-tasks.workspace = true rundler-provider = { workspace = true, features = ["test-utils"] } rundler-sim = { workspace = true, features = ["test-utils"] } +rundler-types = { workspace = true, features = ["test-utils"] } [build-dependencies] tonic-build.workspace = true diff --git a/crates/pool/proto/op_pool/op_pool.proto b/crates/pool/proto/op_pool/op_pool.proto index c07c74c3b..83345b6ed 100644 --- a/crates/pool/proto/op_pool/op_pool.proto +++ b/crates/pool/proto/op_pool/op_pool.proto @@ -153,6 +153,29 @@ message MempoolOp { bool account_is_staked = 7; // The entry point address of this operation 
bytes entry_point = 8; + // The DA gas data for the UO + DaGasUoData da_gas_data = 9; +} + +// Data associated with a user operation for DA gas calculations +message DaGasUoData { + oneof data { + EmptyUoData empty = 1; + NitroDaGasUoData nitro = 2; + BedrockDaGasUoData bedrock = 3; + } +} + +message EmptyUoData {} + +// Data associated with a user operation for Nitro DA gas calculations +message NitroDaGasUoData { + bytes uo_units = 1; +} + +// Data associated with a user operation for Bedrock DA gas calculations +message BedrockDaGasUoData { + uint64 uo_units = 1; } // Defines the gRPC endpoints for a UserOperation mempool service diff --git a/crates/pool/src/emit.rs b/crates/pool/src/emit.rs index 554591f3d..e635bab15 100644 --- a/crates/pool/src/emit.rs +++ b/crates/pool/src/emit.rs @@ -56,6 +56,17 @@ pub enum OpPoolEvent { /// The throttled entity entity: Entity, }, + /// DA data was updated for an operation + UpdatedDAData { + /// The operation hash + op_hash: B256, + /// The DA data + eligible: bool, + /// The required pre_verification_gas + required_pvg: u128, + /// The actual pre_verification_gas + actual_pvg: u128, + }, } /// Summary of the entities associated with an operation @@ -192,6 +203,24 @@ impl Display for OpPoolEvent { OpPoolEvent::ThrottledEntity { entity } => { write!(f, concat!("Throttled entity.", " Entity: {}",), entity,) } + OpPoolEvent::UpdatedDAData { + op_hash, + eligible, + required_pvg, + actual_pvg, + } => { + write!( + f, + concat!( + "Updated DA data for op: ", + "Hash: {:?} ", + "Eligible: {} ", + "Required PVG: {} ", + "Actual PVG: {}", + ), + op_hash, eligible, required_pvg, actual_pvg, + ) + } } } } diff --git a/crates/pool/src/mempool/mod.rs b/crates/pool/src/mempool/mod.rs index 2fa4af045..459126ed8 100644 --- a/crates/pool/src/mempool/mod.rs +++ b/crates/pool/src/mempool/mod.rs @@ -33,6 +33,7 @@ use alloy_primitives::{Address, B256}; use mockall::automock; use rundler_sim::{MempoolConfig, PrecheckSettings, SimulationSettings}; use rundler_types::{ + chain::ChainSpec, pool::{ MempoolError, PaymasterMetadata, PoolOperation, Reputation, ReputationStatus, StakeStatus, }, @@ -124,12 +125,12 @@ pub trait Mempool: Send + Sync { /// Config for the mempool #[derive(Debug, Clone)] pub struct PoolConfig { + /// Chain specification + pub chain_spec: ChainSpec, /// Address of the entry point this pool targets pub entry_point: Address, /// Version of the entry point this pool targets pub entry_point_version: EntryPointVersion, - /// Chain ID this pool targets - pub chain_id: u64, /// The maximum number of operations an unstaked sender can have in the mempool pub same_sender_mempool_count: usize, /// The minimum fee bump required to replace an operation in the mempool @@ -162,6 +163,8 @@ pub struct PoolConfig { pub paymaster_cache_length: u32, /// Boolean field used to toggle the operation of the reputation tracker pub reputation_tracking_enabled: bool, + /// Boolean field used to toggle the operation of the DA tracker + pub da_gas_tracking_enabled: bool, /// The minimum number of blocks a user operation must be in the mempool before it can be dropped pub drop_min_num_blocks: u64, } @@ -227,6 +230,7 @@ mod tests { is_staked: false, }), }, + da_gas_data: Default::default(), }; let entities = po.entities().collect::>(); diff --git a/crates/pool/src/mempool/paymaster.rs b/crates/pool/src/mempool/paymaster.rs index bf1c0e283..cc7270d94 100644 --- a/crates/pool/src/mempool/paymaster.rs +++ b/crates/pool/src/mempool/paymaster.rs @@ -529,6 +529,7 @@ mod tests { 
sim_block_number: 0, account_is_staked: true, entity_infos: EntityInfos::default(), + da_gas_data: rundler_types::da::DAGasUOData::Empty, } } diff --git a/crates/pool/src/mempool/pool.rs b/crates/pool/src/mempool/pool.rs index e976b2093..c67bbb366 100644 --- a/crates/pool/src/mempool/pool.rs +++ b/crates/pool/src/mempool/pool.rs @@ -22,56 +22,65 @@ use alloy_primitives::{Address, B256}; use anyhow::Context; use metrics::{Gauge, Histogram}; use metrics_derive::Metrics; +use parking_lot::RwLock; +use rundler_provider::DAGasProvider; use rundler_types::{ + chain::ChainSpec, + da::DAGasBlockData, pool::{MempoolError, PoolOperation}, Entity, EntityType, GasFees, Timestamp, UserOperation, UserOperationId, UserOperationVariant, }; -use rundler_utils::math; +use rundler_utils::{emit::WithEntryPoint, math}; +use tokio::sync::broadcast; use tracing::{info, warn}; use super::{entity_tracker::EntityCounter, size::SizeTracker, MempoolResult, PoolConfig}; -use crate::chain::MinedOp; +use crate::{chain::MinedOp, emit::OpRemovalReason, PoolEvent}; #[derive(Debug, Clone)] pub(crate) struct PoolInnerConfig { + chain_spec: ChainSpec, entry_point: Address, - chain_id: u64, max_size_of_pool_bytes: usize, min_replacement_fee_increase_percentage: u32, throttled_entity_mempool_count: u64, throttled_entity_live_blocks: u64, + da_gas_tracking_enabled: bool, } impl From for PoolInnerConfig { fn from(config: PoolConfig) -> Self { Self { + chain_spec: config.chain_spec, entry_point: config.entry_point, - chain_id: config.chain_id, max_size_of_pool_bytes: config.max_size_of_pool_bytes, min_replacement_fee_increase_percentage: config.min_replacement_fee_increase_percentage, throttled_entity_mempool_count: config.throttled_entity_mempool_count, throttled_entity_live_blocks: config.throttled_entity_live_blocks, + da_gas_tracking_enabled: config.da_gas_tracking_enabled, } } } /// Pool of user operations #[derive(Debug)] -pub(crate) struct PoolInner { +pub(crate) struct PoolInner { /// Pool settings config: PoolInnerConfig, + /// DA Gas Provider + da_gas_provider: D, /// Operations by hash - by_hash: HashMap, + by_hash: HashMap>, /// Operations by operation ID - by_id: HashMap, + by_id: HashMap>, /// Best operations, sorted by gas price - best: BTreeSet, + best: BTreeSet>, /// Time to mine info time_to_mine: HashMap, /// Removed operations, temporarily kept around in case their blocks are /// reorged away. Stored along with the block number at which it was /// removed. - mined_at_block_number_by_hash: HashMap, + mined_at_block_number_by_hash: HashMap, u64)>, /// Removed operation hashes sorted by block number, so we can forget them /// when enough new blocks have passed. mined_hashes_with_block_numbers: BTreeSet<(u64, B256)>, @@ -89,13 +98,23 @@ pub(crate) struct PoolInner { prev_block_number: u64, /// The metrics of pool. 
     metrics: PoolMetrics,
+    /// Event sender
+    event_sender: broadcast::Sender<WithEntryPoint<PoolEvent>>,
 }
 
-impl PoolInner {
-    pub(crate) fn new(config: PoolInnerConfig) -> Self {
+impl<D> PoolInner<D>
+where
+    D: DAGasProvider,
+{
+    pub(crate) fn new(
+        config: PoolInnerConfig,
+        da_gas_provider: D,
+        event_sender: broadcast::Sender<WithEntryPoint<PoolEvent>>,
+    ) -> Self {
         let entry_point = config.entry_point.to_string();
         Self {
             config,
+            da_gas_provider,
             by_hash: HashMap::new(),
             by_id: HashMap::new(),
             best: BTreeSet::new(),
@@ -109,9 +128,17 @@ impl PoolInner {
             prev_sys_block_time: Duration::default(),
             prev_block_number: 0,
             metrics: PoolMetrics::new_with_labels(&[("entry_point", entry_point)]),
+            event_sender,
         }
     }
 
+    fn emit(&self, event: PoolEvent) {
+        let _ = self.event_sender.send(WithEntryPoint {
+            entry_point: self.config.entry_point,
+            event,
+        });
+    }
+
     /// Returns hash of operation to replace if operation is a replacement
    pub(crate) fn check_replacement(
        &self,
@@ -120,7 +147,7 @@ impl PoolInner {
         // Check if operation already known
         if self
             .by_hash
-            .contains_key(&op.hash(self.config.entry_point, self.config.chain_id))
+            .contains_key(&op.hash(self.config.entry_point, self.config.chain_spec.id))
         {
             return Err(MempoolError::OperationAlreadyKnown);
         }
@@ -141,21 +168,57 @@ impl PoolInner {
             Ok(Some(
                 pool_op
                     .uo()
-                    .hash(self.config.entry_point, self.config.chain_id),
+                    .hash(self.config.entry_point, self.config.chain_spec.id),
             ))
         } else {
             Ok(None)
         }
     }
 
-    pub(crate) fn add_operation(&mut self, op: PoolOperation) -> MempoolResult<B256> {
-        let ret = self.add_operation_internal(Arc::new(op), None);
+    pub(crate) fn add_operation(
+        &mut self,
+        op: PoolOperation,
+        required_pvg: u128,
+    ) -> MempoolResult<B256> {
+        // The only eligibility criterion is the required PVG, which is checked only when DA gas tracking is enabled
+        let is_eligible = if self.config.da_gas_tracking_enabled {
+            if op.uo.pre_verification_gas() < required_pvg {
+                self.emit(PoolEvent::UpdatedDAData {
+                    op_hash: op
+                        .uo
+                        .hash(self.config.entry_point, self.config.chain_spec.id),
+                    eligible: false,
+                    required_pvg,
+                    actual_pvg: op.uo.pre_verification_gas(),
+                });
+                false
+            } else {
+                true
+            }
+        } else {
+            true
+        };
+
+        // Wrap the op with its submission id and eligibility flag
+        let pool_op = Arc::new(OrderedPoolOperation::new(
+            Arc::new(op),
+            self.next_submission_id(),
+            is_eligible,
+        ));
+
+        let hash = self.add_operation_internal(pool_op)?;
         self.update_metrics();
-        ret
+        Ok(hash)
     }
 
-    pub(crate) fn best_operations(&self) -> impl Iterator<Item = Arc<PoolOperation>> {
-        self.best.clone().into_iter().map(|v| v.po)
+    pub(crate) fn best_operations(&self) -> impl Iterator<Item = Arc<PoolOperation>> + '_ {
+        self.best.iter().filter_map(|p| {
+            if p.eligible() {
+                Some(p.po.clone())
+            } else {
+                None
+            }
+        })
     }
 
     /// Does maintenance on the pool.
@@ -169,9 +232,10 @@ impl PoolInner { &mut self, block_number: u64, block_timestamp: Timestamp, + block_da_data: Option<&DAGasBlockData>, candidate_gas_fees: GasFees, base_fee: u128, - ) -> Vec<(B256, Timestamp)> { + ) { let sys_block_time = SystemTime::now() .duration_since(UNIX_EPOCH) .expect("time should be after epoch"); @@ -181,36 +245,83 @@ impl PoolInner { let candidate_gas_price = base_fee + candidate_gas_fees.max_priority_fee_per_gas; let mut expired = Vec::new(); let mut num_candidates = 0; + let mut events = vec![]; for (hash, op) in &mut self.by_hash { if op.po.valid_time_range.valid_until < block_timestamp { - expired.push((*hash, op.po.valid_time_range.valid_until)); + events.push(PoolEvent::RemovedOp { + op_hash: *hash, + reason: OpRemovalReason::Expired { + valid_until: op.po.valid_time_range.valid_until, + }, + }); + expired.push(*hash); + continue; + } + + if self.config.da_gas_tracking_enabled && block_da_data.is_some() { + let block_da_data = block_da_data.unwrap(); + let required_da_gas = self.da_gas_provider.calc_da_gas_sync( + &op.po.da_gas_data, + block_da_data, + candidate_gas_price, + ); + + let required_pvg = op.uo().required_pre_verification_gas( + &self.config.chain_spec, + 1, + required_da_gas, + ); + let actual_pvg = op.uo().pre_verification_gas(); + + if actual_pvg < required_pvg { + if op.eligible() { + op.set_ineligible(); + events.push(PoolEvent::UpdatedDAData { + op_hash: *hash, + eligible: false, + required_pvg, + actual_pvg, + }); + } + continue; + } else if !op.eligible() { + op.set_eligible(); + events.push(PoolEvent::UpdatedDAData { + op_hash: *hash, + eligible: true, + required_pvg, + actual_pvg, + }); + } } let uo_gas_price = cmp::min( op.uo().max_fee_per_gas(), op.uo().max_priority_fee_per_gas() + base_fee, ); + if candidate_gas_price > uo_gas_price { + // don't mark as ineligible, but also not a candidate + continue; + } - num_candidates += if uo_gas_price >= candidate_gas_price { - if let Some(ttm) = self.time_to_mine.get_mut(hash) { - ttm.increase(block_delta_time, block_delta_height); - } - 1 - } else { - 0 - }; + // Op is a candidate, update time to mine and candidate count + if let Some(ttm) = self.time_to_mine.get_mut(hash) { + ttm.increase(block_delta_time, block_delta_height); + } + num_candidates += 1; } - for (hash, _) in &expired { - self.remove_operation_by_hash(*hash); + for hash in expired { + self.remove_operation_by_hash(hash); + } + for event in events { + self.emit(event); } self.metrics.num_candidates.set(num_candidates as f64); self.prev_block_number = block_number; self.prev_sys_block_time = sys_block_time; - - expired } pub(crate) fn address_count(&self, address: &Address) -> usize { @@ -313,7 +424,7 @@ impl PoolInner { let hash = tx_in_pool .uo() - .hash(mined_op.entry_point, self.config.chain_id); + .hash(mined_op.entry_point, self.config.chain_spec.id); let ret = self.remove_operation_internal(hash, Some(block_number)); @@ -331,7 +442,7 @@ impl PoolInner { info!("Could not put back unmined operation: {error}"); }; self.update_metrics(); - Some(op.po) + Some(op.po.clone()) } /// Remove all but THROTTLED_ENTITY_MEMPOOL_COUNT operations that are within THROTTLED_ENTITY_LIVE_BLOCKS of head @@ -358,7 +469,10 @@ impl PoolInner { } false }) - .map(|o| o.po.uo.hash(self.config.entry_point, self.config.chain_id)) + .map(|o| { + o.po.uo + .hash(self.config.entry_point, self.config.chain_spec.id) + }) .collect::>(); for &hash in &to_remove { self.remove_operation_internal(hash, None); @@ -417,7 +531,7 @@ impl PoolInner { if let 
Some(worst) = self.best.pop_last() { let hash = worst .uo() - .hash(self.config.entry_point, self.config.chain_id); + .hash(self.config.entry_point, self.config.chain_spec.id); let _ = self .remove_operation_internal(hash, None) @@ -430,26 +544,20 @@ impl PoolInner { Ok(removed) } - fn put_back_unmined_operation(&mut self, op: OrderedPoolOperation) -> MempoolResult { - self.add_operation_internal(op.po, Some(op.submission_id)) + fn put_back_unmined_operation(&mut self, op: Arc) -> MempoolResult { + self.add_operation_internal(op) } fn add_operation_internal( &mut self, - op: Arc, - submission_id: Option, + pool_op: Arc, ) -> MempoolResult { // Check if operation already known or replacing an existing operation // if replacing, remove the existing operation - if let Some(hash) = self.check_replacement(&op.uo)? { + if let Some(hash) = self.check_replacement(pool_op.uo())? { self.remove_operation_by_hash(hash); } - let pool_op = OrderedPoolOperation { - po: op, - submission_id: submission_id.unwrap_or_else(|| self.next_submission_id()), - }; - // update counts for e in pool_op.po.entities() { self.count_by_address @@ -461,7 +569,7 @@ impl PoolInner { // create and insert ordered operation let hash = pool_op .uo() - .hash(self.config.entry_point, self.config.chain_id); + .hash(self.config.entry_point, self.config.chain_spec.id); self.pool_size += pool_op.mem_size(); self.by_hash.insert(hash, pool_op.clone()); self.by_id.insert(pool_op.uo().id(), pool_op.clone()); @@ -504,7 +612,7 @@ impl PoolInner { } self.pool_size -= op.mem_size(); - Some(op.po) + Some(op.po.clone()) } fn decrement_address_count(&mut self, address: Address, entity: &EntityType) { @@ -547,13 +655,22 @@ impl PoolInner { /// Wrapper around PoolOperation that adds a submission ID to implement /// a custom ordering for the best operations -#[derive(Debug, Clone)] +#[derive(Debug)] struct OrderedPoolOperation { po: Arc, submission_id: u64, + eligible: RwLock, } impl OrderedPoolOperation { + fn new(po: Arc, submission_id: u64, eligible: bool) -> Self { + Self { + po, + submission_id, + eligible: RwLock::new(eligible), + } + } + fn uo(&self) -> &UserOperationVariant { &self.po.uo } @@ -561,6 +678,18 @@ impl OrderedPoolOperation { fn mem_size(&self) -> usize { std::mem::size_of::() + self.po.mem_size() } + + fn eligible(&self) -> bool { + *self.eligible.read() + } + + fn set_eligible(&self) { + *self.eligible.write() = true; + } + + fn set_ineligible(&self) { + *self.eligible.write() = false; + } } impl Eq for OrderedPoolOperation {} @@ -630,6 +759,7 @@ struct PoolMetrics { #[cfg(test)] mod tests { use alloy_primitives::U256; + use rundler_provider::MockEntryPointV0_6; use rundler_types::{ v0_6::UserOperation, EntityInfo, EntityInfos, UserOperation as UserOperationTrait, ValidTimeRange, @@ -639,9 +769,9 @@ mod tests { #[test] fn add_single_op() { - let mut pool = PoolInner::new(conf()); + let mut pool = pool(); let op = create_op(Address::random(), 0, 1); - let hash = pool.add_operation(op.clone()).unwrap(); + let hash = pool.add_operation(op.clone(), 0).unwrap(); check_map_entry(pool.by_hash.get(&hash), Some(&op)); check_map_entry(pool.by_id.get(&op.uo.id()), Some(&op)); @@ -650,9 +780,9 @@ mod tests { #[test] fn test_get_by_hash() { - let mut pool = PoolInner::new(conf()); + let mut pool = pool(); let op = create_op(Address::random(), 0, 1); - let hash = pool.add_operation(op.clone()).unwrap(); + let hash = pool.add_operation(op.clone(), 0).unwrap(); let get_op = pool.get_operation_by_hash(hash).unwrap(); assert_eq!(op, *get_op); 
@@ -662,9 +792,9 @@ mod tests { #[test] fn test_get_by_id() { - let mut pool = PoolInner::new(conf()); + let mut pool = pool(); let op = create_op(Address::random(), 0, 1); - pool.add_operation(op.clone()).unwrap(); + pool.add_operation(op.clone(), 0).unwrap(); let id = op.uo.id(); let get_op = pool.get_operation_by_id(&id).unwrap(); @@ -680,7 +810,7 @@ mod tests { #[test] fn add_multiple_ops() { - let mut pool = PoolInner::new(conf()); + let mut pool = pool(); let ops = vec![ create_op(Address::random(), 0, 1), create_op(Address::random(), 0, 2), @@ -689,7 +819,7 @@ mod tests { let mut hashes = vec![]; for op in ops.iter() { - hashes.push(pool.add_operation(op.clone()).unwrap()); + hashes.push(pool.add_operation(op.clone(), 0).unwrap()); } for (hash, op) in hashes.iter().zip(&ops) { @@ -706,7 +836,7 @@ mod tests { #[test] fn best_ties() { - let mut pool = PoolInner::new(conf()); + let mut pool = pool(); let ops = vec![ create_op(Address::random(), 0, 1), create_op(Address::random(), 0, 1), @@ -715,7 +845,7 @@ mod tests { let mut hashes = vec![]; for op in ops.iter() { - hashes.push(pool.add_operation(op.clone()).unwrap()); + hashes.push(pool.add_operation(op.clone(), 0).unwrap()); } // best should be sorted by gas, then by submission id @@ -727,7 +857,7 @@ mod tests { #[test] fn remove_op() { - let mut pool = PoolInner::new(conf()); + let mut pool = pool(); let ops = vec![ create_op(Address::random(), 0, 3), create_op(Address::random(), 0, 2), @@ -736,7 +866,7 @@ mod tests { let mut hashes = vec![]; for op in ops.iter() { - hashes.push(pool.add_operation(op.clone()).unwrap()); + hashes.push(pool.add_operation(op.clone(), 0).unwrap()); } assert!(pool.remove_operation_by_hash(hashes[0]).is_some()); @@ -758,7 +888,7 @@ mod tests { #[test] fn remove_account() { - let mut pool = PoolInner::new(conf()); + let mut pool = pool(); let account = Address::random(); let ops = vec![ create_op(account, 0, 3), @@ -767,7 +897,7 @@ mod tests { ]; for mut op in ops.into_iter() { op.aggregator = Some(account); - pool.add_operation(op.clone()).unwrap(); + pool.add_operation(op.clone(), 0).unwrap(); } assert_eq!(pool.by_hash.len(), 3); @@ -779,15 +909,17 @@ mod tests { #[test] fn mine_op() { - let mut pool = PoolInner::new(conf()); + let mut pool = pool(); let sender = Address::random(); let nonce = 0; let op = create_op(sender, nonce, 1); - let hash = op.uo.hash(pool.config.entry_point, pool.config.chain_id); + let hash = op + .uo + .hash(pool.config.entry_point, pool.config.chain_spec.id); - pool.add_operation(op).unwrap(); + pool.add_operation(op, 0).unwrap(); let mined_op = MinedOp { paymaster: None, @@ -807,17 +939,19 @@ mod tests { #[test] fn mine_op_with_replacement() { - let mut pool = PoolInner::new(conf()); + let mut pool = pool(); let sender = Address::random(); let nonce = 0; let op = create_op(sender, nonce, 1); let op_2 = create_op(sender, nonce, 2); - let hash = op_2.uo.hash(pool.config.entry_point, pool.config.chain_id); + let hash = op_2 + .uo + .hash(pool.config.entry_point, pool.config.chain_spec.id); - pool.add_operation(op).unwrap(); - pool.add_operation(op_2).unwrap(); + pool.add_operation(op, 0).unwrap(); + pool.add_operation(op_2, 0).unwrap(); let mined_op = MinedOp { paymaster: None, @@ -837,7 +971,7 @@ mod tests { #[test] fn remove_aggregator() { - let mut pool = PoolInner::new(conf()); + let mut pool = pool(); let agg = Address::random(); let ops = vec![ create_op(Address::random(), 0, 3), @@ -850,7 +984,7 @@ mod tests { entity: Entity::aggregator(agg), is_staked: false, }); - 
pool.add_operation(op.clone()).unwrap(); + pool.add_operation(op.clone(), 0).unwrap(); } assert_eq!(pool.by_hash.len(), 3); @@ -862,7 +996,7 @@ mod tests { #[test] fn remove_paymaster() { - let mut pool = PoolInner::new(conf()); + let mut pool = pool(); let paymaster = Address::random(); let ops = vec![ create_op(Address::random(), 0, 3), @@ -877,7 +1011,7 @@ mod tests { entity: Entity::paymaster(paymaster), is_staked: false, }); - pool.add_operation(op.clone()).unwrap(); + pool.add_operation(op.clone(), 0).unwrap(); } assert_eq!(pool.by_hash.len(), 3); @@ -889,7 +1023,7 @@ mod tests { #[test] fn address_count() { - let mut pool = PoolInner::new(conf()); + let mut pool = pool(); let sender = Address::random(); let paymaster = Address::random(); let factory = Address::random(); @@ -919,7 +1053,7 @@ mod tests { let mut op = op.clone(); let uo: &mut UserOperation = op.uo.as_mut(); uo.nonce = U256::from(i); - hashes.push(pool.add_operation(op).unwrap()); + hashes.push(pool.add_operation(op, 0).unwrap()); } assert_eq!(pool.address_count(&sender), 5); @@ -940,49 +1074,48 @@ mod tests { #[test] fn pool_full_new_replaces_worst() { let args = conf(); - let mut pool = PoolInner::new(args.clone()); + let mut pool = pool(); for i in 0..20 { let op = create_op(Address::random(), i, (i + 1) as u128); - pool.add_operation(op).unwrap(); + pool.add_operation(op, 0).unwrap(); } // on greater gas, new op should win let op = create_op(Address::random(), args.max_size_of_pool_bytes, 2); - let result = pool.add_operation(op); + let result = pool.add_operation(op, 0); assert!(result.is_ok(), "{:?}", result.err()); } #[test] fn pool_full_worst_remains() { - let args = conf(); - let mut pool = PoolInner::new(args.clone()); + let mut pool = pool(); for i in 0..20 { let op = create_op(Address::random(), i, (i + 1) as u128); - pool.add_operation(op).unwrap(); + pool.add_operation(op, 0).unwrap(); } let op = create_op(Address::random(), 4, 1); - assert!(pool.add_operation(op).is_err()); + assert!(pool.add_operation(op, 0).is_err()); // on equal gas, worst should remain because it came first let op = create_op(Address::random(), 4, 2); - let result = pool.add_operation(op); + let result = pool.add_operation(op, 0); assert!(result.is_ok(), "{:?}", result.err()); } #[test] fn replace_op_underpriced() { - let mut pool = PoolInner::new(conf()); + let mut pool = pool(); let sender = Address::random(); let mut po1 = create_op(sender, 0, 100); let uo1: &mut UserOperation = po1.uo.as_mut(); uo1.max_priority_fee_per_gas = 100; - let _ = pool.add_operation(po1.clone()).unwrap(); + let _ = pool.add_operation(po1.clone(), 0).unwrap(); let mut po2 = create_op(sender, 0, 101); let uo2: &mut UserOperation = po2.uo.as_mut(); uo2.max_priority_fee_per_gas = 101; - let res = pool.add_operation(po2); + let res = pool.add_operation(po2, 0); assert!(res.is_err()); match res.err().unwrap() { MempoolError::ReplacementUnderpriced(a, b) => { @@ -995,17 +1128,13 @@ mod tests { assert_eq!(pool.address_count(&sender), 1); assert_eq!( pool.pool_size, - OrderedPoolOperation { - po: Arc::new(po1), - submission_id: 0, - } - .mem_size() + OrderedPoolOperation::new(Arc::new(po1), 0, true).mem_size(), ); } #[test] fn replace_op() { - let mut pool = PoolInner::new(conf()); + let mut pool = pool(); let sender = Address::random(); let paymaster1 = Address::random(); let mut po1 = create_op(sender, 0, 10); @@ -1016,7 +1145,7 @@ mod tests { entity: Entity::paymaster(paymaster1), is_staked: false, }); - let _ = pool.add_operation(po1).unwrap(); + let _ = 
pool.add_operation(po1, 0).unwrap(); assert_eq!(pool.address_count(&paymaster1), 1); let paymaster2 = Address::random(); @@ -1028,31 +1157,27 @@ mod tests { entity: Entity::paymaster(paymaster2), is_staked: false, }); - let _ = pool.add_operation(po2.clone()).unwrap(); + let _ = pool.add_operation(po2.clone(), 0).unwrap(); assert_eq!(pool.address_count(&sender), 1); assert_eq!(pool.address_count(&paymaster1), 0); assert_eq!(pool.address_count(&paymaster2), 1); assert_eq!( pool.pool_size, - OrderedPoolOperation { - po: Arc::new(po2), - submission_id: 0, - } - .mem_size() + OrderedPoolOperation::new(Arc::new(po2), 0, true).mem_size() ); } #[test] fn test_already_known() { - let mut pool = PoolInner::new(conf()); + let mut pool = pool(); let sender = Address::random(); let mut po1 = create_op(sender, 0, 10); let uo1: &mut UserOperation = po1.uo.as_mut(); uo1.max_priority_fee_per_gas = 10; - let _ = pool.add_operation(po1.clone()).unwrap(); + let _ = pool.add_operation(po1.clone(), 0).unwrap(); - let res = pool.add_operation(po1); + let res = pool.add_operation(po1, 0); assert!(res.is_err()); match res.err().unwrap() { MempoolError::OperationAlreadyKnown => (), @@ -1063,58 +1188,69 @@ mod tests { #[test] fn test_expired() { let conf = conf(); - let mut pool = PoolInner::new(conf.clone()); + let mut pool = pool_with_conf(conf.clone()); let sender = Address::random(); let mut po1 = create_op(sender, 0, 10); po1.valid_time_range.valid_until = Timestamp::from(1); - let _ = pool.add_operation(po1.clone()).unwrap(); + let hash = pool.add_operation(po1.clone(), 0).unwrap(); - let res = pool.do_maintenance(0, Timestamp::from(2), GasFees::default(), 0); - assert_eq!(res.len(), 1); - assert_eq!(res[0].0, po1.uo.hash(conf.entry_point, conf.chain_id)); - assert_eq!(res[0].1, Timestamp::from(1)); + pool.do_maintenance(0, Timestamp::from(2), None, GasFees::default(), 0); + assert_eq!(None, pool.get_operation_by_hash(hash)); } #[test] fn test_multiple_expired() { let conf = conf(); - let mut pool = PoolInner::new(conf.clone()); + let mut pool = pool_with_conf(conf.clone()); let mut po1 = create_op(Address::random(), 0, 10); po1.valid_time_range.valid_until = 5.into(); - let _ = pool.add_operation(po1.clone()).unwrap(); + let hash1 = pool.add_operation(po1.clone(), 0).unwrap(); let mut po2 = create_op(Address::random(), 0, 10); po2.valid_time_range.valid_until = 10.into(); - let _ = pool.add_operation(po2.clone()).unwrap(); + let hash2 = pool.add_operation(po2.clone(), 0).unwrap(); let mut po3 = create_op(Address::random(), 0, 10); po3.valid_time_range.valid_until = 9.into(); - let _ = pool.add_operation(po3.clone()).unwrap(); + let hash3 = pool.add_operation(po3.clone(), 0).unwrap(); - let res = pool.do_maintenance(0, Timestamp::from(10), GasFees::default(), 0); + pool.do_maintenance(0, Timestamp::from(10), None, GasFees::default(), 0); - assert_eq!(res.len(), 2); - assert!(res.contains(&(po1.uo.hash(conf.entry_point, conf.chain_id), 5.into()))); - assert!(res.contains(&(po3.uo.hash(conf.entry_point, conf.chain_id), 9.into()))); + assert_eq!(None, pool.get_operation_by_hash(hash1)); + assert!(pool.get_operation_by_hash(hash2).is_some()); + assert_eq!(None, pool.get_operation_by_hash(hash3)); } fn conf() -> PoolInnerConfig { PoolInnerConfig { + chain_spec: ChainSpec::default(), entry_point: Address::random(), - chain_id: 1, min_replacement_fee_increase_percentage: 10, max_size_of_pool_bytes: 20 * mem_size_of_ordered_pool_op(), throttled_entity_mempool_count: 4, throttled_entity_live_blocks: 10, + 
da_gas_tracking_enabled: false, } } + fn pool() -> PoolInner { + PoolInner::new( + conf(), + MockEntryPointV0_6::new(), + broadcast::channel(100000).0, + ) + } + + fn pool_with_conf(conf: PoolInnerConfig) -> PoolInner { + PoolInner::new( + conf, + MockEntryPointV0_6::new(), + broadcast::channel(100000).0, + ) + } + fn mem_size_of_ordered_pool_op() -> usize { - OrderedPoolOperation { - po: Arc::new(create_op(Address::random(), 1, 1)), - submission_id: 1, - } - .mem_size() + OrderedPoolOperation::new(Arc::new(create_op(Address::random(), 1, 1)), 1, true).mem_size() } fn create_op(sender: Address, nonce: usize, max_fee_per_gas: u128) -> PoolOperation { @@ -1142,10 +1278,14 @@ mod tests { sim_block_hash: B256::random(), sim_block_number: 0, account_is_staked: false, + da_gas_data: Default::default(), } } - fn check_map_entry(actual: Option<&OrderedPoolOperation>, expected: Option<&PoolOperation>) { + fn check_map_entry( + actual: Option<&Arc>, + expected: Option<&PoolOperation>, + ) { match (actual, expected) { (Some(actual), Some(expected)) => assert_eq!(*actual.po, *expected), (None, None) => (), diff --git a/crates/pool/src/mempool/uo_pool.rs b/crates/pool/src/mempool/uo_pool.rs index 33811a601..d61e3af84 100644 --- a/crates/pool/src/mempool/uo_pool.rs +++ b/crates/pool/src/mempool/uo_pool.rs @@ -11,14 +11,14 @@ // You should have received a copy of the GNU General Public License along with Rundler. // If not, see https://www.gnu.org/licenses/. -use std::{collections::HashSet, marker::PhantomData, sync::Arc}; +use std::{collections::HashSet, marker::PhantomData, sync::Arc, time::Instant}; use alloy_primitives::{utils::format_units, Address, B256, U256}; use itertools::Itertools; -use metrics::{Counter, Gauge}; +use metrics::{Counter, Gauge, Histogram}; use metrics_derive::Metrics; use parking_lot::RwLock; -use rundler_provider::{EntryPoint, EvmProvider}; +use rundler_provider::{DAGasProvider, EntryPoint, EvmProvider}; use rundler_sim::{Prechecker, Simulator}; use rundler_types::{ pool::{ @@ -46,19 +46,14 @@ use crate::{ /// Wrapper around a pool object that implements thread-safety /// via a RwLock. Safe to call from multiple threads. Methods /// block on write locks. 
-pub(crate) struct UoPool< - UO: UserOperation, - EP: EvmProvider, - P: Prechecker, - S: Simulator, - E: EntryPoint, -> { +pub(crate) struct UoPool { config: PoolConfig, - state: RwLock, + state: RwLock>, paymaster: PaymasterTracker, reputation: Arc, event_sender: broadcast::Sender>, - provider: EP, + entry_point: E, + evm: EP, prechecker: P, simulator: S, ep_specific_metrics: UoPoolMetricsEPSpecific, @@ -66,8 +61,8 @@ pub(crate) struct UoPool< _uo_type: PhantomData, } -struct UoPoolState { - pool: PoolInner, +struct UoPoolState { + pool: PoolInner, throttled_ops: HashSet, block_number: u64, block_hash: B256, @@ -77,25 +72,28 @@ struct UoPoolState { impl UoPool where - UO: UserOperation, - EP: EvmProvider, - P: Prechecker, - S: Simulator, - E: EntryPoint, + E: DAGasProvider + Clone, { + // TODO refactor provider args + #[allow(clippy::too_many_arguments)] pub(crate) fn new( config: PoolConfig, event_sender: broadcast::Sender>, - provider: EP, + evm: EP, + entry_point: E, prechecker: P, simulator: S, paymaster: PaymasterTracker, reputation: Arc, ) -> Self { - let entry_point = config.entry_point.to_string(); + let ep = config.entry_point.to_string(); Self { state: RwLock::new(UoPoolState { - pool: PoolInner::new(config.clone().into()), + pool: PoolInner::new( + config.clone().into(), + entry_point.clone(), + event_sender.clone(), + ), throttled_ops: HashSet::new(), block_number: 0, block_hash: B256::ZERO, @@ -105,14 +103,12 @@ where reputation, paymaster, event_sender, - provider, + evm, + entry_point, prechecker, simulator, config, - ep_specific_metrics: UoPoolMetricsEPSpecific::new_with_labels(&[( - "entry_point", - entry_point, - )]), + ep_specific_metrics: UoPoolMetricsEPSpecific::new_with_labels(&[("entry_point", ep)]), metrics: UoPoolMetrics::default(), _uo_type: PhantomData, } @@ -169,17 +165,27 @@ where EP: EvmProvider, P: Prechecker, S: Simulator, - E: EntryPoint, + E: EntryPoint + DAGasProvider + Clone, { async fn on_chain_update(&self, update: &ChainUpdate) { - { - let deduped_ops = update.deduped_ops(); - let mined_ops = deduped_ops - .mined_ops - .iter() - .filter(|op| op.entry_point == self.config.entry_point); + let deduped_ops = update.deduped_ops(); + let mined_ops = deduped_ops + .mined_ops + .iter() + .filter(|op| op.entry_point == self.config.entry_point); + + let entity_balance_updates = update.entity_balance_updates.iter().filter_map(|u| { + if u.entrypoint == self.config.entry_point { + Some(u.address) + } else { + None + } + }); - let entity_balance_updates = update.entity_balance_updates.iter().filter_map(|u| { + let unmined_entity_balance_updates = update + .unmined_entity_balance_updates + .iter() + .filter_map(|u| { if u.entrypoint == self.config.entry_point { Some(u.address) } else { @@ -187,112 +193,119 @@ where } }); - let unmined_entity_balance_updates = update - .unmined_entity_balance_updates - .iter() - .filter_map(|u| { - if u.entrypoint == self.config.entry_point { - Some(u.address) - } else { - None - } - }); - - let unmined_ops = deduped_ops - .unmined_ops - .iter() - .filter(|op| op.entry_point == self.config.entry_point); - let mut mined_op_count = 0; - let mut unmined_op_count = 0; + let unmined_ops = deduped_ops + .unmined_ops + .iter() + .filter(|op| op.entry_point == self.config.entry_point); + let mut mined_op_count = 0; + let mut unmined_op_count = 0; - for op in mined_ops { - if op.entry_point != self.config.entry_point { - continue; - } - self.paymaster.update_paymaster_balance_from_mined_op(op); + for op in mined_ops { + if op.entry_point != 
self.config.entry_point { + continue; + } + self.paymaster.update_paymaster_balance_from_mined_op(op); - // Remove throttled ops that were included in the block - self.state.write().throttled_ops.remove(&op.hash); + // Remove throttled ops that were included in the block + self.state.write().throttled_ops.remove(&op.hash); - if let Some(pool_op) = self - .state - .write() - .pool - .mine_operation(op, update.latest_block_number) - { - // Only account for an entity once - for entity_addr in pool_op.entities().map(|e| e.address).unique() { - self.reputation.add_included(entity_addr); - } - mined_op_count += 1; + if let Some(pool_op) = self + .state + .write() + .pool + .mine_operation(op, update.latest_block_number) + { + // Only account for an entity once + for entity_addr in pool_op.entities().map(|e| e.address).unique() { + self.reputation.add_included(entity_addr); } + mined_op_count += 1; } + } - for op in unmined_ops { - if op.entry_point != self.config.entry_point { - continue; - } - - if let Some(paymaster) = op.paymaster { - self.paymaster - .unmine_actual_cost(&paymaster, op.actual_gas_cost); - } + for op in unmined_ops { + if op.entry_point != self.config.entry_point { + continue; + } - let pool_op = self.state.write().pool.unmine_operation(op); + if let Some(paymaster) = op.paymaster { + self.paymaster + .unmine_actual_cost(&paymaster, op.actual_gas_cost); + } - if let Some(po) = pool_op { - for entity_addr in po.entities().map(|e| e.address).unique() { - self.reputation.remove_included(entity_addr); - } + let pool_op = self.state.write().pool.unmine_operation(op); - unmined_op_count += 1; - let _ = self.paymaster.add_or_update_balance(&po).await; + if let Some(po) = pool_op { + for entity_addr in po.entities().map(|e| e.address).unique() { + self.reputation.remove_included(entity_addr); } + + unmined_op_count += 1; + let _ = self.paymaster.add_or_update_balance(&po).await; } + } - // Update paymaster balances AFTER updating the pool to reset confirmed balances if needed. - if update.reorg_larger_than_history { - if let Err(e) = self.reset_confirmed_paymaster_balances().await { + // Update paymaster balances AFTER updating the pool to reset confirmed balances if needed. 
+ if update.reorg_larger_than_history { + if let Err(e) = self.reset_confirmed_paymaster_balances().await { + tracing::error!("Failed to reset confirmed paymaster balances: {:?}", e); + } + } else { + let addresses = entity_balance_updates + .chain(unmined_entity_balance_updates) + .unique() + .collect::>(); + if !addresses.is_empty() { + if let Err(e) = self + .paymaster + .reset_confirmed_balances_for(&addresses) + .await + { tracing::error!("Failed to reset confirmed paymaster balances: {:?}", e); } - } else { - let addresses = entity_balance_updates - .chain(unmined_entity_balance_updates) - .unique() - .collect::>(); - if !addresses.is_empty() { - if let Err(e) = self - .paymaster - .reset_confirmed_balances_for(&addresses) - .await - { - tracing::error!("Failed to reset confirmed paymaster balances: {:?}", e); - } - } } + } - if mined_op_count > 0 { - info!( - "{mined_op_count} op(s) mined on entry point {:?} when advancing to block with number {}, hash {:?}.", - self.config.entry_point, - update.latest_block_number, - update.latest_block_hash, - ); - } - if unmined_op_count > 0 { - info!( - "{unmined_op_count} op(s) unmined in reorg on entry point {:?} when advancing to block with number {}, hash {:?}.", - self.config.entry_point, - update.latest_block_number, - update.latest_block_hash, - ); + if mined_op_count > 0 { + info!( + "{mined_op_count} op(s) mined on entry point {:?} when advancing to block with number {}, hash {:?}.", + self.config.entry_point, + update.latest_block_number, + update.latest_block_hash, + ); + } + if unmined_op_count > 0 { + info!( + "{unmined_op_count} op(s) unmined in reorg on entry point {:?} when advancing to block with number {}, hash {:?}.", + self.config.entry_point, + update.latest_block_number, + update.latest_block_hash, + ); + } + let ops_seen: f64 = (mined_op_count as isize - unmined_op_count as isize) as f64; + self.ep_specific_metrics.ops_seen.increment(ops_seen); + self.ep_specific_metrics + .unmined_operations + .increment(unmined_op_count); + + let da_block_data = if self.config.da_gas_tracking_enabled { + match self + .entry_point + .block_data(update.latest_block_hash.into()) + .await + { + Ok(da_block_data) => Some(da_block_data), + Err(e) => { + tracing::error!("Failed to get da block data, skipping da tracking: {:?}", e); + None + } } - let ops_seen: f64 = (mined_op_count as isize - unmined_op_count as isize) as f64; - self.ep_specific_metrics.ops_seen.increment(ops_seen); - self.ep_specific_metrics - .unmined_operations - .increment(unmined_op_count); + } else { + None + }; + let start = Instant::now(); + { let mut state = self.state.write(); state .pool @@ -328,20 +341,22 @@ where // pool maintenance let gas_fees = state.gas_fees; let base_fee = state.base_fee; - let expired = state.pool.do_maintenance( + state.pool.do_maintenance( update.latest_block_number, update.latest_block_timestamp, + da_block_data.as_ref(), gas_fees, base_fee, - ); - - for (hash, until) in expired { - self.emit(OpPoolEvent::RemovedOp { - op_hash: hash, - reason: OpRemovalReason::Expired { valid_until: until }, - }) - } + ) } + let maintenance_time = start.elapsed(); + tracing::debug!( + "Pool maintenance took {:?} µs", + maintenance_time.as_micros() + ); + self.ep_specific_metrics + .maintenance_time + .record(maintenance_time.as_micros() as f64); // update required bundle fees and update metrics match self.prechecker.update_fees().await { @@ -394,19 +409,6 @@ where self.config.entry_point_version } - fn set_tracking(&self, paymaster: bool, reputation: bool) 
{ - self.paymaster.set_tracking(paymaster); - self.reputation.set_tracking(reputation); - } - - async fn reset_confirmed_paymaster_balances(&self) -> MempoolResult<()> { - self.paymaster.reset_confirmed_balances().await - } - - async fn get_stake_status(&self, address: Address) -> MempoolResult { - self.paymaster.get_stake_status(address).await - } - async fn add_operation( &self, origin: OperationOrigin, @@ -456,7 +458,7 @@ where // This doesn't clear all race conditions, as the pool may need to update its state before // a UO can be valid, i.e. for replacement. let (block_hash, block_number) = self - .provider + .evm .get_latest_block_hash_and_number() .await .map_err(anyhow::Error::from)?; @@ -475,7 +477,9 @@ where // Prechecks let versioned_op = op.clone().into(); - self.prechecker + + let precheck_ret = self + .prechecker .check(&versioned_op, block_hash.into()) .await?; @@ -507,6 +511,7 @@ where sim_block_number: block_number, account_is_staked: sim_result.account_is_staked, entity_infos: sim_result.entity_infos, + da_gas_data: precheck_ret.da_gas_data, }; // Check sender count in mempool. If sender has too many operations, must be staked @@ -540,7 +545,9 @@ where // Add op to pool let hash = { let mut state = self.state.write(); - let hash = state.pool.add_operation(pool_op.clone())?; + let hash = state + .pool + .add_operation(pool_op.clone(), precheck_ret.required_pre_verification_gas)?; if throttled { state.throttled_ops.insert(hash); @@ -563,18 +570,18 @@ where } }); } + + // Emit event let op_hash = pool_op .uo - .hash(self.config.entry_point, self.config.chain_id); - let valid_after = pool_op.valid_time_range.valid_after; - let valid_until = pool_op.valid_time_range.valid_until; + .hash(self.config.entry_point, self.config.chain_spec.id); self.emit(OpPoolEvent::ReceivedOp { op_hash, op: pool_op.uo, block_number: pool_op.sim_block_number, origin, - valid_after, - valid_until, + valid_after: pool_op.valid_time_range.valid_after, + valid_until: pool_op.valid_time_range.valid_until, entities: entity_summary, }); @@ -623,7 +630,9 @@ where } }; - let hash = po.uo.hash(self.config.entry_point, self.config.chain_id); + let hash = po + .uo + .hash(self.config.entry_point, self.config.chain_spec.id); // This can return none if the operation was removed by another thread if self @@ -669,7 +678,8 @@ where } // get the best operations from the pool - let ordered_ops = self.state.read().pool.best_operations(); + let state = self.state.read(); + let ordered_ops = state.pool.best_operations(); // keep track of senders to avoid sending multiple ops from the same sender let mut senders = HashSet::
::new(); @@ -703,6 +713,8 @@ where self.state.read().pool.get_operation_by_hash(hash) } + // DEBUG METHODS + fn clear_state(&self, clear_mempool: bool, clear_paymaster: bool, clear_reputation: bool) { if clear_mempool { self.state.write().pool.clear(); @@ -733,6 +745,19 @@ where self.reputation .set_reputation(address, ops_seen, ops_included) } + + async fn get_stake_status(&self, address: Address) -> MempoolResult { + self.paymaster.get_stake_status(address).await + } + + async fn reset_confirmed_paymaster_balances(&self) -> MempoolResult<()> { + self.paymaster.reset_confirmed_balances().await + } + + fn set_tracking(&self, paymaster: bool, reputation: bool) { + self.paymaster.set_tracking(paymaster); + self.reputation.set_tracking(reputation); + } } #[derive(Metrics)] @@ -746,6 +771,8 @@ struct UoPoolMetricsEPSpecific { removed_operations: Counter, #[metric(describe = "the count of removed entities.")] removed_entities: Counter, + #[metric(describe = "time to run pool maintenance in µs.")] + maintenance_time: Histogram, } #[derive(Metrics)] @@ -767,10 +794,12 @@ mod tests { use mockall::Sequence; use rundler_provider::{DepositInfo, MockEntryPointV0_6, MockEvmProvider}; use rundler_sim::{ - MockPrechecker, MockSimulator, PrecheckError, PrecheckSettings, SimulationError, - SimulationResult, SimulationSettings, ViolationError, + MockPrechecker, MockSimulator, PrecheckError, PrecheckReturn, PrecheckSettings, + SimulationError, SimulationResult, SimulationSettings, ViolationError, }; use rundler_types::{ + chain::ChainSpec, + da::DAGasUOData, pool::{PrecheckViolation, SimulationViolation}, v0_6::UserOperation, EntityInfo, EntityInfos, EntityType, EntryPointVersion, GasFees, @@ -879,7 +908,7 @@ mod tests { reorg_depth: 0, mined_ops: vec![MinedOp { entry_point: pool.config.entry_point, - hash: uos[0].hash(pool.config.entry_point, 1), + hash: uos[0].hash(pool.config.entry_point, 0), sender: uos[0].sender(), nonce: uos[0].nonce(), actual_gas_cost: U256::ZERO, @@ -964,7 +993,7 @@ mod tests { reorg_depth: 0, mined_ops: vec![MinedOp { entry_point: pool.config.entry_point, - hash: uos[0].hash(pool.config.entry_point, 1), + hash: uos[0].hash(pool.config.entry_point, 0), sender: uos[0].sender(), nonce: uos[0].nonce(), actual_gas_cost: U256::from(10), @@ -1004,7 +1033,7 @@ mod tests { mined_ops: vec![], unmined_ops: vec![MinedOp { entry_point: pool.config.entry_point, - hash: uos[0].hash(pool.config.entry_point, 1), + hash: uos[0].hash(pool.config.entry_point, 0), sender: uos[0].sender(), nonce: uos[0].nonce(), actual_gas_cost: U256::from(10), @@ -1021,10 +1050,10 @@ mod tests { }) .await; + check_ops(pool.best_operations(3, 0).unwrap(), uos); + let metadata = pool.paymaster.paymaster_balance(paymaster).await.unwrap(); assert_eq!(metadata.pending_balance, U256::from(840)); - - check_ops(pool.best_operations(3, 0).unwrap(), uos); } #[tokio::test] @@ -1045,7 +1074,7 @@ mod tests { reorg_depth: 0, mined_ops: vec![MinedOp { entry_point: Address::random(), - hash: uos[0].hash(pool.config.entry_point, 1), + hash: uos[0].hash(pool.config.entry_point, 0), sender: uos[0].sender(), nonce: uos[0].nonce(), actual_gas_cost: U256::ZERO, @@ -1087,7 +1116,7 @@ mod tests { reorg_depth: 0, mined_ops: vec![MinedOp { entry_point: pool.config.entry_point, - hash: uos[0].hash(pool.config.entry_point, 1), + hash: uos[0].hash(pool.config.entry_point, 0), sender: uos[0].sender(), nonce: uos[0].nonce(), actual_gas_cost: U256::ZERO, @@ -1163,7 +1192,7 @@ mod tests { reorg_depth: 0, mined_ops: vec![MinedOp { entry_point: 
pool.config.entry_point, - hash: uos[0].hash(pool.config.entry_point, 1), + hash: uos[0].hash(pool.config.entry_point, 0), sender: uos[0].sender(), nonce: uos[0].nonce(), actual_gas_cost: U256::ZERO, @@ -1477,7 +1506,7 @@ mod tests { .add_operation(OperationOrigin::Local, op.op.clone()) .await .unwrap(); - let hash = op.op.hash(pool.config.entry_point, 1); + let hash = op.op.hash(pool.config.entry_point, 0); pool.on_chain_update(&ChainUpdate { latest_block_number: 11, @@ -1552,7 +1581,7 @@ mod tests { impl EvmProvider, impl Prechecker, impl Simulator, - impl EntryPoint, + impl EntryPoint + DAGasProvider + Clone, > { let entrypoint = MockEntryPointV0_6::new(); create_pool_with_entry_point(ops, entrypoint) @@ -1566,12 +1595,13 @@ mod tests { impl EvmProvider, impl Prechecker, impl Simulator, - impl EntryPoint, + impl EntryPoint + DAGasProvider + Clone, > { + let entrypoint = Arc::new(entrypoint); let args = PoolConfig { + chain_spec: ChainSpec::default(), entry_point: Address::random(), entry_point_version: EntryPointVersion::V0_6, - chain_id: 1, min_replacement_fee_increase_percentage: 10, max_size_of_pool_bytes: 10000, blocklist: None, @@ -1584,21 +1614,21 @@ mod tests { throttled_entity_mempool_count: 4, throttled_entity_live_blocks: 10, paymaster_tracking_enabled: true, + da_gas_tracking_enabled: false, paymaster_cache_length: 100, reputation_tracking_enabled: true, drop_min_num_blocks: 10, }; - let mut provider = MockEvmProvider::new(); - provider - .expect_get_latest_block_hash_and_number() + let mut evm = MockEvmProvider::new(); + evm.expect_get_latest_block_hash_and_number() .returning(|| Ok((B256::ZERO, 0))); let mut simulator = MockSimulator::new(); let mut prechecker = MockPrechecker::new(); let paymaster = PaymasterTracker::new( - entrypoint, + entrypoint.clone(), PaymasterConfig::new( args.sim_settings.min_stake_value, args.sim_settings.min_unstake_delay, @@ -1628,7 +1658,10 @@ mod tests { if let Some(error) = &op.precheck_error { Err(PrecheckError::Violations(vec![error.clone()])) } else { - Ok(()) + Ok(PrecheckReturn { + da_gas_data: DAGasUOData::Empty, + required_pre_verification_gas: 0, + }) } }); simulator @@ -1661,7 +1694,8 @@ mod tests { UoPool::new( args, event_sender, - provider, + evm, + entrypoint, prechecker, simulator, paymaster, @@ -1678,7 +1712,7 @@ mod tests { impl EvmProvider, impl Prechecker, impl Simulator, - impl EntryPoint, + impl EntryPoint + DAGasProvider + Clone, >, Vec, ) { @@ -1698,7 +1732,7 @@ mod tests { impl EvmProvider, impl Prechecker, impl Simulator, - impl EntryPoint, + impl EntryPoint + DAGasProvider + Clone, >, Vec, ) { diff --git a/crates/pool/src/server/remote/protos.rs b/crates/pool/src/server/remote/protos.rs index ecbb91d28..7806b18f0 100644 --- a/crates/pool/src/server/remote/protos.rs +++ b/crates/pool/src/server/remote/protos.rs @@ -16,6 +16,10 @@ use anyhow::{anyhow, Context}; use rundler_task::grpc::protos::{from_bytes, ConversionError, ToProtoBytes}; use rundler_types::{ chain::ChainSpec, + da::{ + BedrockDAGasUOData as RundlerBedrockDAGasUOData, DAGasUOData as RundlerDAGasUOData, + NitroDAGasUOData as RundlerNitroDAGasUOData, + }, pool::{ NewHead as PoolNewHead, PaymasterMetadata as PoolPaymasterMetadata, PoolOperation, Reputation as PoolReputation, ReputationStatus as PoolReputationStatus, @@ -359,10 +363,52 @@ impl From<&PoolOperation> for MempoolOp { expected_code_hash: op.expected_code_hash.to_proto_bytes(), sim_block_hash: op.sim_block_hash.to_proto_bytes(), account_is_staked: op.account_is_staked, + da_gas_data: 
Some(DaGasUoData::from(&op.da_gas_data)), } } } +impl From<&RundlerDAGasUOData> for DaGasUoData { + fn from(data: &RundlerDAGasUOData) -> Self { + match data { + RundlerDAGasUOData::Empty => DaGasUoData { + data: Some(da_gas_uo_data::Data::Empty(EmptyUoData {})), + }, + RundlerDAGasUOData::Nitro(data) => DaGasUoData { + data: Some(da_gas_uo_data::Data::Nitro(NitroDaGasUoData { + uo_units: data.uo_units.to_proto_bytes(), + })), + }, + RundlerDAGasUOData::Bedrock(data) => DaGasUoData { + data: Some(da_gas_uo_data::Data::Bedrock(BedrockDaGasUoData { + uo_units: data.uo_units, + })), + }, + } + } +} + +impl TryFrom for RundlerDAGasUOData { + type Error = ConversionError; + + fn try_from(data: DaGasUoData) -> Result { + let ret = match data.data { + Some(da_gas_uo_data::Data::Empty(_)) => RundlerDAGasUOData::Empty, + Some(da_gas_uo_data::Data::Nitro(NitroDaGasUoData { uo_units })) => { + RundlerDAGasUOData::Nitro(RundlerNitroDAGasUOData { + uo_units: from_bytes(&uo_units)?, + }) + } + Some(da_gas_uo_data::Data::Bedrock(BedrockDaGasUoData { uo_units })) => { + RundlerDAGasUOData::Bedrock(RundlerBedrockDAGasUOData { uo_units }) + } + None => RundlerDAGasUOData::Empty, + }; + + Ok(ret) + } +} + pub const MISSING_USER_OP_ERR_STR: &str = "Mempool op should contain user operation"; impl TryUoFromProto for PoolOperation { fn try_uo_from_proto(op: MempoolOp, chain_spec: &ChainSpec) -> Result { @@ -394,6 +440,10 @@ impl TryUoFromProto for PoolOperation { sim_block_number: 0, account_is_staked: op.account_is_staked, entity_infos: EntityInfos::default(), + da_gas_data: op + .da_gas_data + .context("DA gas data should be set")? + .try_into()?, }) } } diff --git a/crates/pool/src/task.rs b/crates/pool/src/task.rs index 67324ef87..8ba7d45e7 100644 --- a/crates/pool/src/task.rs +++ b/crates/pool/src/task.rs @@ -285,7 +285,7 @@ where chain_spec: ChainSpec, pool_config: &PoolConfig, event_sender: broadcast::Sender>, - provider: P, + evm: P, ep: E, simulator: S, ) -> anyhow::Result> @@ -296,9 +296,9 @@ where E: EntryPointProvider + Clone + 'static, S: Simulator + 'static, { - let fee_oracle = gas::get_fee_oracle(&chain_spec, provider.clone()); + let fee_oracle = gas::get_fee_oracle(&chain_spec, evm.clone()); let fee_estimator = FeeEstimatorImpl::new( - provider.clone(), + evm.clone(), fee_oracle, pool_config.precheck_settings.priority_fee_mode, pool_config @@ -308,7 +308,7 @@ where let prechecker = PrecheckerImpl::new( chain_spec, - provider.clone(), + evm.clone(), ep.clone(), fee_estimator, pool_config.precheck_settings, @@ -340,7 +340,8 @@ where let uo_pool = UoPool::new( pool_config.clone(), event_sender, - provider, + evm, + ep, prechecker, simulator, paymaster, diff --git a/crates/rpc/src/eth/api.rs b/crates/rpc/src/eth/api.rs index 688cd9305..ec442c431 100644 --- a/crates/rpc/src/eth/api.rs +++ b/crates/rpc/src/eth/api.rs @@ -216,6 +216,7 @@ mod tests { sim_block_number: 1000, account_is_staked: false, entity_infos: EntityInfos::default(), + da_gas_data: rundler_types::da::DAGasUOData::Empty, }; let mut pool = MockPool::default(); diff --git a/crates/sim/src/gas/gas.rs b/crates/sim/src/gas/gas.rs index 512f29687..1fd36942c 100644 --- a/crates/sim/src/gas/gas.rs +++ b/crates/sim/src/gas/gas.rs @@ -16,8 +16,8 @@ use std::{cmp, fmt::Debug}; use anyhow::{bail, Context}; #[cfg(feature = "test-utils")] use mockall::automock; -use rundler_provider::{BlockHashOrNumber, DAGasProvider, EntryPoint, EvmProvider}; -use rundler_types::{chain::ChainSpec, GasFees, UserOperation}; +use rundler_provider::{BlockHashOrNumber, 
DAGasProvider, EvmProvider}; +use rundler_types::{chain::ChainSpec, da::DAGasUOData, GasFees, UserOperation}; use rundler_utils::math; use tokio::try_join; @@ -36,10 +36,7 @@ use super::oracle::FeeOracle; /// /// Networks that require Data Availability (DA) pre_verification_gas are those that charge extra calldata fees /// that can scale based on DA gas prices. -pub async fn estimate_pre_verification_gas< - UO: UserOperation, - E: EntryPoint + DAGasProvider, ->( +pub async fn estimate_pre_verification_gas>( chain_spec: &ChainSpec, entry_point: &E, full_op: &UO, @@ -63,17 +60,14 @@ pub async fn estimate_pre_verification_gas< /// Calculate the required pre_verification_gas for the given user operation and the provided base fee. /// /// The effective gas price is calculated as min(base_fee + max_priority_fee_per_gas, max_fee_per_gas) -pub async fn calc_required_pre_verification_gas< - UO: UserOperation, - E: EntryPoint + DAGasProvider, ->( +pub async fn calc_required_pre_verification_gas>( chain_spec: &ChainSpec, entry_point: &E, op: &UO, block: BlockHashOrNumber, base_fee: u128, -) -> anyhow::Result { - let da_gas = if chain_spec.da_pre_verification_gas { +) -> anyhow::Result<(u128, DAGasUOData)> { + let (da_gas, uo_data) = if chain_spec.da_pre_verification_gas { let gas_price = cmp::min( base_fee + op.max_priority_fee_per_gas(), op.max_fee_per_gas(), @@ -83,16 +77,19 @@ pub async fn calc_required_pre_verification_gas< bail!("Gas price cannot be zero") } - entry_point + let (da_gas, uo_data, _) = entry_point .calc_da_gas(op.clone(), block, gas_price) - .await? - .0 + .await?; + (da_gas, uo_data) } else { - 0 + (0, DAGasUOData::Empty) }; // Currently assume 1 op bundle - Ok(op.required_pre_verification_gas(chain_spec, 1, da_gas)) + Ok(( + op.required_pre_verification_gas(chain_spec, 1, da_gas), + uo_data, + )) } /// Different modes for calculating the required priority fee diff --git a/crates/sim/src/lib.rs b/crates/sim/src/lib.rs index 22249a8c3..e7419d9d3 100644 --- a/crates/sim/src/lib.rs +++ b/crates/sim/src/lib.rs @@ -49,7 +49,8 @@ mod precheck; #[cfg(feature = "test-utils")] pub use precheck::MockPrechecker; pub use precheck::{ - PrecheckError, Prechecker, PrecheckerImpl, Settings as PrecheckSettings, MIN_CALL_GAS_LIMIT, + PrecheckError, PrecheckReturn, Prechecker, PrecheckerImpl, Settings as PrecheckSettings, + MIN_CALL_GAS_LIMIT, }; /// Simulation and violation checking diff --git a/crates/sim/src/precheck.rs b/crates/sim/src/precheck.rs index bd538f6f2..8402cf58d 100644 --- a/crates/sim/src/precheck.rs +++ b/crates/sim/src/precheck.rs @@ -21,6 +21,7 @@ use mockall::automock; use rundler_provider::{BlockHashOrNumber, DAGasProvider, EntryPoint, EvmProvider}; use rundler_types::{ chain::ChainSpec, + da::DAGasUOData, pool::{MempoolError, PrecheckViolation}, GasFees, UserOperation, }; @@ -34,6 +35,14 @@ use crate::{ /// The min cost of a `CALL` with nonzero value, as required by the spec. pub const MIN_CALL_GAS_LIMIT: u128 = 9100; +/// The result of a precheck call. +pub struct PrecheckReturn { + /// DA gas data for the operation + pub da_gas_data: DAGasUOData, + /// The required pre-verification gas for the operation + pub required_pre_verification_gas: u128, +} + /// Trait for checking if a user operation is valid before simulation /// according to the spec rules. 
#[cfg_attr(feature = "test-utils", automock(type UO = rundler_types::v0_6::UserOperation;))] @@ -43,7 +52,11 @@ pub trait Prechecker: Send + Sync { type UO: UserOperation; /// Run the precheck on the given operation and return an error if it fails. - async fn check(&self, op: &Self::UO, block: BlockHashOrNumber) -> Result<(), PrecheckError>; + async fn check( + &self, + op: &Self::UO, + block: BlockHashOrNumber, + ) -> Result; /// Update and return the bundle fees. /// @@ -98,6 +111,7 @@ pub struct Settings { /// Percentage of the current network base fee that a user operation must have to be accepted into the mempool. pub base_fee_accept_percent: u32, /// Percentage of the preVerificationGas that a user operation must have to be accepted into the mempool. + /// Only applied if the chain has dynamic preVerificationGas, else enforced to 100% pub pre_verification_gas_accept_percent: u32, } @@ -115,7 +129,7 @@ impl Default for Settings { } } -#[derive(Copy, Clone, Debug)] +#[derive(Clone, Debug)] struct AsyncData { factory_exists: bool, sender_exists: bool, @@ -123,6 +137,7 @@ struct AsyncData { payer_funds: U256, base_fee: u128, min_pre_verification_gas: u128, + da_gas_data: DAGasUOData, } #[derive(Copy, Clone, Debug)] @@ -146,16 +161,23 @@ where { type UO = UO; - async fn check(&self, op: &Self::UO, block: BlockHashOrNumber) -> Result<(), PrecheckError> { + async fn check( + &self, + op: &Self::UO, + block: BlockHashOrNumber, + ) -> Result { let async_data = self.load_async_data(op, block).await?; let mut violations: Vec = vec![]; - violations.extend(self.check_init_code(op, async_data)); - violations.extend(self.check_gas(op, async_data)); - violations.extend(self.check_payer(op, async_data)); + violations.extend(self.check_init_code(op, &async_data)); + violations.extend(self.check_gas(op, &async_data)); + violations.extend(self.check_payer(op, &async_data)); if !violations.is_empty() { Err(violations)? } - Ok(()) + Ok(PrecheckReturn { + da_gas_data: async_data.da_gas_data, + required_pre_verification_gas: async_data.min_pre_verification_gas, + }) } async fn update_fees(&self) -> anyhow::Result<(GasFees, u128)> { @@ -197,7 +219,7 @@ where } } - fn check_init_code(&self, op: &UO, async_data: AsyncData) -> ArrayVec { + fn check_init_code(&self, op: &UO, async_data: &AsyncData) -> ArrayVec { let AsyncData { factory_exists, sender_exists, @@ -216,14 +238,14 @@ where op.factory().unwrap(), )) } - if sender_exists { + if *sender_exists { violations.push(PrecheckViolation::ExistingSenderWithInitCode(op.sender())); } } violations } - fn check_gas(&self, op: &UO, async_data: AsyncData) -> ArrayVec { + fn check_gas(&self, op: &UO, async_data: &AsyncData) -> ArrayVec { let Settings { max_verification_gas, max_total_execution_gas, @@ -231,9 +253,9 @@ where } = self.settings; let AsyncData { base_fee, - min_pre_verification_gas, + mut min_pre_verification_gas, .. - } = async_data; + } = *async_data; let mut violations = ArrayVec::new(); if op.verification_gas_limit() > max_verification_gas { @@ -255,10 +277,12 @@ where // if preVerificationGas is dynamic, then allow for the percentage buffer // and check if the preVerificationGas is at least the minimum. 
- let min_pre_verification_gas = math::percent( - min_pre_verification_gas, - self.settings.pre_verification_gas_accept_percent, - ); + if self.chain_spec.da_pre_verification_gas { + min_pre_verification_gas = math::percent( + min_pre_verification_gas, + self.settings.pre_verification_gas_accept_percent, + ); + } if op.pre_verification_gas() < min_pre_verification_gas { violations.push(PrecheckViolation::PreVerificationGasTooLow( op.pre_verification_gas(), @@ -298,12 +322,12 @@ where violations } - fn check_payer(&self, op: &UO, async_data: AsyncData) -> Option { + fn check_payer(&self, op: &UO, async_data: &AsyncData) -> Option { let AsyncData { paymaster_exists, payer_funds, .. - } = async_data; + } = *async_data; if let Some(paymaster) = op.paymaster() { if !paymaster_exists { return Some(PrecheckViolation::PaymasterIsNotContract(paymaster)); @@ -338,7 +362,7 @@ where sender_exists, paymaster_exists, payer_funds, - min_pre_verification_gas, + (min_pre_verification_gas, da_gas_data), ) = tokio::try_join!( self.is_contract(op.factory()), self.is_contract(Some(op.sender())), @@ -353,6 +377,7 @@ where payer_funds, base_fee, min_pre_verification_gas, + da_gas_data, }) } @@ -408,7 +433,7 @@ where op: UO, block: BlockHashOrNumber, base_fee: u128, - ) -> anyhow::Result { + ) -> anyhow::Result<(u128, DAGasUOData)> { gas::calc_required_pre_verification_gas( &self.chain_spec, &self.entry_point, @@ -456,6 +481,7 @@ mod tests { payer_funds: U256::from(5_000_000), base_fee: 4_000, min_pre_verification_gas: 1_000, + da_gas_data: DAGasUOData::Empty, } } @@ -488,7 +514,7 @@ mod tests { ) .build(); - let res = prechecker.check_init_code(&op, get_test_async_data()); + let res = prechecker.check_init_code(&op, &get_test_async_data()); let mut expected = ArrayVec::new(); expected.push(PrecheckViolation::ExistingSenderWithInitCode(address!( "3f8a2b6c4d5e1079286fa1b3c0d4e5f6902b7c8d" @@ -535,7 +561,7 @@ mod tests { ) .build(); - let res = prechecker.check_gas(&op, get_test_async_data()); + let res = prechecker.check_gas(&op, &get_test_async_data()); let total_gas_limit = op.gas_limit(&cs, Some(1)); @@ -584,7 +610,7 @@ mod tests { ) .build(); - let res = prechecker.check_payer(&op, get_test_async_data()); + let res = prechecker.check_payer(&op, &get_test_async_data()); assert_eq!( res, Some(PrecheckViolation::PaymasterDepositTooLow( @@ -624,7 +650,7 @@ mod tests { ..Default::default() }; - let res = prechecker.check_gas(&op, async_data); + let res = prechecker.check_gas(&op, &async_data); assert!(res.is_empty()); } @@ -652,7 +678,7 @@ mod tests { ..Default::default() }; - let res = prechecker.check_gas(&op, async_data); + let res = prechecker.check_gas(&op, &async_data); let mut expected = ArrayVec::::new(); expected.push(PrecheckViolation::MaxFeePerGasTooLow( math::percent(5_000, settings.base_fee_accept_percent - 10), @@ -692,7 +718,7 @@ mod tests { ..Default::default() }; - let res = prechecker.check_gas(&op, async_data); + let res = prechecker.check_gas(&op, &async_data); let mut expected = ArrayVec::::new(); expected.push(PrecheckViolation::MaxPriorityFeePerGasTooLow( mintip - 1, @@ -729,7 +755,7 @@ mod tests { ..Default::default() }; - let res = prechecker.check_gas(&op, async_data); + let res = prechecker.check_gas(&op, &async_data); let mut expected = ArrayVec::::new(); expected.push(PrecheckViolation::PreVerificationGasTooLow( math::percent(1_000, settings.pre_verification_gas_accept_percent - 10), diff --git a/crates/types/src/da.rs b/crates/types/src/da.rs index f09fe7532..160d22e7e 100644 --- 
a/crates/types/src/da.rs +++ b/crates/types/src/da.rs @@ -16,7 +16,7 @@ use serde::{Deserialize, Serialize}; /// Type of gas oracle contract for pricing calldata in preVerificationGas -#[derive(Clone, Copy, Debug, Deserialize, Default, Serialize)] +#[derive(Clone, Copy, Debug, Deserialize, Default, Serialize, PartialEq, Eq)] #[serde(rename_all = "SCREAMING_SNAKE_CASE")] pub enum DAGasOracleContractType { /// No gas oracle contract diff --git a/crates/types/src/pool/types.rs b/crates/types/src/pool/types.rs index 35b7c7121..3cc6c54d1 100644 --- a/crates/types/src/pool/types.rs +++ b/crates/types/src/pool/types.rs @@ -15,7 +15,8 @@ use alloy_primitives::{Address, B256, U256}; use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use crate::{ - entity::EntityInfos, Entity, StakeInfo, UserOperation, UserOperationVariant, ValidTimeRange, + da::DAGasUOData, entity::EntityInfos, Entity, StakeInfo, UserOperation, UserOperationVariant, + ValidTimeRange, }; /// The new head of the chain, as viewed by the pool @@ -119,6 +120,8 @@ pub struct PoolOperation { pub account_is_staked: bool, /// Staking information about all the entities. pub entity_infos: EntityInfos, + /// The DA gas data for this operation + pub da_gas_data: DAGasUOData, } impl PoolOperation { diff --git a/docs/cli.md b/docs/cli.md index 31a2eb310..d0cea9b6e 100644 --- a/docs/cli.md +++ b/docs/cli.md @@ -38,6 +38,8 @@ See [chain spec](./architecture/chain_spec.md) for a detailed description of cha - env: *MIN_STAKE_VALUE* - `--min_unstake_delay`: Minimum unstake delay. (default: `84600`). - env: *MIN_UNSTAKE_DELAY* +- `--tracer_timeout`: The timeout used for custom javascript tracers, the string must be in a valid parseable format that can be used in the `ParseDuration` function on an ethereum node. See Docs [Here](https://pkg.go.dev/time#ParseDuration). (default: `15s`) + - env: *TRACER_TIMEOUT* - `--user_operation_event_block_distance`: Number of blocks to search when calling `eth_getUserOperationByHash`. (default: all blocks) - env: *USER_OPERATION_EVENT_BLOCK_DISTANCE* - `--max_simulate_handle_ops_gas`: Maximum gas for simulating handle operations. (default: `20000000`). @@ -71,8 +73,8 @@ See [chain spec](./architecture/chain_spec.md) for a detailed description of cha - env: *DISABLE_ENTRY_POINT_V0_7* - `--num_builders_v0_7`: The number of bundle builders to run on entry point v0.7 (default: `1`) - env: *NUM_BUILDERS_V0_7* -- `--tracer_timeout`: The timeout used for custom javascript tracers, the string must be in a valid parseable format that can be used in the `ParseDuration` function on an ethereum node. See Docs [Here](https://pkg.go.dev/time#ParseDuration). 
(default: `15s`) - - env: *TRACER_TIMEOUT* +- `--da_gas_tracking_enabled`: Enable the DA gas tracking feature of the mempool (default: `false`) + - env: *DA_GAS_TRACKING_ENABLED* ## Metrics Options diff --git a/test/docker-compose.yml b/test/docker-compose.yml index 51b044ba9..ea5c22b43 100644 --- a/test/docker-compose.yml +++ b/test/docker-compose.yml @@ -1,5 +1,4 @@ # A docker-compose for running a local geth node for testing -version: "3.8" services: geth: image: ethereum/client-go:v1.10.26 diff --git a/test/spec-tests/launchers/rundler-launcher/docker-compose.yml b/test/spec-tests/launchers/rundler-launcher/docker-compose.yml index 6f50c1fe9..bb3afcfe0 100644 --- a/test/spec-tests/launchers/rundler-launcher/docker-compose.yml +++ b/test/spec-tests/launchers/rundler-launcher/docker-compose.yml @@ -1,5 +1,3 @@ -version: "3.8" - services: rundler: image: alchemy-platform/rundler:$TAG diff --git a/test/spec-tests/launchers/rundler-launcher/rundler-launcher.sh b/test/spec-tests/launchers/rundler-launcher/rundler-launcher.sh index 69a736afe..6abc18bfc 100755 --- a/test/spec-tests/launchers/rundler-launcher/rundler-launcher.sh +++ b/test/spec-tests/launchers/rundler-launcher/rundler-launcher.sh @@ -10,13 +10,13 @@ case $1 in ;; start) - docker-compose up -d + docker compose up -d ./waitForServices.sh cast send --from $(cast rpc eth_accounts | tail -n 1 | tr -d '[]"') --unlocked --value 1ether 0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266 > /dev/null cd ../../bundler-spec-tests/@account-abstraction && yarn deploy --network localhost ;; stop) - docker-compose down -t 3 + docker compose down -t 3 ;; *) diff --git a/test/spec-tests/local/docker-compose.yml b/test/spec-tests/local/docker-compose.yml index 6419041aa..f910af4e0 100644 --- a/test/spec-tests/local/docker-compose.yml +++ b/test/spec-tests/local/docker-compose.yml @@ -1,5 +1,3 @@ -version: "3.8" - services: geth: image: ethereum/client-go:release-1.14 diff --git a/test/spec-tests/local/launcher.sh b/test/spec-tests/local/launcher.sh index c4b5e871d..1deb9f5e7 100755 --- a/test/spec-tests/local/launcher.sh +++ b/test/spec-tests/local/launcher.sh @@ -5,7 +5,7 @@ cd `dirname \`realpath $0\`` case $1 in start) - docker-compose up -d + docker compose up -d sleep 10 cast send --unlocked --from $(cast rpc eth_accounts | tail -n 1 | tr -d '[]"') --value 1000ether 0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266 > /dev/null (cd ../$2/bundler-spec-tests/@account-abstraction && yarn deploy --network localhost) @@ -14,7 +14,7 @@ case $1 in ;; stop) pkill rundler - docker-compose down -t 3 + docker compose down -t 3 ;; *) diff --git a/test/spec-tests/remote/docker-compose.yml b/test/spec-tests/remote/docker-compose.yml index 7cbe48937..0cdfcd798 100644 --- a/test/spec-tests/remote/docker-compose.yml +++ b/test/spec-tests/remote/docker-compose.yml @@ -1,5 +1,3 @@ -version: "3.8" - services: geth: image: ethereum/client-go:release-1.14 diff --git a/test/spec-tests/remote/launcher.sh b/test/spec-tests/remote/launcher.sh index 09cdbe73e..c891bdae3 100755 --- a/test/spec-tests/remote/launcher.sh +++ b/test/spec-tests/remote/launcher.sh @@ -27,12 +27,12 @@ usage: EOF exit 1 esac - docker-compose up -d --wait + docker compose up -d --wait cast send --unlocked --from $(cast rpc eth_accounts | tail -n 1 | tr -d '[]"') --value 1000ether 0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266 > /dev/null cd ../$2/bundler-spec-tests/@account-abstraction && yarn deploy --network localhost ;; stop) - docker-compose down -t 3 + docker compose down -t 3 ;; *)
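
Appendix: illustrative sketches (not part of the diff above; helper names and the standalone framing are assumptions, only the formulas and constants mirror the patch).

The reworked `calc_required_pre_verification_gas` in `crates/sim/src/gas/gas.rs` prices the DA gas oracle call at the effective gas price `min(base_fee + max_priority_fee_per_gas, max_fee_per_gas)` and bails if that price is zero. A minimal sketch, with the hypothetical helper `effective_gas_price` standing in for the inline logic:

```rust
use std::cmp;

use anyhow::bail;

/// Hypothetical helper mirroring the gas-price selection inside
/// `calc_required_pre_verification_gas`: the DA oracle is queried at
/// min(base_fee + max_priority_fee_per_gas, max_fee_per_gas), and a zero
/// price is rejected before `calc_da_gas` is called.
fn effective_gas_price(
    base_fee: u128,
    max_priority_fee_per_gas: u128,
    max_fee_per_gas: u128,
) -> anyhow::Result<u128> {
    let price = cmp::min(base_fee + max_priority_fee_per_gas, max_fee_per_gas);
    if price == 0 {
        bail!("Gas price cannot be zero");
    }
    Ok(price)
}

fn main() -> anyhow::Result<()> {
    // The tip pushes the price above the fee cap, so the cap wins.
    assert_eq!(effective_gas_price(4_000, 1_000, 4_500)?, 4_500);
    // Uncapped case: base fee plus tip.
    assert_eq!(effective_gas_price(4_000, 100, 10_000)?, 4_100);
    Ok(())
}
```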
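
The `check_gas` change in `crates/sim/src/precheck.rs` now applies `pre_verification_gas_accept_percent` only on chains where `chain_spec.da_pre_verification_gas` is set; otherwise the full minimum preVerificationGas is enforced, matching the new doc comment on the setting. A sketch under the assumption that `rundler_utils::math::percent` behaves like a plain integer percentage (the real rounding may differ):

```rust
/// Simplified stand-in for `rundler_utils::math::percent` (floor division).
fn percent(value: u128, pct: u32) -> u128 {
    value * u128::from(pct) / 100
}

/// Sketch of the minimum acceptable preVerificationGas computed by the patched
/// `check_gas`: the acceptance buffer only applies when the chain prices DA
/// calldata into preVerificationGas.
fn min_acceptable_pvg(
    min_pre_verification_gas: u128,
    da_pre_verification_gas: bool,
    accept_percent: u32,
) -> u128 {
    if da_pre_verification_gas {
        percent(min_pre_verification_gas, accept_percent)
    } else {
        min_pre_verification_gas
    }
}

fn main() {
    // DA chain with a 90% acceptance buffer: ops may quote slightly less
    // than the node's own estimate and still be accepted.
    assert_eq!(min_acceptable_pvg(1_000, true, 90), 900);
    // Non-DA chain: the full estimate is required.
    assert_eq!(min_acceptable_pvg(1_000, false, 90), 1_000);
}
```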
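
`Prechecker::check` now returns a `PrecheckReturn` instead of `()`, and `UoPool::add_operation` threads both of its fields onward: `da_gas_data` is stored on the `PoolOperation` while `required_pre_verification_gas` is passed to `state.pool.add_operation`. A self-contained sketch with stand-in types (the real ones live in `rundler_sim` and `rundler_types`; the pool plumbing is reduced to a couple of structs for illustration):

```rust
// Stand-ins for the types touched by this patch; field names match the diff,
// everything else is elided.
#[derive(Clone)]
enum DAGasUOData {
    Empty,
    // Nitro / Bedrock variants elided
}

struct PrecheckReturn {
    da_gas_data: DAGasUOData,
    required_pre_verification_gas: u128,
}

struct PoolOperation {
    da_gas_data: DAGasUOData,
    // other pool-operation fields elided
}

fn main() {
    // What the mocked prechecker in the updated tests returns on success.
    let precheck_ret = PrecheckReturn {
        da_gas_data: DAGasUOData::Empty,
        required_pre_verification_gas: 0,
    };

    // The pool keeps the DA gas data on the stored operation ...
    let pool_op = PoolOperation {
        da_gas_data: precheck_ret.da_gas_data.clone(),
    };

    // ... and hands the required preVerificationGas to the mempool insert,
    // mirroring `state.pool.add_operation(pool_op, required_pre_verification_gas)`.
    let _ = (pool_op, precheck_ret.required_pre_verification_gas);
}
```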