From 1a8d8b8405a43b528f9901701249d7accbf7db7b Mon Sep 17 00:00:00 2001
From: lightsing
Date: Wed, 25 Sep 2024 12:31:12 +0800
Subject: [PATCH] add legacy support

---
 crates/bin/src/commands/run_rpc.rs |  51 ++++++----
 crates/core/src/error.rs           |   2 +
 crates/primitives/src/lib.rs       |   7 +-
 crates/primitives/src/types/mod.rs | 144 +++++++++++++++++++++++++----
 4 files changed, 166 insertions(+), 38 deletions(-)

diff --git a/crates/bin/src/commands/run_rpc.rs b/crates/bin/src/commands/run_rpc.rs
index 27553ef..39b6ec7 100644
--- a/crates/bin/src/commands/run_rpc.rs
+++ b/crates/bin/src/commands/run_rpc.rs
@@ -2,6 +2,7 @@ use crate::utils;
 use alloy::providers::{Provider, ProviderBuilder};
 use clap::Args;
 use futures::future::OptionFuture;
+use sbv::primitives::types::LegacyStorageTrace;
 use sbv::{
     core::HardforkConfig,
     primitives::{types::BlockTrace, Block},
@@ -19,6 +20,9 @@ pub struct RunRpcCommand {
     /// RPC URL
     #[arg(short, long, default_value = "http://localhost:8545")]
     url: Url,
+    /// Legacy rpc
+    #[arg(short, long)]
+    legacy: bool,
     /// Start Block number
     #[arg(short, long, default_value = "latest")]
     start_block: StartBlockSpec,
@@ -49,7 +53,11 @@ pub enum StartBlockSpec {

 impl RunRpcCommand {
     pub async fn run(self, fork_config: impl Fn(u64) -> HardforkConfig) -> anyhow::Result<()> {
-        dev_info!("Running RPC command with url: {}", self.url);
+        dev_info!(
+            "Running RPC command with url: {}, legacy support: {}",
+            self.url,
+            self.legacy
+        );
         let provider = ProviderBuilder::new().on_http(self.url);

         let chain_id = provider.get_chain_id().await?;
@@ -75,21 +83,32 @@ impl RunRpcCommand {
             let rx = rx.clone();
             handles.spawn(async move {
                 while let Ok(block_number) = rx.recv().await {
-                    let l2_trace = _provider
-                        .raw_request::<_, BlockTrace>(
-                            "scroll_getBlockTraceByNumberOrHash".into(),
-                            (
-                                format!("0x{:x}", block_number),
-                                serde_json::json!({
-                                    "ExcludeExecutionResults": true,
-                                    "ExcludeTxStorageTraces": true,
-                                    "StorageProofFormat": "flatten",
-                                    "FlattenProofsOnly": true
-                                }),
-                            ),
-                        )
-                        .await
-                        .map_err(|e| (block_number, e.into()))?;
+                    let l2_trace: BlockTrace = if self.legacy {
+                        let trace = _provider
+                            .raw_request::<_, BlockTrace<LegacyStorageTrace>>(
+                                "scroll_getBlockTraceByNumberOrHash".into(),
+                                (format!("0x{:x}", block_number),),
+                            )
+                            .await
+                            .map_err(|e| (block_number, e.into()))?;
+                        trace.into()
+                    } else {
+                        _provider
+                            .raw_request::<_, BlockTrace>(
+                                "scroll_getBlockTraceByNumberOrHash".into(),
+                                (
+                                    format!("0x{:x}", block_number),
+                                    serde_json::json!({
+                                        "ExcludeExecutionResults": true,
+                                        "ExcludeTxStorageTraces": true,
+                                        "StorageProofFormat": "flatten",
+                                        "FlattenProofsOnly": true
+                                    }),
+                                ),
+                            )
+                            .await
+                            .map_err(|e| (block_number, e.into()))?
+                    };

                     dev_info!(
                         "worker#{_idx}: load trace for block #{block_number}({})",
diff --git a/crates/core/src/error.rs b/crates/core/src/error.rs
index 322f6c5..174f260 100644
--- a/crates/core/src/error.rs
+++ b/crates/core/src/error.rs
@@ -4,8 +4,10 @@ use std::error::Error;
 /// Error variants encountered during manipulation of a zkTrie.
 #[derive(Debug, thiserror::Error)]
 pub enum DatabaseError {
+    /// Error encountered from code db.
     #[error("error encountered from code db: {0}")]
     CodeDb(Box),
+    /// Error encountered from zkTrie.
#[error("error encountered from zkTrie: {0}")] ZkTrie(Box), } diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 00ba421..f0f7f7a 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -76,18 +76,17 @@ pub trait Block: Debug { fn start_l1_queue_index(&self) -> u64; /// flatten proofs - fn flatten_proofs(&self) -> impl Iterator; + fn flatten_proofs(&self) -> impl Iterator; /// Update zktrie state from trace #[inline] fn build_zktrie_db(&self, db: &mut Db) -> Result<(), Db::Error> { - for (k, bytes) in self.flatten_proofs() { + for bytes in self.flatten_proofs() { if bytes == MAGIC_NODE_BYTES { continue; } let node = Node::::try_from(bytes).expect("invalid node"); let node_hash = node.get_or_calculate_node_hash().expect("infallible"); - debug_assert_eq!(k.as_slice(), node_hash.as_slice()); dev_trace!("put zktrie node: {:?}", node); db.put_owned(node_hash.as_slice(), node.canonical_value(false))?; } @@ -334,7 +333,7 @@ impl Block for &T { (*self).start_l1_queue_index() } - fn flatten_proofs(&self) -> impl Iterator { + fn flatten_proofs(&self) -> impl Iterator { (*self).flatten_proofs() } } diff --git a/crates/primitives/src/types/mod.rs b/crates/primitives/src/types/mod.rs index 0ab6a3b..8eec3c5 100644 --- a/crates/primitives/src/types/mod.rs +++ b/crates/primitives/src/types/mod.rs @@ -1,7 +1,11 @@ use crate::Block; use alloy::primitives::{Address, Bytes, B256, U256}; -use serde::{Deserialize, Serialize}; +use rkyv::Archive; +use serde::{Deserialize, Deserializer, Serialize}; use serde_with::{serde_as, Map}; +use std::collections::{BTreeSet, HashMap}; +use std::fmt::Debug; +use std::hash::Hash; mod tx; pub use tx::{ArchivedTransactionTrace, TransactionTrace, TxL1Msg, TypedTransaction}; @@ -58,16 +62,15 @@ pub struct BytecodeTrace { } /// storage trace -#[serde_as] #[derive( rkyv::Archive, rkyv::Serialize, rkyv::Deserialize, Serialize, - Deserialize, Default, Debug, Clone, + Hash, Eq, PartialEq, )] @@ -82,8 +85,44 @@ pub struct StorageTrace { pub root_after: B256, /// proofs #[serde(rename = "flattenProofs")] + pub flatten_proofs: Vec, +} + +/// legacy storage trace +#[serde_as] +#[derive( + rkyv::Archive, + rkyv::Serialize, + rkyv::Deserialize, + Serialize, + Deserialize, + Default, + Debug, + Clone, + Hash, + Eq, + PartialEq, +)] +#[archive(check_bytes)] +#[archive_attr(derive(Debug, Hash, PartialEq, Eq))] +pub struct LegacyStorageTrace { + /// root before + #[serde(rename = "rootBefore")] + pub root_before: B256, + /// root after + #[serde(rename = "rootAfter")] + pub root_after: B256, + /// account proofs + #[serde(default)] #[serde_as(as = "Map<_, _>")] - pub flatten_proofs: Vec<(B256, Bytes)>, + pub proofs: Vec<(Address, Vec)>, + #[serde(rename = "storageProofs", default)] + #[serde_as(as = "Map<_, Map<_, _>>")] + /// storage proofs for each account + pub storage_proofs: Vec<(Address, Vec<(B256, Vec)>)>, + #[serde(rename = "deletionProofs", default)] + /// additional deletion proofs + pub deletion_proofs: Vec, } /// Block trace format @@ -94,7 +133,11 @@ pub struct StorageTrace { )] #[archive(check_bytes)] #[archive_attr(derive(Debug, Hash, PartialEq, Eq))] -pub struct BlockTrace { +pub struct BlockTrace +where + S: Archive, + ::Archived: Debug + Hash + PartialEq + Eq, +{ /// chain id #[serde(rename = "chainID", default)] pub chain_id: u64, @@ -108,7 +151,7 @@ pub struct BlockTrace { pub codes: Vec, /// storage trace BEFORE execution #[serde(rename = "storageTrace")] - pub storage_trace: StorageTrace, + pub storage_trace: S, /// l1 tx 
     #[serde(rename = "startL1QueueIndex", default)]
     pub start_l1_queue_index: u64,
@@ -116,6 +159,78 @@ pub struct BlockTrace {
     pub withdraw_trie_root: B256,
 }

+impl<'de> Deserialize<'de> for StorageTrace {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        #[derive(Deserialize)]
+        #[serde(untagged)]
+        enum FlattenProofs {
+            Map(HashMap<B256, Bytes>),
+            Vec(Vec<Bytes>),
+        }
+        #[derive(Deserialize)]
+        struct StorageTraceDe {
+            #[serde(rename = "rootBefore")]
+            pub root_before: B256,
+            #[serde(rename = "rootAfter")]
+            pub root_after: B256,
+            #[serde(rename = "flattenProofs")]
+            pub flatten_proofs: FlattenProofs,
+        }
+
+        let de = StorageTraceDe::deserialize(deserializer)?;
+        let mut flatten_proofs = match de.flatten_proofs {
+            FlattenProofs::Map(map) => map.into_iter().map(|(_, v)| v).collect(),
+            FlattenProofs::Vec(vec) => vec,
+        };
+        flatten_proofs.sort();
+
+        Ok(StorageTrace {
+            root_before: de.root_before,
+            root_after: de.root_after,
+            flatten_proofs,
+        })
+    }
+}
+
+impl From<LegacyStorageTrace> for StorageTrace {
+    fn from(trace: LegacyStorageTrace) -> Self {
+        let mut flatten_proofs = BTreeSet::new();
+        for (_, proofs) in trace.proofs {
+            flatten_proofs.extend(proofs);
+        }
+        for (_, proofs) in trace.storage_proofs {
+            for (_, proofs) in proofs {
+                flatten_proofs.extend(proofs);
+            }
+        }
+        flatten_proofs.extend(trace.deletion_proofs);
+
+        StorageTrace {
+            root_before: trace.root_before,
+            root_after: trace.root_after,
+            flatten_proofs: flatten_proofs.into_iter().collect(),
+        }
+    }
+}
+
+impl From<BlockTrace<LegacyStorageTrace>> for BlockTrace {
+    fn from(trace: BlockTrace<LegacyStorageTrace>) -> Self {
+        BlockTrace {
+            chain_id: trace.chain_id,
+            coinbase: trace.coinbase,
+            header: trace.header,
+            transactions: trace.transactions,
+            codes: trace.codes,
+            storage_trace: trace.storage_trace.into(),
+            start_l1_queue_index: trace.start_l1_queue_index,
+            withdraw_trie_root: trace.withdraw_trie_root,
+        }
+    }
+}
+
 impl Block for BlockTrace {
     type Tx = TransactionTrace;

@@ -179,11 +294,8 @@ impl Block for BlockTrace {
         self.start_l1_queue_index
     }

-    fn flatten_proofs(&self) -> impl Iterator<Item = (&B256, &[u8])> {
-        self.storage_trace
-            .flatten_proofs
-            .iter()
-            .map(|(k, v)| (k, v.as_ref()))
+    fn flatten_proofs(&self) -> impl Iterator<Item = &[u8]> {
+        self.storage_trace.flatten_proofs.iter().map(|v| v.as_ref())
     }
 }

@@ -254,11 +366,8 @@ impl Block for ArchivedBlockTrace {
         self.start_l1_queue_index
     }

-    fn flatten_proofs(&self) -> impl Iterator<Item = (&B256, &[u8])> {
-        self.storage_trace
-            .flatten_proofs
-            .iter()
-            .map(|(k, v)| (k, v.as_ref()))
+    fn flatten_proofs(&self) -> impl Iterator<Item = &[u8]> {
+        self.storage_trace.flatten_proofs.iter().map(|v| v.as_ref())
     }
 }

@@ -404,8 +513,7 @@ mod tests {
             .iter()
             .zip(archived_block.storage_trace.flatten_proofs.iter())
         {
-            assert_eq!(proof.0, archived_proof.0);
-            assert_eq!(proof.1.as_ref(), archived_proof.1.as_ref());
+            assert_eq!(proof.as_ref(), archived_proof.as_ref());
         }

         assert_eq!(
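
Note (illustration, not part of the patch): the `From<LegacyStorageTrace> for StorageTrace` conversion above collects every account proof, per-slot storage proof, and deletion proof into a `BTreeSet`, so the resulting `flatten_proofs` list is deduplicated and deterministically ordered. The sketch below mirrors that logic under simplified assumptions; `LegacyTrace`, `flatten`, and the plain `String`/`Vec<u8>` stand-ins for the crate's `Address`/`B256`/`Bytes` types are hypothetical names used only for this example.

// Standalone sketch of the legacy-proof flattening (assumed, simplified types).
use std::collections::BTreeSet;

// Hypothetical stand-in for `LegacyStorageTrace`: account proofs, per-account
// storage proofs, and deletion proofs, keyed by plain strings for brevity.
struct LegacyTrace {
    proofs: Vec<(String, Vec<Vec<u8>>)>,
    storage_proofs: Vec<(String, Vec<(String, Vec<Vec<u8>>)>)>,
    deletion_proofs: Vec<Vec<u8>>,
}

// Flatten every proof node into one sorted, deduplicated list, mirroring the
// BTreeSet-based conversion in the patch.
fn flatten(trace: LegacyTrace) -> Vec<Vec<u8>> {
    let mut flat = BTreeSet::new();
    for (_, proofs) in trace.proofs {
        flat.extend(proofs);
    }
    for (_, proofs) in trace.storage_proofs {
        for (_, proofs) in proofs {
            flat.extend(proofs);
        }
    }
    flat.extend(trace.deletion_proofs);
    flat.into_iter().collect()
}

fn main() {
    let legacy = LegacyTrace {
        proofs: vec![("0xabc".into(), vec![vec![1], vec![2]])],
        storage_proofs: vec![("0xabc".into(), vec![("0x01".into(), vec![vec![2], vec![3]])])],
        deletion_proofs: vec![vec![4]],
    };
    // The duplicate node `vec![2]` appears only once in the flattened output,
    // and the result is in deterministic (lexicographic) order.
    let expected: Vec<Vec<u8>> = vec![vec![1], vec![2], vec![3], vec![4]];
    assert_eq!(flatten(legacy), expected);
    println!("flattened proofs ok");
}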