From 478e47055c621b5349e7a243a2794dbc61bb1506 Mon Sep 17 00:00:00 2001 From: Rakan Al-Huneiti Date: Fri, 5 Jul 2024 16:45:19 +0300 Subject: [PATCH] Commit when diff size > 300KB (#854) * Introduce StateDiff tables * Submit commitment when state diff > 300Kb * Add table to list * Add comments * Pick a better name for constant * Collect diff in prover storage * Add flag to force commitment * Fix state diff calculation * Add test * Fix clippy * Merge diffs * Deploy contract to generate large diffs * Fix waits * Fix clippy issues * Use a different contract * Add disabled logging lines * Commit / reset logic * Disable transport logs * Send transactions and fill diff * Use clone_from as clippy suggests * Remove InfiniteLoopContract import --- Cargo.lock | 1 + bin/citrea/src/lib.rs | 1 + bin/citrea/tests/e2e/mod.rs | 91 +++++++++++++++++++ crates/sequencer/Cargo.toml | 39 ++++---- crates/sequencer/src/commitment_controller.rs | 6 +- crates/sequencer/src/sequencer.rs | 79 +++++++++++++--- .../full-node/db/sov-db/src/ledger_db/mod.rs | 28 +++++- .../full-node/db/sov-db/src/schema/tables.rs | 8 +- .../sov-state/src/prover_storage.rs | 17 +++- 9 files changed, 222 insertions(+), 48 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3e99637ed..607714b87 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1945,6 +1945,7 @@ dependencies = [ "alloy-rlp", "alloy-sol-types", "anyhow", + "bincode", "borsh", "chrono", "citrea-evm", diff --git a/bin/citrea/src/lib.rs b/bin/citrea/src/lib.rs index 4680e9337..7055d09a3 100644 --- a/bin/citrea/src/lib.rs +++ b/bin/citrea/src/lib.rs @@ -20,6 +20,7 @@ pub fn initialize_logging(level: Level) { level.as_str().to_owned(), "jmt=info".to_owned(), "hyper=info".to_owned(), + "alloy_transport_http=info".to_owned(), // Limit output as much as possible, use WARN. 
"risc0_zkvm=warn".to_owned(), "guest_execution=info".to_owned(), diff --git a/bin/citrea/tests/e2e/mod.rs b/bin/citrea/tests/e2e/mod.rs index fd0b1d0d5..05b3f96a1 100644 --- a/bin/citrea/tests/e2e/mod.rs +++ b/bin/citrea/tests/e2e/mod.rs @@ -128,6 +128,8 @@ async fn initialize_test( #[tokio::test(flavor = "multi_thread")] async fn test_soft_batch_save() -> Result<(), anyhow::Error> { + // citrea::initialize_logging(tracing::Level::DEBUG); + let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); let da_db_dir = storage_dir.path().join("DA").to_path_buf(); let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); @@ -3296,3 +3298,92 @@ async fn test_full_node_sync_status() { seq_task.abort(); full_node_task.abort(); } + +#[tokio::test(flavor = "multi_thread")] +async fn test_sequencer_commitment_threshold() { + // citrea::initialize_logging(tracing::Level::DEBUG); + + let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + + let psql_db_name = "test_sequencer_commitment_threshold".to_owned(); + + let db_test_client = PostgresConnector::new_test_client(psql_db_name.clone()) + .await + .unwrap(); + + let mut sequencer_config = create_default_sequencer_config(4, Some(true), 10); + + sequencer_config.db_config = Some(SharedBackupDbConfig::default().set_db_name(psql_db_name)); + sequencer_config.mempool_conf = SequencerMempoolConfig { + max_account_slots: 1000, + ..Default::default() + }; + + let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); + + let da_db_dir_cloned = da_db_dir.clone(); + let seq_task = tokio::spawn(async { + start_rollup( + seq_port_tx, + GenesisPaths::from_dir(TEST_DATA_GENESIS_PATH), + None, + NodeMode::SequencerNode, + sequencer_db_dir, + da_db_dir_cloned, + 1_000_000, // Put a large number for commitment threshold + true, + None, + 
Some(sequencer_config), + Some(true), + DEFAULT_DEPOSIT_MEMPOOL_FETCH_LIMIT, + ) + .await; + }); + + let seq_port = seq_port_rx.await.unwrap(); + + let seq_test_client = init_test_rollup(seq_port).await; + + seq_test_client.send_publish_batch_request().await; + + for _ in 0..10 { + for _ in 0..100 { + let address = Address::random(); + let _pending = seq_test_client + .send_eth(address, None, None, None, 1u128) + .await + .unwrap(); + } + seq_test_client.send_publish_batch_request().await; + } + + wait_for_l2_block(&seq_test_client, 11, Some(Duration::from_secs(60))).await; + + // At block 725, the state diff should be large enough to trigger a commitment. + wait_for_postgres_commitment(&db_test_client, 1, Some(Duration::from_secs(60))).await; + let commitments = db_test_client.get_all_commitments().await.unwrap(); + assert_eq!(commitments.len(), 1); + + for _ in 0..10 { + for _ in 0..100 { + let address = Address::random(); + let _pending = seq_test_client + .send_eth(address, None, None, None, 1u128) + .await + .unwrap(); + } + seq_test_client.send_publish_batch_request().await; + } + + wait_for_l2_block(&seq_test_client, 21, Some(Duration::from_secs(60))).await; + + // At block 1450, the state diff should be large enough to trigger a commitment. + // But the 50 remaining blocks state diff should NOT trigger a third. 
+ wait_for_postgres_commitment(&db_test_client, 2, Some(Duration::from_secs(60))).await; + let commitments = db_test_client.get_all_commitments().await.unwrap(); + assert_eq!(commitments.len(), 2); + + seq_task.abort(); +} diff --git a/crates/sequencer/Cargo.toml b/crates/sequencer/Cargo.toml index 24f6fe659..7be8fbffb 100644 --- a/crates/sequencer/Cargo.toml +++ b/crates/sequencer/Cargo.toml @@ -12,18 +12,26 @@ readme = "README.md" resolver = "2" [dependencies] +# 3rd-party deps alloy-rlp = { workspace = true } alloy-sol-types = { workspace = true } - anyhow = { workspace = true } +bincode = { workspace = true } borsh = { workspace = true } chrono = { workspace = true } +deadpool-postgres = { workspace = true } digest = { workspace = true } futures = { workspace = true } +hex = { workspace = true } +hyper = { workspace = true } jsonrpsee = { workspace = true, features = ["http-client", "server"] } rs_merkle = { workspace = true } +schnellru = "0.2.1" serde = { workspace = true } serde_json = { workspace = true } +tokio = { workspace = true } +tower = { workspace = true } +tower-http = { workspace = true } tracing = { workspace = true } reth-db = { workspace = true } @@ -36,33 +44,22 @@ reth-rpc-types-compat = { workspace = true } reth-tasks = { workspace = true } reth-transaction-pool = { workspace = true } reth-trie = { workspace = true } - revm = { workspace = true } -deadpool-postgres = { workspace = true } -hyper = { workspace = true } -schnellru = "0.2.1" -tokio = { workspace = true } -tower = { workspace = true } -tower-http = { workspace = true } - -citrea-evm = { path = "../evm", features = ["native"] } -sov-db = { path = "../sovereign-sdk/full-node/db/sov-db" } -sov-rollup-interface = { path = "../sovereign-sdk/rollup-interface", features = ["native"] } - -sov-stf-runner = { path = "../sovereign-sdk/full-node/sov-stf-runner" } - -sov-modules-rollup-blueprint = { path = "../sovereign-sdk/module-system/sov-modules-rollup-blueprint" } 
-sov-modules-stf-blueprint = { path = "../sovereign-sdk/module-system/sov-modules-stf-blueprint" } - -citrea-stf = { path = "../citrea-stf", features = ["native"] } +# Sovereign SDK deps soft-confirmation-rule-enforcer = { path = "../soft-confirmation-rule-enforcer", features = ["native"] } sov-accounts = { path = "../sovereign-sdk/module-system/module-implementations/sov-accounts", default-features = false } +sov-db = { path = "../sovereign-sdk/full-node/db/sov-db" } sov-modules-api = { path = "../sovereign-sdk/module-system/sov-modules-api", default-features = false } +sov-modules-rollup-blueprint = { path = "../sovereign-sdk/module-system/sov-modules-rollup-blueprint" } +sov-modules-stf-blueprint = { path = "../sovereign-sdk/module-system/sov-modules-stf-blueprint" } +sov-rollup-interface = { path = "../sovereign-sdk/rollup-interface", features = ["native"] } sov-state = { path = "../sovereign-sdk/module-system/sov-state" } +sov-stf-runner = { path = "../sovereign-sdk/full-node/sov-stf-runner" } -hex = { workspace = true } - +# Citrea Deps +citrea-evm = { path = "../evm", features = ["native"] } +citrea-stf = { path = "../citrea-stf", features = ["native"] } shared-backup-db = { path = "../shared-backup-db" } [dev-dependencies] diff --git a/crates/sequencer/src/commitment_controller.rs b/crates/sequencer/src/commitment_controller.rs index fd57bab97..2a894d93e 100644 --- a/crates/sequencer/src/commitment_controller.rs +++ b/crates/sequencer/src/commitment_controller.rs @@ -26,6 +26,7 @@ pub fn get_commitment_info( ledger_db: &LedgerDB, min_soft_confirmations_per_commitment: u64, prev_l1_height: u64, + state_diff_threshold_reached: bool, ) -> anyhow::Result> { // first get when the last merkle root of soft confirmations was submitted let last_commitment_l1_height = ledger_db @@ -98,8 +99,9 @@ pub fn get_commitment_info( debug!("L2 range to submit: {:?}", l2_range_to_submit); debug!("L1 height range: {:?}", l1_height_range); - if (l2_range_to_submit.1 .0 + 1) - < 
min_soft_confirmations_per_commitment + l2_range_to_submit.0 .0 + if !state_diff_threshold_reached + && (l2_range_to_submit.1 .0 + 1) + < min_soft_confirmations_per_commitment + l2_range_to_submit.0 .0 { return Ok(None); } diff --git a/crates/sequencer/src/sequencer.rs b/crates/sequencer/src/sequencer.rs index 14feb7434..5d53660a3 100644 --- a/crates/sequencer/src/sequencer.rs +++ b/crates/sequencer/src/sequencer.rs @@ -1,5 +1,5 @@ use std::cmp::Ordering; -use std::collections::HashSet; +use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; use std::net::SocketAddr; use std::ops::RangeInclusive; @@ -32,7 +32,7 @@ use sov_db::schema::types::{BatchNumber, SlotNumber}; use sov_modules_api::hooks::HookSoftConfirmationInfo; use sov_modules_api::transaction::Transaction; use sov_modules_api::{ - Context, EncodeCall, PrivateKey, SignedSoftConfirmationBatch, SlotData, + Context, EncodeCall, PrivateKey, SignedSoftConfirmationBatch, SlotData, StateDiff, UnsignedSoftConfirmationBatch, WorkingSet, }; use sov_modules_stf_blueprint::StfBlueprintTrait; @@ -57,6 +57,8 @@ use crate::mempool::CitreaMempool; use crate::rpc::{create_rpc_module, RpcContext}; use crate::utils::recover_raw_transaction; +const MAX_STATEDIFF_SIZE_COMMITMENT_THRESHOLD: u64 = 300 * 1024; + type StateRoot = >::StateRoot; /// Represents information about the current DA state. /// @@ -88,6 +90,7 @@ where sequencer_pub_key: Vec, rpc_config: RpcConfig, soft_confirmation_rule_enforcer: SoftConfirmationRuleEnforcer, + last_state_diff: StateDiff, } enum L2BlockMode { @@ -154,6 +157,9 @@ where let soft_confirmation_rule_enforcer = SoftConfirmationRuleEnforcer::::Spec>::default(); + // Initialize the sequencer with the last state diff from DB. 
+ let last_state_diff = ledger_db.get_state_diff()?; + Ok(Self { da_service, mempool: Arc::new(pool), @@ -171,6 +177,7 @@ where sequencer_pub_key: public_keys.sequencer_public_key, rpc_config, soft_confirmation_rule_enforcer, + last_state_diff, }) } @@ -341,6 +348,7 @@ where l2_block_mode: L2BlockMode, pg_pool: &Option, last_used_l1_height: u64, + da_commitment_tx: UnboundedSender<(u64, bool)>, ) -> anyhow::Result { let da_height = da_block.header().height(); let (l2_height, l1_height) = match self @@ -524,11 +532,10 @@ where self.ledger_db.commit_soft_batch(soft_batch_receipt, true)?; + let l1_height = da_block.header().height(); info!( "New block #{}, DA #{}, Tx count: #{}", - l2_height, - da_block.header().height(), - evm_txs_count, + l2_height, l1_height, evm_txs_count, ); self.state_root = next_state_root; @@ -542,6 +549,28 @@ where self.mempool.update_accounts(account_updates); + let merged_state_diff = self.merge_state_diffs( + self.last_state_diff.clone(), + slot_result.state_diff.clone(), + ); + // Serialize the state diff to check size later. + let serialized_state_diff = bincode::serialize(&merged_state_diff)?; + if serialized_state_diff.len() as u64 > MAX_STATEDIFF_SIZE_COMMITMENT_THRESHOLD { + // If we exceed the threshold, we should notify the commitment + // worker to initiate a commitment. + if da_commitment_tx.unbounded_send((l1_height, true)).is_err() { + error!("Commitment thread is dead!"); + } + self.last_state_diff.clone_from(&slot_result.state_diff); + self.ledger_db + .set_state_diff(self.last_state_diff.clone())?; + } else { + // Store state diff. + self.last_state_diff = merged_state_diff; + self.ledger_db + .set_state_diff(self.last_state_diff.clone())?; + } + if let Some(pg_pool) = pg_pool.clone() { // TODO: Is this okay? 
I'm not sure because we have a loop in this and I can't do async in spawn_blocking tokio::spawn(async move { @@ -571,15 +600,21 @@ where } } - async fn submit_commitment(&self, prev_l1_height: u64) -> anyhow::Result<()> { + async fn submit_commitment( + &mut self, + prev_l1_height: u64, + state_diff_threshold_reached: bool, + ) -> anyhow::Result<()> { debug!("Sequencer: new L1 block, checking if commitment should be submitted"); let inscription_queue = self.da_service.get_send_transaction_queue(); let min_soft_confirmations_per_commitment = self.config.min_soft_confirmations_per_commitment; + let commitment_info = commitment_controller::get_commitment_info( &self.ledger_db, min_soft_confirmations_per_commitment, prev_l1_height, + state_diff_threshold_reached, )?; if let Some(commitment_info) = commitment_info { @@ -658,6 +693,10 @@ where } } + // Clear state diff. + self.ledger_db.set_state_diff(vec![])?; + self.last_state_diff = vec![]; + info!("New commitment. L2 range: #{}-{}", l2_start, l2_end,); } Ok(()) @@ -725,7 +764,7 @@ where // Setup required workers to update our knowledge of the DA layer every X seconds (configurable). 
let (da_height_update_tx, mut da_height_update_rx) = mpsc::channel(1); - let (da_commitment_tx, mut da_commitment_rx) = unbounded::(); + let (da_commitment_tx, mut da_commitment_rx) = unbounded::<(u64, bool)>(); let da_monitor = da_block_monitor( self.da_service.clone(), da_height_update_tx, @@ -779,8 +818,8 @@ where } } }, - prev_l1_height = da_commitment_rx.select_next_some() => { - if let Err(e) = self.submit_commitment(prev_l1_height).await { + (prev_l1_height, force) = da_commitment_rx.select_next_some() => { + if let Err(e) = self.submit_commitment(prev_l1_height, force).await { error!("Failed to submit commitment: {}", e); } }, @@ -799,7 +838,7 @@ where .map_err(|e| anyhow!(e))?; debug!("Created an empty L2 for L1={}", needed_da_block_height); - if let Err(e) = self.produce_l2_block(da_block, l1_fee_rate, L2BlockMode::Empty, &pg_pool, last_used_l1_height).await { + if let Err(e) = self.produce_l2_block(da_block, l1_fee_rate, L2BlockMode::Empty, &pg_pool, last_used_l1_height, da_commitment_tx.clone()).await { error!("Sequencer error: {}", e); } } @@ -815,7 +854,7 @@ where } }; let l1_fee_rate = l1_fee_rate.clamp(*l1_fee_rate_range.start(), *l1_fee_rate_range.end()); - match self.produce_l2_block(last_finalized_block.clone(), l1_fee_rate, L2BlockMode::NotEmpty, &pg_pool, last_used_l1_height).await { + match self.produce_l2_block(last_finalized_block.clone(), l1_fee_rate, L2BlockMode::NotEmpty, &pg_pool, last_used_l1_height, da_commitment_tx.clone()).await { Ok(l1_block_number) => { last_used_l1_height = l1_block_number; }, @@ -843,7 +882,7 @@ where .map_err(|e| anyhow!(e))?; debug!("Created an empty L2 for L1={}", needed_da_block_height); - if let Err(e) = self.produce_l2_block(da_block, l1_fee_rate, L2BlockMode::Empty, &pg_pool, last_used_l1_height).await { + if let Err(e) = self.produce_l2_block(da_block, l1_fee_rate, L2BlockMode::Empty, &pg_pool, last_used_l1_height, da_commitment_tx.clone()).await { error!("Sequencer error: {}", e); } } @@ -861,7 +900,7 @@ 
where let l1_fee_rate = l1_fee_rate.clamp(*l1_fee_rate_range.start(), *l1_fee_rate_range.end()); let instant = Instant::now(); - match self.produce_l2_block(da_block, l1_fee_rate, L2BlockMode::NotEmpty, &pg_pool, last_used_l1_height).await { + match self.produce_l2_block(da_block, l1_fee_rate, L2BlockMode::NotEmpty, &pg_pool, last_used_l1_height, da_commitment_tx.clone()).await { Ok(l1_block_number) => { // Set the next iteration's wait time to produce a block based on the // previous block's execution time. @@ -1045,7 +1084,7 @@ where async fn maybe_submit_commitment( &self, - da_commitment_tx: UnboundedSender, + da_commitment_tx: UnboundedSender<(u64, bool)>, last_finalized_height: u64, last_used_l1_height: u64, ) -> anyhow::Result<()> { @@ -1061,7 +1100,10 @@ where }; if let Some(commit_up_to) = commit_up_to { - if da_commitment_tx.unbounded_send(commit_up_to).is_err() { + if da_commitment_tx + .unbounded_send((commit_up_to, false)) + .is_err() + { error!("Commitment thread is dead!"); } } @@ -1097,6 +1139,13 @@ where Ok(updates) } + + fn merge_state_diffs(&self, old_diff: StateDiff, new_diff: StateDiff) -> StateDiff { + let mut new_diff_map = HashMap::, Option>>::from_iter(old_diff); + + new_diff_map.extend(new_diff); + new_diff_map.into_iter().collect() + } } fn get_l1_fee_rate_range( diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs index 1571766e9..d354c727a 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs @@ -5,7 +5,7 @@ use serde::de::DeserializeOwned; use serde::Serialize; use sov_rollup_interface::da::{DaSpec, SequencerCommitment}; use sov_rollup_interface::services::da::SlotData; -use sov_rollup_interface::stf::{BatchReceipt, Event, SoftBatchReceipt}; +use sov_rollup_interface::stf::{BatchReceipt, Event, SoftBatchReceipt, StateDiff}; use sov_rollup_interface::zk::Proof; use 
sov_schema_db::{Schema, SchemaBatch, SeekKeyEncoder, DB}; use tracing::instrument; @@ -13,9 +13,10 @@ use tracing::instrument; use crate::rocks_db_config::gen_rocksdb_options; use crate::schema::tables::{ BatchByHash, BatchByNumber, CommitmentsByNumber, EventByKey, EventByNumber, L2RangeByL1Height, - L2Witness, LastSequencerCommitmentSent, LastSequencerCommitmentSentL2, ProofBySlotNumber, - ProverLastScannedSlot, SlotByHash, SlotByNumber, SoftBatchByHash, SoftBatchByNumber, - SoftConfirmationStatus, TxByHash, TxByNumber, VerifiedProofsBySlotNumber, LEDGER_TABLES, + L2Witness, LastSequencerCommitmentSent, LastSequencerCommitmentSentL2, LastStateDiff, + ProofBySlotNumber, ProverLastScannedSlot, SlotByHash, SlotByNumber, SoftBatchByHash, + SoftBatchByNumber, SoftConfirmationStatus, TxByHash, TxByNumber, VerifiedProofsBySlotNumber, + LEDGER_TABLES, }; use crate::schema::types::{ split_tx_for_storage, BatchNumber, EventNumber, L2HeightRange, SlotNumber, StoredBatch, @@ -714,4 +715,23 @@ impl LedgerDB { pub fn get_l1_height_of_l1_hash(&self, hash: [u8; 32]) -> Result, anyhow::Error> { self.db.get::(&hash).map(|v| v.map(|a| a.0)) } + + /// Sets the latest state diff + #[instrument(level = "trace", skip(self), err, ret)] + pub fn set_state_diff(&self, state_diff: StateDiff) -> anyhow::Result<()> { + let mut schema_batch = SchemaBatch::new(); + schema_batch.put::(&(), &state_diff)?; + + self.db.write_schemas(schema_batch)?; + + Ok(()) + } + + /// Gets the latest state diff + #[instrument(level = "trace", skip(self), err, ret)] + pub fn get_state_diff(&self) -> Result { + self.db + .get::(&()) + .map(|diff| diff.unwrap_or_default()) + } } diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs index f113b86d7..66fca858a 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs @@ -30,7 +30,7 @@ use
byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; use jmt::storage::{NibblePath, Node, NodeKey}; use jmt::Version; use sov_rollup_interface::da::SequencerCommitment; -use sov_rollup_interface::stf::{Event, EventKey}; +use sov_rollup_interface::stf::{Event, EventKey, StateDiff}; use sov_schema_db::schema::{KeyDecoder, KeyEncoder, ValueCodec}; use sov_schema_db::{CodecError, SeekKeyEncoder}; @@ -57,6 +57,7 @@ pub const LEDGER_TABLES: &[&str] = &[ SoftBatchByHash::table_name(), L2RangeByL1Height::table_name(), L2Witness::table_name(), + LastStateDiff::table_name(), LastSequencerCommitmentSent::table_name(), LastSequencerCommitmentSentL2::table_name(), ProverLastScannedSlot::table_name(), @@ -221,6 +222,11 @@ macro_rules! define_table_with_seek_key_codec { }; } +define_table_with_seek_key_codec!( + /// The State diff storage + (LastStateDiff) () => StateDiff +); + define_table_with_seek_key_codec!( /// The primary source for slot data (SlotByNumber) SlotNumber => StoredSlot diff --git a/crates/sovereign-sdk/module-system/sov-state/src/prover_storage.rs b/crates/sovereign-sdk/module-system/sov-state/src/prover_storage.rs index 06cd87813..91cc313a2 100644 --- a/crates/sovereign-sdk/module-system/sov-state/src/prover_storage.rs +++ b/crates/sovereign-sdk/module-system/sov-state/src/prover_storage.rs @@ -139,17 +139,24 @@ impl Storage for ProverStorage { let mut key_preimages = Vec::with_capacity(state_accesses.ordered_writes.len()); + let mut diff = vec![]; + // Compute the jmt update from the write batch let batch = state_accesses .ordered_writes .into_iter() .map(|(key, value)| { let key_hash = KeyHash::with::(key.key.as_ref()); + + let key_bytes = + Arc::try_unwrap(key.key.clone()).unwrap_or_else(|arc| (*arc).clone()); + let value_bytes = + value.map(|v| Arc::try_unwrap(v.value).unwrap_or_else(|arc| (*arc).clone())); + + diff.push((key_bytes, value_bytes.clone())); key_preimages.push((key_hash, key)); - ( - key_hash, - value.map(|v| 
Arc::try_unwrap(v.value).unwrap_or_else(|arc| (*arc).clone())), - ) + + (key_hash, value_bytes) }); let next_version = self.db.get_next_version(); @@ -169,7 +176,7 @@ impl Storage for ProverStorage { // We need the state diff to be calculated only inside zk context. // The diff then can be used by special nodes to construct the state of the rollup by verifying the zk proof. // And constructing the tree from the diff. - Ok((new_root, state_update, vec![])) + Ok((new_root, state_update, diff)) } fn commit(&self, state_update: &Self::StateUpdate, accessory_writes: &OrderedReadsAndWrites) {