From 4ed87bb34a0c7eecc87a98d26dac8f11d26a2616 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erce=20Can=20Bekt=C3=BCre?= <47954181+ercecan@users.noreply.github.com> Date: Fri, 10 May 2024 17:22:00 +0300 Subject: [PATCH] Implement ledger rpc endpoints for getting commitments on slot (#527) * Implement ledger rpc endpoints for getting commitments on slot * Lint * Use commitment ledger rpcs in full node instead of sequencer * review fixes * performance improvements * Fix commitment bug * change response for sequencer commitment --------- Co-authored-by: eyusufatik --- Cargo.lock | 1 + bin/citrea/tests/e2e/mod.rs | 4 +- bin/citrea/tests/sequencer_commitments/mod.rs | 106 +++++ bin/citrea/tests/test_client/mod.rs | 28 +- crates/sequencer/src/sequencer.rs | 1 + .../full-node/db/sov-db/Cargo.toml | 12 +- .../full-node/db/sov-db/src/ledger_db/mod.rs | 33 +- .../full-node/db/sov-db/src/ledger_db/rpc.rs | 29 +- .../full-node/db/sov-db/src/schema/tables.rs | 7 + .../full-node/sov-ledger-rpc/src/server.rs | 31 ++ .../full-node/sov-stf-runner/src/runner.rs | 384 +++++++----------- crates/sovereign-sdk/fuzz/Cargo.lock | 1 + .../rollup-interface/src/node/rpc/mod.rs | 39 ++ 13 files changed, 434 insertions(+), 242 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 918cd9ee6..d2718b959 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8977,6 +8977,7 @@ dependencies = [ "borsh", "byteorder", "criterion", + "hex", "jmt", "proptest", "proptest-derive 0.3.0", diff --git a/bin/citrea/tests/e2e/mod.rs b/bin/citrea/tests/e2e/mod.rs index 2f7f3c630..7a6ef0a3b 100644 --- a/bin/citrea/tests/e2e/mod.rs +++ b/bin/citrea/tests/e2e/mod.rs @@ -1072,8 +1072,8 @@ async fn test_soft_confirmations_status_two_l1() -> Result<(), anyhow::Error> { // publish new da block da_service.publish_test_block().await.unwrap(); - seq_test_client.send_publish_batch_request().await; // TODO https://github.com/chainwayxyz/citrea/issues/214 - seq_test_client.send_publish_batch_request().await; // TODO https://github.com/chainwayxyz/citrea/issues/214 + seq_test_client.send_publish_batch_request().await; + seq_test_client.send_publish_batch_request().await; sleep(Duration::from_secs(2)).await; diff --git a/bin/citrea/tests/sequencer_commitments/mod.rs b/bin/citrea/tests/sequencer_commitments/mod.rs index d6a1a4ea9..b4bfdc641 100644 --- a/bin/citrea/tests/sequencer_commitments/mod.rs +++ b/bin/citrea/tests/sequencer_commitments/mod.rs @@ -259,3 +259,109 @@ async fn check_commitment_in_offchain_db() { seq_task.abort(); } + +#[tokio::test] +async fn test_ledger_get_commitments_on_slot() { + // citrea::initialize_logging(); + + let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); + + let seq_task = tokio::spawn(async { + start_rollup( + seq_port_tx, + GenesisPaths::from_dir("../test-data/genesis/integration-tests"), + BasicKernelGenesisPaths { + chain_state: + "../test-data/genesis/integration-tests-low-limiting-number/chain_state.json" + .into(), + }, + RollupProverConfig::Execute, + NodeMode::SequencerNode, + None, + 4, + true, + None, + None, + Some(true), + DEFAULT_DEPOSIT_MEMPOOL_FETCH_LIMIT, + ) + .await; + }); + + let seq_port = seq_port_rx.await.unwrap(); + let test_client = make_test_client(seq_port).await; + let da_service = MockDaService::new(MockAddress::from([0; 32])); + + let (full_node_port_tx, full_node_port_rx) = tokio::sync::oneshot::channel(); + + let full_node_task = tokio::spawn(async move { + start_rollup( + full_node_port_tx, + GenesisPaths::from_dir("../test-data/genesis/integration-tests"), + BasicKernelGenesisPaths 
{ + chain_state: + "../test-data/genesis/integration-tests-low-limiting-number/chain_state.json" + .into(), + }, + RollupProverConfig::Execute, + NodeMode::FullNode(seq_port), + None, + 4, + true, + None, + None, + Some(true), + DEFAULT_DEPOSIT_MEMPOOL_FETCH_LIMIT, + ) + .await; + }); + + let full_node_port = full_node_port_rx.await.unwrap(); + + let full_node_test_client = make_test_client(full_node_port).await; + da_service.publish_test_block().await.unwrap(); + sleep(Duration::from_secs(1)).await; + + test_client.send_publish_batch_request().await; + test_client.send_publish_batch_request().await; + test_client.send_publish_batch_request().await; + test_client.send_publish_batch_request().await; + da_service.publish_test_block().await.unwrap(); + // submits with new da block + test_client.send_publish_batch_request().await; + // full node gets the commitment + test_client.send_publish_batch_request().await; + // da_service.publish_test_block().await.unwrap(); + sleep(Duration::from_secs(4)).await; + + let commitments = full_node_test_client + .ledger_get_sequencer_commitments_on_slot_by_number(4) + .await + .unwrap() + .unwrap(); + assert_eq!(commitments.len(), 1); + + let second_hash = da_service.get_block_at(2).await.unwrap().header.hash; + assert_eq!( + commitments[0].l1_start_block_hash.to_vec(), + second_hash.0.to_vec() + ); + assert_eq!( + commitments[0].l1_end_block_hash.to_vec(), + second_hash.0.to_vec() + ); + + assert_eq!(commitments[0].found_in_l1, 4); + + let fourth_block_hash = da_service.get_block_at(4).await.unwrap().header.hash; + + let commitments_hash = full_node_test_client + .ledger_get_sequencer_commitments_on_slot_by_hash(fourth_block_hash.0) + .await + .unwrap() + .unwrap(); + assert_eq!(commitments_hash, commitments); + + seq_task.abort(); + full_node_task.abort(); +} diff --git a/bin/citrea/tests/test_client/mod.rs b/bin/citrea/tests/test_client/mod.rs index deb186f1f..beadbae4b 100644 --- a/bin/citrea/tests/test_client/mod.rs +++ b/bin/citrea/tests/test_client/mod.rs @@ -17,7 +17,7 @@ use jsonrpsee::rpc_params; use reth_primitives::BlockNumberOrTag; use reth_rpc_types::trace::geth::{GethDebugTracingOptions, GethTrace}; use sequencer_client::GetSoftBatchResponse; -use sov_rollup_interface::rpc::SoftConfirmationStatus; +use sov_rollup_interface::rpc::{SequencerCommitmentResponse, SoftConfirmationStatus}; pub const MAX_FEE_PER_GAS: u64 = 1000000001; @@ -584,6 +584,32 @@ impl TestClient { .map_err(|e| e.into()) } + pub(crate) async fn ledger_get_sequencer_commitments_on_slot_by_number( + &self, + height: u64, + ) -> Result>, Box> { + self.http_client + .request( + "ledger_getSequencerCommitmentsOnSlotByNumber", + rpc_params![height], + ) + .await + .map_err(|e| e.into()) + } + + pub(crate) async fn ledger_get_sequencer_commitments_on_slot_by_hash( + &self, + hash: [u8; 32], + ) -> Result>, Box> { + self.http_client + .request( + "ledger_getSequencerCommitmentsOnSlotByHash", + rpc_params![hash], + ) + .await + .map_err(|e| e.into()) + } + pub(crate) async fn get_limiting_number(&self) -> u64 { self.http_client .request( diff --git a/crates/sequencer/src/sequencer.rs b/crates/sequencer/src/sequencer.rs index d171b132b..8f924faf5 100644 --- a/crates/sequencer/src/sequencer.rs +++ b/crates/sequencer/src/sequencer.rs @@ -788,6 +788,7 @@ where commitment_info.l1_height_range.end().0, )) .expect("Sequencer: Failed to set last sequencer commitment L1 height"); + warn!("Commitment info: {:?}", commitment_info); if let Some(db_config) = db_config { match 
PostgresConnector::new(db_config).await {
diff --git a/crates/sovereign-sdk/full-node/db/sov-db/Cargo.toml b/crates/sovereign-sdk/full-node/db/sov-db/Cargo.toml
index 41d36e999..121b56e06 100644
--- a/crates/sovereign-sdk/full-node/db/sov-db/Cargo.toml
+++ b/crates/sovereign-sdk/full-node/db/sov-db/Cargo.toml
@@ -1,7 +1,7 @@
 [package]
 name = "sov-db"
 description = "A high-level DB interface for the Sovereign SDK"
-license = "Apache-2.0" # This license is inherited from Aptos
+license = "Apache-2.0" # This license is inherited from Aptos
 edition = { workspace = true }
 authors = { workspace = true }
 homepage = { workspace = true }
@@ -17,13 +17,18 @@ resolver = "2"
 # Maintained by sovereign labs
 jmt = { workspace = true }
 sov-schema-db = { path = "../sov-schema-db", version = "0.3" }
-sov-rollup-interface = { path = "../../../rollup-interface", version = "0.3", features = ["native"] }
+sov-rollup-interface = { path = "../../../rollup-interface", version = "0.3", features = [
+  "native",
+] }
 
 # External
 anyhow = { workspace = true, default-features = true }
 arbitrary = { workspace = true, optional = true }
 byteorder = { workspace = true, default-features = true }
-borsh = { workspace = true, default-features = true, features = ["bytes", "rc"] }
+borsh = { workspace = true, default-features = true, features = [
+  "bytes",
+  "rc",
+] }
 proptest = { workspace = true, optional = true, default-features = true }
 proptest-derive = { workspace = true, optional = true }
 serde = { workspace = true, default-features = true, features = ["rc"] }
@@ -31,6 +36,7 @@ tempfile = { workspace = true, optional = true }
 rocksdb = { workspace = true }
 bincode = { workspace = true }
 tokio = { workspace = true }
+hex = { workspace = true }
 
 [dev-dependencies]
 
diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs
index f5af5d6fa..2e3d62dcf 100644
--- a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs
+++ b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs
@@ -2,14 +2,14 @@ use std::path::Path;
 use std::sync::{Arc, Mutex};
 
 use serde::Serialize;
-use sov_rollup_interface::da::DaSpec;
+use sov_rollup_interface::da::{DaSpec, SequencerCommitment};
 use sov_rollup_interface::services::da::SlotData;
 use sov_rollup_interface::stf::{BatchReceipt, Event, SoftBatchReceipt};
 use sov_schema_db::{Schema, SchemaBatch, SeekKeyEncoder, DB};
 
 use crate::rocks_db_config::gen_rocksdb_options;
 use crate::schema::tables::{
-    BatchByHash, BatchByNumber, EventByKey, EventByNumber, L2RangeByL1Height,
+    BatchByHash, BatchByNumber, CommitmentsByNumber, EventByKey, EventByNumber, L2RangeByL1Height,
     LastSequencerCommitmentSent, SlotByHash, SlotByNumber, SoftBatchByHash, SoftBatchByNumber,
     SoftConfirmationStatus, TxByHash, TxByNumber, LEDGER_TABLES,
 };
@@ -519,4 +519,33 @@ impl LedgerDB {
     ) -> anyhow::Result> {
         self.db.get::(&l1_height)
     }
+
+    /// Appends a commitment to the list of commitments stored for the given DA slot height,
+    /// creating the entry if no commitments were stored for that slot yet
+    pub fn update_commitments_on_da_slot(
+        &self,
+        height: u64,
+        commitment: SequencerCommitment,
+    ) -> anyhow::Result<()> {
+        // get commitments
+        let commitments = self.db.get::<CommitmentsByNumber>(&SlotNumber(height))?;
+
+        match commitments {
+            // If there were other commitments, upsert
+            Some(mut commitments) => {
+                commitments.push(commitment);
+                self.db
+                    .put::<CommitmentsByNumber>(&SlotNumber(height), &commitments)
+            }
+            // Else insert
+            None => self
+                .db
+                .put::<CommitmentsByNumber>(&SlotNumber(height), &vec![commitment]),
+        }
+    }
+
+    /// Sets the L1 height for the given L1 block hash
+    pub fn set_l1_height_of_l1_hash(&self, hash: [u8; 32], height: u64) -> anyhow::Result<()> {
+        self.db.put::<SlotByHash>(&hash, &SlotNumber(height))
+    }
 }
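Note on the two LedgerDB helpers added above: `set_l1_height_of_l1_hash` records the hash-to-height mapping for a scanned DA block, and `update_commitments_on_da_slot` appends each verified commitment to that slot's list. The full node runner changes further down wire the two together; below is a minimal sketch of that call pattern, using a hypothetical `record_commitment` helper (the imports and helper name are assumptions for illustration, only the two LedgerDB methods come from this patch):

    use sov_db::ledger_db::LedgerDB;
    use sov_rollup_interface::da::SequencerCommitment;

    /// Hypothetical helper: index a commitment found in a DA block so the new
    /// ledger RPCs can later serve it by slot number or by block hash.
    fn record_commitment(
        ledger_db: &LedgerDB,
        l1_block_hash: [u8; 32],
        l1_height: u64,
        commitment: SequencerCommitment,
    ) -> anyhow::Result<()> {
        // Remember which slot number the L1 block hash maps to, so that
        // ledger_getSequencerCommitmentsOnSlotByHash can resolve hash -> height.
        ledger_db.set_l1_height_of_l1_hash(l1_block_hash, l1_height)?;
        // Append the commitment to the slot's commitment list (created on first use).
        ledger_db.update_commitments_on_da_slot(l1_height, commitment)?;
        Ok(())
    }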
diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/rpc.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/rpc.rs
index 481b1a43a..17c798bab 100644
--- a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/rpc.rs
+++ b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/rpc.rs
@@ -1,15 +1,16 @@
 use serde::de::DeserializeOwned;
 use sov_rollup_interface::rpc::{
-    BatchIdAndOffset, BatchIdentifier, BatchResponse, EventIdentifier, ItemOrHash,
-    LedgerRpcProvider, QueryMode, SlotIdAndOffset, SlotIdentifier, SlotResponse,
-    SoftBatchIdentifier, SoftBatchResponse, TxIdAndOffset, TxIdentifier, TxResponse,
+    sequencer_commitment_to_response, BatchIdAndOffset, BatchIdentifier, BatchResponse,
+    EventIdentifier, ItemOrHash, LedgerRpcProvider, QueryMode, SequencerCommitmentResponse,
+    SlotIdAndOffset, SlotIdentifier, SlotResponse, SoftBatchIdentifier, SoftBatchResponse,
+    TxIdAndOffset, TxIdentifier, TxResponse,
 };
 use sov_rollup_interface::stf::Event;
 use tokio::sync::broadcast::Receiver;
 
 use crate::schema::tables::{
-    BatchByHash, BatchByNumber, EventByNumber, SlotByHash, SlotByNumber, SoftBatchByHash,
-    SoftBatchByNumber, SoftConfirmationStatus, TxByHash, TxByNumber,
+    BatchByHash, BatchByNumber, CommitmentsByNumber, EventByNumber, SlotByHash, SlotByNumber,
+    SoftBatchByHash, SoftBatchByNumber, SoftConfirmationStatus, TxByHash, TxByNumber,
 };
 use crate::schema::types::{
     BatchNumber, EventNumber, SlotNumber, StoredBatch, StoredSlot, TxNumber,
@@ -364,6 +365,24 @@ impl LedgerRpcProvider for LedgerDB {
             None => Ok(sov_rollup_interface::rpc::SoftConfirmationStatus::Trusted),
         }
     }
+    fn get_slot_number_by_hash(&self, hash: [u8; 32]) -> Result<Option<u64>, anyhow::Error> {
+        self.db.get::<SlotByHash>(&hash).map(|v| v.map(|a| a.0))
+    }
+
+    fn get_sequencer_commitments_on_slot_by_number(
+        &self,
+        height: u64,
+    ) -> Result<Option<Vec<SequencerCommitmentResponse>>, anyhow::Error> {
+        match self.db.get::<CommitmentsByNumber>(&SlotNumber(height))? {
+            Some(commitments) => Ok(Some(
+                commitments
+                    .into_iter()
+                    .map(|commitment| sequencer_commitment_to_response(commitment, height))
+                    .collect(),
+            )),
+            None => Ok(None),
+        }
+    }
 
     fn subscribe_slots(&self) -> Result<Receiver<u64>, anyhow::Error> {
         Ok(self.slot_subscriptions.subscribe())
diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs
index 784618f4e..9a933732d 100644
--- a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs
+++ b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs
@@ -29,6 +29,7 @@ use borsh::{maybestd, BorshDeserialize, BorshSerialize};
 use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
 use jmt::storage::{NibblePath, Node, NodeKey};
 use jmt::Version;
+use sov_rollup_interface::da::SequencerCommitment;
 use sov_rollup_interface::stf::{Event, EventKey};
 use sov_schema_db::schema::{KeyDecoder, KeyEncoder, ValueCodec};
 use sov_schema_db::{CodecError, SeekKeyEncoder};
@@ -62,6 +63,7 @@ pub const LEDGER_TABLES: &[&str] = &[
     TxByNumber::table_name(),
     EventByKey::table_name(),
     EventByNumber::table_name(),
+    CommitmentsByNumber::table_name(),
 ];
 
 /// A list of all tables used by the NativeDB. These tables store
@@ -223,6 +225,11 @@ define_table_with_default_codec!(
     (SlotByHash) DbHash => SlotNumber
 );
 
+define_table_with_default_codec!(
+    /// The primary source for sequencer commitment data
+    (CommitmentsByNumber) SlotNumber => Vec<SequencerCommitment>
+);
+
 define_table_with_seek_key_codec!(
     /// The primary source for soft batch data
     (SoftBatchByNumber) BatchNumber => StoredSoftBatch
diff --git a/crates/sovereign-sdk/full-node/sov-ledger-rpc/src/server.rs b/crates/sovereign-sdk/full-node/sov-ledger-rpc/src/server.rs
index 4498e481d..6d4413e4b 100644
--- a/crates/sovereign-sdk/full-node/sov-ledger-rpc/src/server.rs
+++ b/crates/sovereign-sdk/full-node/sov-ledger-rpc/src/server.rs
@@ -173,6 +173,35 @@ where
         },
     )?;
 
+    rpc.register_async_method(
+        "ledger_getSequencerCommitmentsOnSlotByNumber",
+        |params, ledger| async move {
+            // Returns commitments on DA slot with given height.
+            let height: u64 = params.one()?;
+
+            ledger
+                .get_sequencer_commitments_on_slot_by_number(height)
+                .map_err(|e| to_jsonrpsee_error_object(LEDGER_RPC_ERROR, e))
+        },
+    )?;
+
+    rpc.register_async_method(
+        "ledger_getSequencerCommitmentsOnSlotByHash",
+        |params, ledger| async move {
+            // Returns commitments on DA slot with given hash.
+            let hash: [u8; 32] = params.one()?;
+            let height = ledger
+                .get_slot_number_by_hash(hash)
+                .map_err(|e| to_jsonrpsee_error_object(LEDGER_RPC_ERROR, e))?;
+            match height {
+                Some(height) => ledger
+                    .get_sequencer_commitments_on_slot_by_number(height)
+                    .map_err(|e| to_jsonrpsee_error_object(LEDGER_RPC_ERROR, e)),
+                None => Ok(None),
+            }
+        },
+    )?;
+
     rpc.register_subscription(
         "ledger_subscribeSlots",
         "ledger_slotProcessed",
diff --git a/crates/sovereign-sdk/full-node/sov-stf-runner/src/runner.rs b/crates/sovereign-sdk/full-node/sov-stf-runner/src/runner.rs
index 0740dc9c1..da3ac3bea 100644
--- a/crates/sovereign-sdk/full-node/sov-stf-runner/src/runner.rs
+++ b/crates/sovereign-sdk/full-node/sov-stf-runner/src/runner.rs
@@ -562,8 +562,8 @@ where
             return Err(anyhow::anyhow!("Sequencer Client is not initialized"));
         };
 
-        let mut seen_block_headers: VecDeque<::BlockHeader> = VecDeque::new();
-        let mut seen_receipts: VecDeque<_> = VecDeque::new();
+        let mut last_l1_height = 0;
+        let mut cur_l1_block = None;
 
         let mut height = self.start_height;
         info!("Starting to sync from height {}", height);
@@ -609,166 +609,175 @@ where
                 },
             };
 
-            // TODO: for a node, the da block at slot_height might not have been finalized yet
-            // should wait for it to be finalized
-            let filtered_block = self
-                .da_service
-                .get_block_at(soft_batch.da_slot_height)
-                .await?;
-
-            // TODO: when legit blocks are implemented use below to
-            // check for reorgs
-            // Checking if reorg happened or not.
-            // if let Some(prev_block_header) = seen_block_headers.back() {
-            //     if prev_block_header.hash() != filtered_block.header().prev_hash() {
-            //         tracing::warn!("Block at height={} does not belong in current chain. Chain has forked. 
Traversing backwards", height); - // while let Some(seen_block_header) = seen_block_headers.pop_back() { - // seen_receipts.pop_back(); - // let block = self - // .da_service - // .get_block_at(seen_block_header.height()) - // .await?; - // if block.header().prev_hash() == seen_block_header.prev_hash() { - // height = seen_block_header.height(); - // filtered_block = block; - // break; - // } - // } - // tracing::info!("Resuming execution on height={}", height); - // } - // } - - // Merkle root hash - L1 start height - L1 end height - // TODO: How to confirm this is what we submit - use? - // TODO: Add support for multiple commitments in a single block - - let mut sequencer_commitments = Vec::::new(); - let mut zk_proofs = Vec::::new(); - - self.da_service - .extract_relevant_blobs(&filtered_block) - .into_iter() - .for_each(|mut tx| { - let data = DaData::try_from_slice(tx.full_data()); - - if tx.sender().as_ref() == self.sequencer_da_pub_key.as_slice() { - if let Ok(DaData::SequencerCommitment(seq_com)) = data { - sequencer_commitments.push(seq_com); - } else { - tracing::warn!( - "Found broken DA data in block 0x{}: {:?}", - hex::encode(filtered_block.hash()), - data - ); - } - } else if tx.sender().as_ref() == self.prover_da_pub_key.as_slice() { - if let Ok(DaData::ZKProof(batch_proof)) = data { - zk_proofs.push(batch_proof); + if last_l1_height != soft_batch.da_slot_height || cur_l1_block.is_none() { + last_l1_height = soft_batch.da_slot_height; + // TODO: for a node, the da block at slot_height might not have been finalized yet + // should wait for it to be finalized + let filtered_block = self + .da_service + .get_block_at(soft_batch.da_slot_height) + .await?; + + // Set the l1 height of the l1 hash + self.ledger_db + .set_l1_height_of_l1_hash( + filtered_block.header().hash().into(), + soft_batch.da_slot_height, + ) + .unwrap(); + + // Merkle root hash - L1 start height - L1 end height + // TODO: How to confirm this is what we submit - use? + // TODO: Add support for multiple commitments in a single block + + let mut sequencer_commitments = Vec::::new(); + let mut zk_proofs = Vec::::new(); + + self.da_service + .extract_relevant_blobs(&filtered_block) + .into_iter() + .for_each(|mut tx| { + let data = DaData::try_from_slice(tx.full_data()); + + if tx.sender().as_ref() == self.sequencer_da_pub_key.as_slice() { + if let Ok(DaData::SequencerCommitment(seq_com)) = data { + sequencer_commitments.push(seq_com); + } else { + tracing::warn!( + "Found broken DA data in block 0x{}: {:?}", + hex::encode(filtered_block.hash()), + data + ); + } + } else if tx.sender().as_ref() == self.prover_da_pub_key.as_slice() { + if let Ok(DaData::ZKProof(batch_proof)) = data { + zk_proofs.push(batch_proof); + } else { + tracing::warn!( + "Found broken DA data in block 0x{}: {:?}", + hex::encode(filtered_block.hash()), + data + ); + } } else { - tracing::warn!( - "Found broken DA data in block 0x{}: {:?}", - hex::encode(filtered_block.hash()), - data - ); + // TODO: This is where force transactions will land - try to parse DA data force transaction } - } else { - // TODO: This is where force transactions will land - try to parse DA data force transaction - } - }); + }); - if !zk_proofs.is_empty() { - // TODO: Implement this - } + if !zk_proofs.is_empty() { + // TODO: Implement this + } - for sequencer_commitment in sequencer_commitments.iter() { - let start_l1_height = self - .da_service - .get_block_by_hash(sequencer_commitment.l1_start_block_hash) - .await? 
- .header() - .height(); + for sequencer_commitment in sequencer_commitments.iter() { + tracing::warn!( + "Processing sequencer commitment: {:?}", + sequencer_commitment + ); + let start_l1_height = self + .da_service + .get_block_by_hash(sequencer_commitment.l1_start_block_hash) + .await? + .header() + .height(); - let end_l1_height = self - .da_service - .get_block_by_hash(sequencer_commitment.l1_end_block_hash) - .await? - .header() - .height(); - - let start_l2_height = match self - .ledger_db - .get_l2_range_by_l1_height(SlotNumber(start_l1_height)) - { - Ok(Some((start_l2_height, _))) => start_l2_height, - Ok(None) => bail!( - "Sequencer: L1 L2 connection does not exist. L1 height = {}", - start_l1_height - ), - Err(e) => bail!("Sequencer: Failed to get L1 L2 connection. Err: {}", e), - }; - - let end_l2_height = match self - .ledger_db - .get_l2_range_by_l1_height(SlotNumber(start_l1_height)) - { - Ok(Some((_, end_l2_height))) => end_l2_height, - Ok(None) => bail!( - "Sequencer: L1 L2 connection does not exist. L1 height = {}", - start_l1_height - ), - Err(e) => bail!("Sequencer: Failed to get L1 L2 connection. Err: {}", e), - }; - - let range_end = BatchNumber(end_l2_height.0 + 1); - // Traverse each item's field of vector of transactions, put them in merkle tree - // and compare the root with the one from the ledger - let stored_soft_batches: Vec = self - .ledger_db - .get_soft_batch_range(&(start_l2_height..range_end))?; - - let soft_batches_tree = MerkleTree::::from_leaves( - stored_soft_batches - .iter() - .map(|x| x.hash) - .collect::>() - .as_slice(), - ); + let end_l1_height = self + .da_service + .get_block_by_hash(sequencer_commitment.l1_end_block_hash) + .await? + .header() + .height(); - if soft_batches_tree.root() != Some(sequencer_commitment.merkle_root) { tracing::warn!( - "Merkle root mismatch - expected 0x{} but got 0x{}", - hex::encode( - soft_batches_tree - .root() - .ok_or(anyhow!("Could not calculate soft batch tree root"))? + "start height: {}, end height: {}", + start_l1_height, + end_l1_height + ); + + let start_l2_height = match self + .ledger_db + .get_l2_range_by_l1_height(SlotNumber(start_l1_height)) + { + Ok(Some((start_l2_height, _))) => start_l2_height, + Ok(None) => bail!( + "Runner: L1 L2 connection does not exist. L1 height = {}", + start_l1_height + ), + Err(e) => bail!("Runner: Failed to get L1 L2 connection. Err: {}", e), + }; + + let end_l2_height = match self + .ledger_db + .get_l2_range_by_l1_height(SlotNumber(end_l1_height)) + { + Ok(Some((_, end_l2_height))) => end_l2_height, + Ok(None) => bail!( + "Runner: L1 L2 connection does not exist. L1 height = {}", + end_l1_height ), - hex::encode(sequencer_commitment.merkle_root) + Err(e) => bail!("Runner: Failed to get L1 L2 connection. 
Err: {}", e), + }; + + let range_end = BatchNumber(end_l2_height.0 + 1); + // Traverse each item's field of vector of transactions, put them in merkle tree + // and compare the root with the one from the ledger + let stored_soft_batches: Vec = self + .ledger_db + .get_soft_batch_range(&(start_l2_height..range_end))?; + + let soft_batches_tree = MerkleTree::::from_leaves( + stored_soft_batches + .iter() + .map(|x| x.hash) + .collect::>() + .as_slice(), ); - } - for i in start_l1_height..=end_l1_height { - self.ledger_db - .put_soft_confirmation_status( - SlotNumber(i), - SoftConfirmationStatus::Finalized, - ) - .unwrap_or_else(|_| { - panic!( + if soft_batches_tree.root() != Some(sequencer_commitment.merkle_root) { + tracing::warn!( + "Merkle root mismatch - expected 0x{} but got 0x{}", + hex::encode( + soft_batches_tree + .root() + .ok_or(anyhow!("Could not calculate soft batch tree root"))? + ), + hex::encode(sequencer_commitment.merkle_root) + ); + } else { + self.ledger_db + .update_commitments_on_da_slot( + soft_batch.da_slot_height, + sequencer_commitment.clone(), + ) + .unwrap(); + for i in start_l1_height..=end_l1_height { + self.ledger_db + .put_soft_confirmation_status( + SlotNumber(i), + SoftConfirmationStatus::Finalized, + ) + .unwrap_or_else(|_| { + panic!( "Failed to put soft confirmation status in the ledger db {}", i ) - }); + }); + } + } } + + cur_l1_block = Some(filtered_block); } + let cur_l1_block = cur_l1_block.clone().unwrap(); + info!( "Running soft confirmation batch #{} with hash: 0x{} on DA block #{}", height, hex::encode(soft_batch.hash), - filtered_block.header().height() + cur_l1_block.header().height() ); - let mut data_to_commit = SlotCommit::new(filtered_block.clone()); + let mut data_to_commit = SlotCommit::new(cur_l1_block.clone()); let pre_state = self.storage_manager.create_storage_on_l2_height(height)?; @@ -778,8 +787,8 @@ where &self.state_root, pre_state, Default::default(), - filtered_block.header(), - &filtered_block.validity_condition(), + cur_l1_block.header(), + &cur_l1_block.validity_condition(), &mut soft_batch.clone().into(), ); @@ -787,58 +796,9 @@ where data_to_commit.add_batch(receipt); } - // let (inclusion_proof, completeness_proof) = self - // .da_service - // .get_extraction_proof(&filtered_block, vec_blobs.as_slice()) - // .await; - - // let _transition_data: StateTransitionData = - // StateTransitionData { - // // TODO(https://github.com/Sovereign-Labs/sovereign-sdk/issues/1247): incorrect pre-state root in case of re-org - // initial_state_root: self.state_root.clone(), - // final_state_root: slot_result.state_root.clone(), - // da_block_header: filtered_block.header().clone(), - // inclusion_proof, - // completeness_proof, - // blobs: vec_blobs, - // state_transition_witness: slot_result.witness, - // }; - self.storage_manager .save_change_set_l2(height, slot_result.change_set)?; - // ---------------- - // Create ZK proof. - // { - // let header_hash = transition_data.da_block_header.hash(); - // self.prover_service.submit_witness(transition_data).await; - // // TODO(https://github.com/Sovereign-Labs/sovereign-sdk/issues/1185): - // // This section will be moved and called upon block finalization once we have fork management ready. 
- // self.prover_service - // .prove(header_hash.clone()) - // .await - // .expect("The proof creation should succeed"); - - // loop { - // let status = self - // .prover_service - // .send_proof_to_da(header_hash.clone()) - // .await; - - // match status { - // Ok(ProofSubmissionStatus::Success) => { - // break; - // } - // // TODO(https://github.com/Sovereign-Labs/sovereign-sdk/issues/1185): Add timeout handling. - // Ok(ProofSubmissionStatus::ProofGenerationInProgress) => { - // tokio::time::sleep(tokio::time::Duration::from_millis(100)).await - // } - // // TODO(https://github.com/Sovereign-Labs/sovereign-sdk/issues/1185): Add handling for DA submission errors. - // Err(e) => panic!("{:?}", e), - // } - // } - // } - let batch_receipt = data_to_commit.batch_receipts()[0].clone(); let next_state_root = slot_result.state_root; @@ -853,9 +813,9 @@ where post_state_root: next_state_root.as_ref().to_vec(), phantom_data: PhantomData::, batch_hash: batch_receipt.batch_hash, - da_slot_hash: filtered_block.header().hash(), - da_slot_height: filtered_block.header().height(), - da_slot_txs_commitment: filtered_block.header().txs_commitment(), + da_slot_hash: cur_l1_block.header().hash(), + da_slot_height: cur_l1_block.header().height(), + da_slot_txs_commitment: cur_l1_block.header().txs_commitment(), tx_receipts: batch_receipt.tx_receipts, soft_confirmation_signature: soft_batch.soft_confirmation_signature, pub_key: soft_batch.pub_key, @@ -867,51 +827,17 @@ where self.ledger_db .commit_soft_batch(soft_batch_receipt, self.include_tx_body)?; self.ledger_db.extend_l2_range_of_l1_slot( - SlotNumber(filtered_block.header().height()), + SlotNumber(cur_l1_block.header().height()), BatchNumber(height), )?; self.state_root = next_state_root; - seen_receipts.push_back(data_to_commit); - seen_block_headers.push_back(filtered_block.header().clone()); info!( "New State Root after soft confirmation #{} is: {:?}", height, self.state_root ); - // ---------------- - // Finalization. Done after seen block for proper handling of instant finality - // Can be moved to another thread to improve throughput - let last_finalized = self.da_service.get_last_finalized_block_header().await?; - // For safety we finalize blocks one by one - tracing::info!( - "Last finalized header height is {}, ", - last_finalized.height() - ); - // Checking all seen blocks, in case if there was delay in getting last finalized header. 
-            // while let Some(earliest_seen_header) = seen_block_headers.front() {
-            //     tracing::debug!(
-            //         "Checking seen header height={}",
-            //         earliest_seen_header.height()
-            //     );
-            //     if earliest_seen_header.height() <= last_finalized.height() {
-            //         tracing::debug!(
-            //             "Finalizing seen header height={}",
-            //             earliest_seen_header.height()
-            //         );
-
-            //         continue;
-            //     }
-
-            //     break;
-            // }
-            // self.storage_manager.finalize(earliest_seen_header)?;
-            seen_block_headers.pop_front();
-            let receipts = seen_receipts
-                .pop_front()
-                .ok_or(anyhow!("No seen receipts exist"))?;
-            self.ledger_db.commit_slot(receipts)?;
             self.storage_manager.finalize_l2(height)?;
             height += 1;
diff --git a/crates/sovereign-sdk/fuzz/Cargo.lock b/crates/sovereign-sdk/fuzz/Cargo.lock
index a2ca1c8ed..2b1fd90e3 100644
--- a/crates/sovereign-sdk/fuzz/Cargo.lock
+++ b/crates/sovereign-sdk/fuzz/Cargo.lock
@@ -2947,6 +2947,7 @@ dependencies = [
  "bincode",
  "borsh",
  "byteorder",
+ "hex",
  "jmt",
  "rocksdb",
  "serde",
diff --git a/crates/sovereign-sdk/rollup-interface/src/node/rpc/mod.rs b/crates/sovereign-sdk/rollup-interface/src/node/rpc/mod.rs
index 5a5232865..e71ec3448 100644
--- a/crates/sovereign-sdk/rollup-interface/src/node/rpc/mod.rs
+++ b/crates/sovereign-sdk/rollup-interface/src/node/rpc/mod.rs
@@ -7,6 +7,7 @@ use borsh::{BorshDeserialize, BorshSerialize};
 use serde::de::DeserializeOwned;
 use serde::{Deserialize, Serialize};
 
+use crate::da::SequencerCommitment;
 use crate::maybestd::vec::Vec;
 #[cfg(feature = "native")]
 use crate::stf::Event;
@@ -215,6 +216,35 @@ pub struct SoftBatchResponse {
     pub timestamp: u64,
 }
 
+/// The response to a JSON-RPC request for sequencer commitments on a DA slot.
+#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
+pub struct SequencerCommitmentResponse {
+    /// L1 block height the commitment was found in
+    pub found_in_l1: u64,
+    /// Hex encoded Merkle root of soft confirmation hashes
+    #[serde(with = "hex::serde")]
+    pub merkle_root: [u8; 32],
+    /// Hex encoded hash of the first L1 block in the commitment range
+    #[serde(with = "hex::serde")]
+    pub l1_start_block_hash: [u8; 32],
+    /// Hex encoded hash of the last L1 block in the commitment range
+    #[serde(with = "hex::serde")]
+    pub l1_end_block_hash: [u8; 32],
+}
+
+/// Converts `SequencerCommitment` to `SequencerCommitmentResponse`
+pub fn sequencer_commitment_to_response(
+    commitment: SequencerCommitment,
+    l1_height: u64,
+) -> SequencerCommitmentResponse {
+    SequencerCommitmentResponse {
+        found_in_l1: l1_height,
+        merkle_root: commitment.merkle_root,
+        l1_start_block_hash: commitment.l1_start_block_hash,
+        l1_end_block_hash: commitment.l1_end_block_hash,
+    }
+}
+
 /// The response to a JSON-RPC request for a particular batch.
 #[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
 pub struct BatchResponse<B, Tx> {
@@ -418,6 +448,15 @@ pub trait LedgerRpcProvider {
         soft_batch_receipt: u64,
     ) -> Result<SoftConfirmationStatus, anyhow::Error>;
 
+    /// Returns the slot number of a given hash
+    fn get_slot_number_by_hash(&self, hash: [u8; 32]) -> Result<Option<u64>, anyhow::Error>;
+
+    /// Takes an L1 height and returns all the sequencer commitments on the slot
+    fn get_sequencer_commitments_on_slot_by_number(
+        &self,
+        height: u64,
+    ) -> Result<Option<Vec<SequencerCommitmentResponse>>, anyhow::Error>;
+
     /// Get a notification each time a slot is processed
     fn subscribe_slots(&self) -> Result<Receiver<u64>, anyhow::Error>;
 }
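For reference, the two endpoints added above can be exercised with any JSON-RPC client. A minimal sketch using jsonrpsee's HTTP client, mirroring the test client in this patch (the node URL, slot number, and block hash below are placeholders, not values from the patch):

    use jsonrpsee::core::client::ClientT;
    use jsonrpsee::http_client::HttpClientBuilder;
    use jsonrpsee::rpc_params;
    use sov_rollup_interface::rpc::SequencerCommitmentResponse;

    #[tokio::main]
    async fn main() -> anyhow::Result<()> {
        // Placeholder URL; point this at a running full node's RPC server.
        let client = HttpClientBuilder::default().build("http://127.0.0.1:12345")?;

        // Query by DA slot number; `None` means no commitments were found on that slot.
        let by_number: Option<Vec<SequencerCommitmentResponse>> = client
            .request(
                "ledger_getSequencerCommitmentsOnSlotByNumber",
                rpc_params![4u64],
            )
            .await?;
        println!("commitments on slot 4: {:?}", by_number);

        // Query by DA block hash; the node resolves the hash to a slot number first.
        let block_hash = [0u8; 32]; // placeholder hash
        let by_hash: Option<Vec<SequencerCommitmentResponse>> = client
            .request(
                "ledger_getSequencerCommitmentsOnSlotByHash",
                rpc_params![block_hash],
            )
            .await?;
        println!("commitments on block {:?}: {:?}", block_hash, by_hash);

        Ok(())
    }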