diff --git a/Cargo.lock b/Cargo.lock index 0b4891540..4210ef51d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1800,6 +1800,7 @@ dependencies = [ "metrics-exporter-prometheus", "metrics-util", "prover-services", + "rand", "regex", "reqwest", "reth-primitives", @@ -1908,7 +1909,7 @@ dependencies = [ [[package]] name = "citrea-e2e" version = "0.1.0" -source = "git+https://github.com/chainwayxyz/citrea-e2e?rev=f75fe92#f75fe92b0594724b9785eff857bb6aff861a2a55" +source = "git+https://github.com/chainwayxyz/citrea-e2e?rev=72dd81385b472ec0c8b2483de739b8a566859082#72dd81385b472ec0c8b2483de739b8a566859082" dependencies = [ "anyhow", "async-trait", diff --git a/Cargo.toml b/Cargo.toml index 4bb5e4b9d..8ed2aaada 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -154,7 +154,7 @@ alloy-eips = { version = "0.4.2", default-features = false } alloy-consensus = { version = "0.4.2", default-features = false, features = ["serde", "serde-bincode-compat"] } alloy-network = { version = "0.4.2", default-features = false } -citrea-e2e = { git = "https://github.com/chainwayxyz/citrea-e2e", rev = "f75fe92" } +citrea-e2e = { git = "https://github.com/chainwayxyz/citrea-e2e", rev = "72dd81385b472ec0c8b2483de739b8a566859082" } [patch.crates-io] bitcoincore-rpc = { version = "0.18.0", git = "https://github.com/chainwayxyz/rust-bitcoincore-rpc.git", rev = "ca3cfa2" } diff --git a/bin/citrea/Cargo.toml b/bin/citrea/Cargo.toml index 84eab5078..0428ad5e8 100644 --- a/bin/citrea/Cargo.toml +++ b/bin/citrea/Cargo.toml @@ -81,6 +81,7 @@ bincode = { workspace = true } borsh = { workspace = true } hex = { workspace = true } jmt = { workspace = true } +rand = { workspace = true } reqwest = { workspace = true } risc0-zkvm = { workspace = true, default-features = false, features = ["std"] } rs_merkle = { workspace = true } diff --git a/bin/citrea/src/rollup/mod.rs b/bin/citrea/src/rollup/mod.rs index 8e36c8f91..ce328412f 100644 --- a/bin/citrea/src/rollup/mod.rs +++ b/bin/citrea/src/rollup/mod.rs @@ -12,6 +12,7 @@ use citrea_sequencer::CitreaSequencer; use jsonrpsee::RpcModule; use sov_db::ledger_db::migrations::LedgerDBMigrator; use sov_db::ledger_db::{LedgerDB, SharedLedgerOps}; +use sov_db::mmr_db::MmrDB; use sov_db::rocks_db_config::RocksdbConfig; use sov_db::schema::types::SoftConfirmationNumber; use sov_modules_api::Spec; @@ -449,6 +450,7 @@ pub trait CitreaRollupBlueprint: RollupBlueprint { None, ); let ledger_db = self.create_ledger_db(&rocksdb_config); + let mmr_db = MmrDB::new(&rocksdb_config)?; let prover_service = self .create_prover_service( @@ -500,6 +502,7 @@ pub trait CitreaRollupBlueprint: RollupBlueprint { batch_prover_code_commitments_by_spec, light_client_prover_code_commitment, light_client_prover_elfs, + mmr_db, task_manager, )?; diff --git a/bin/citrea/tests/bitcoin_e2e/light_client_test.rs b/bin/citrea/tests/bitcoin_e2e/light_client_test.rs index 30a4bb50b..bec9f79ab 100644 --- a/bin/citrea/tests/bitcoin_e2e/light_client_test.rs +++ b/bin/citrea/tests/bitcoin_e2e/light_client_test.rs @@ -6,6 +6,7 @@ use alloy_primitives::U64; use async_trait::async_trait; use bitcoin_da::service::{BitcoinService, BitcoinServiceConfig, FINALITY_DEPTH}; use bitcoin_da::spec::{BitcoinSpec, RollupParams}; +use bitcoincore_rpc::RpcApi; use citrea_batch_prover::rpc::BatchProverRpcClient; use citrea_batch_prover::GroupCommitments; use citrea_common::tasks::manager::TaskManager; @@ -19,6 +20,7 @@ use citrea_e2e::test_case::{TestCase, TestCaseRunner}; use citrea_e2e::Result; use citrea_light_client_prover::rpc::LightClientProverRpcClient; 
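// NOTE: `create_serialized_fake_receipt_batch_proof` (reworked near the bottom of this file)
// now takes two extra trailing arguments: an optional state diff (`None` falls back to an
// empty diff) and a `malformed_journal` flag (`true` appends an extra byte to the serialized
// journal so it cannot be parsed). Passing `None, false` at the existing call sites, e.g.
// (with placeholder arguments)
//     create_serialized_fake_receipt_batch_proof(prev_root, new_root, l2_height, method_id, None, false)
// keeps the old behaviour of an empty state diff and a well-formed journal.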
use citrea_primitives::{TO_BATCH_PROOF_PREFIX, TO_LIGHT_CLIENT_PREFIX}; +use rand::{thread_rng, Rng}; use risc0_zkvm::{FakeReceipt, InnerReceipt, MaybePruned, Receipt, ReceiptClaim}; use sov_ledger_rpc::LedgerRpcClient; use sov_rollup_interface::da::{BatchProofMethodId, DaTxRequest}; @@ -829,6 +831,8 @@ impl TestCase for LightClientUnverifiableBatchProofTest { [1u8; 32], fork1_height + 1, method_ids[1].1, + None, + false, ); let _ = bitcoin_da_service .send_transaction_with_fee_rate(DaTxRequest::ZKProof(verifiable_batch_proof), 1) @@ -840,6 +844,8 @@ impl TestCase for LightClientUnverifiableBatchProofTest { [3u8; 32], fork1_height * 3, method_ids[1].1, + None, + false, ); let _ = bitcoin_da_service .send_transaction_with_fee_rate(DaTxRequest::ZKProof(verifiable_batch_proof), 1) @@ -847,13 +853,14 @@ impl TestCase for LightClientUnverifiableBatchProofTest { .unwrap(); // Expect unparsable journal to be skipped - let unparsable_batch_proof = - create_serialized_fake_receipt_batch_proof_with_malformed_journal( - [3u8; 32], - [5u8; 32], - fork1_height * 4, - method_ids[1].1, - ); + let unparsable_batch_proof = create_serialized_fake_receipt_batch_proof( + [3u8; 32], + [5u8; 32], + fork1_height * 4, + method_ids[1].1, + None, + true, + ); let _ = bitcoin_da_service .send_transaction_with_fee_rate(DaTxRequest::ZKProof(unparsable_batch_proof), 1) .await @@ -864,6 +871,8 @@ impl TestCase for LightClientUnverifiableBatchProofTest { [2u8; 32], fork1_height * 2, method_ids[1].1, + None, + false, ); let _ = bitcoin_da_service .send_transaction_with_fee_rate(DaTxRequest::ZKProof(verifiable_batch_proof), 1) @@ -877,6 +886,8 @@ impl TestCase for LightClientUnverifiableBatchProofTest { [4u8; 32], fork1_height * 4, random_method_id, + None, + false, ); let _ = bitcoin_da_service .send_transaction_with_fee_rate(DaTxRequest::ZKProof(unverifiable_batch_proof), 1) @@ -923,40 +934,428 @@ async fn test_light_client_unverifiable_batch_proof() -> Result<()> { .await } -fn create_serialized_fake_receipt_batch_proof( - initial_state_root: [u8; 32], - final_state_root: [u8; 32], - last_l2_height: u64, - method_id: [u32; 8], -) -> Vec { - let batch_proof_output = BatchProofCircuitOutput:: { - initial_state_root, - final_state_root, - last_l2_height, - da_slot_hash: [0u8; 32].into(), - prev_soft_confirmation_hash: [0u8; 32], - final_soft_confirmation_hash: [0u8; 32], - state_diff: BTreeMap::new(), - sequencer_commitments_range: (0, 0), - sequencer_da_public_key: [0u8; 32].to_vec(), - sequencer_public_key: [0u8; 32].to_vec(), - preproven_commitments: vec![], - }; - let output_serialized = borsh::to_vec(&batch_proof_output).unwrap(); +#[derive(Default)] +struct VerifyChunkedTxsInLightClient { + task_manager: TaskManager<()>, +} - let claim = MaybePruned::Value(ReceiptClaim::ok(method_id, output_serialized.clone())); - let fake_receipt = FakeReceipt::new(claim); - // Receipt with verifiable claim - let receipt = Receipt::new(InnerReceipt::Fake(fake_receipt), output_serialized.clone()); - bincode::serialize(&receipt).unwrap() +#[async_trait] +impl TestCase for VerifyChunkedTxsInLightClient { + fn test_config() -> TestCaseConfig { + TestCaseConfig { + with_sequencer: true, + with_light_client_prover: true, + with_full_node: true, + ..Default::default() + } + } + + fn light_client_prover_config() -> LightClientProverConfig { + LightClientProverConfig { + enable_recovery: false, + initial_da_height: 171, + ..Default::default() + } + } + + fn sequencer_config() -> SequencerConfig { + SequencerConfig { + 
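// A very high commitment threshold keeps the sequencer from publishing its own sequencer
// commitments to DA mid-test, so the DA mempool only ever contains the transactions sent
// manually via `bitcoin_da_service` below and the mempool-length checks stay deterministic.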
min_soft_confirmations_per_commitment: 10000, + ..Default::default() + } + } + + async fn cleanup(&self) -> Result<()> { + self.task_manager.abort().await; + Ok(()) + } + + async fn run_test(&mut self, f: &mut TestFramework) -> Result<()> { + let da = f.bitcoin_nodes.get(0).unwrap(); + let sequencer = f.sequencer.as_ref().unwrap(); + let light_client_prover = f.light_client_prover.as_ref().unwrap(); + let full_node = f.full_node.as_ref().unwrap(); + + let da_config = &da.config; + let bitcoin_da_service_config = BitcoinServiceConfig { + node_url: format!( + "http://127.0.0.1:{}/wallet/{}", + da_config.rpc_port, + NodeKind::Bitcoin + ), + node_username: da_config.rpc_user.clone(), + node_password: da_config.rpc_password.clone(), + network: bitcoin::Network::Regtest, + da_private_key: Some( + // This is the regtest private key of batch prover + "56D08C2DDE7F412F80EC99A0A328F76688C904BD4D1435281EFC9270EC8C8707".to_string(), + ), + tx_backup_dir: Self::test_config() + .dir + .join("tx_backup_dir") + .display() + .to_string(), + monitoring: Default::default(), + mempool_space_url: None, + }; + let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); + + let bitcoin_da_service = Arc::new( + BitcoinService::new_with_wallet_check( + bitcoin_da_service_config, + RollupParams { + to_light_client_prefix: TO_LIGHT_CLIENT_PREFIX.to_vec(), + to_batch_proof_prefix: TO_BATCH_PROOF_PREFIX.to_vec(), + }, + tx, + ) + .await + .unwrap(), + ); + + self.task_manager + .spawn(|tk| bitcoin_da_service.clone().run_da_queue(rx, tk)); + + da.generate(FINALITY_DEPTH).await?; + let finalized_height = da.get_finalized_height().await?; + + // Wait for light client prover to create light client proof. + light_client_prover + .wait_for_l1_height(finalized_height, Some(TEN_MINS)) + .await + .unwrap(); + + // Expect light client prover to have generated light client proof + let lcp = light_client_prover + .client + .http_client() + .get_light_client_proof_by_l1_height(finalized_height) + .await?; + let lcp_output = lcp.unwrap().light_client_proof_output; + + // Get initial method ids and genesis state root + let method_ids = lcp_output.batch_proof_method_ids; + let genesis_state_root = lcp_output.state_root; + + let fork1_height = method_ids[1].0; + + // Even though the state diff is 100kb the proof will be 200kb because the fake receipt claim also has the journal + // But the compressed size will go down to 100kb + let state_diff_100kb = create_random_state_diff(100); + + // Create a 100kb (compressed size) batch proof (not 1mb because if testing feature is enabled max body size is 39700), this batch proof will consist of 3 chunk and 1 aggregate transactions because 100kb/40kb = 3 chunks + let verifiable_100kb_batch_proof = create_serialized_fake_receipt_batch_proof( + genesis_state_root, + [1u8; 32], + fork1_height + 1, + method_ids[1].1, + Some(state_diff_100kb), + false, + ); + println!("size of proof: {:?}", verifiable_100kb_batch_proof.len()); + let _ = bitcoin_da_service + .send_transaction_with_fee_rate(DaTxRequest::ZKProof(verifiable_100kb_batch_proof), 1) + .await + .unwrap(); + + // In total 3 chunks 1 aggregate with all of them having reveal and commit txs we should have 8 txs in mempool + da.wait_mempool_len(8, Some(TEN_MINS)).await?; + + // Finalize the DA block which contains the batch proof txs + da.generate(FINALITY_DEPTH).await?; + + // Make sure all of them are in the block + da.wait_mempool_len(0, Some(TEN_MINS)).await?; + + let batch_proof_l1_height = da.get_finalized_height().await?; + + // let block = 
da.get_block(batch_proof_l1_height).await?; + + // for tx in block.txdata.iter() { + // println!("tx: {:?}", tx); + // } + + sequencer.client.send_publish_batch_request().await?; + // Wait for light client prover to process verifiable batch proof + light_client_prover + .wait_for_l1_height(batch_proof_l1_height, Some(TEN_MINS)) + .await + .unwrap(); + sequencer.client.send_publish_batch_request().await?; + + full_node + .wait_for_l1_height(batch_proof_l1_height, Some(TEN_MINS)) + .await?; + + // Check if full node can verify the proof + let batch_proofs = + wait_for_zkproofs(full_node, batch_proof_l1_height, Some(TEN_MINS)).await?; + + println!( + "output: {:?}", + batch_proofs[0].proof_output.final_state_root + ); + + // Expect light client prover to have generated light client proof + let lcp = light_client_prover + .client + .http_client() + .get_light_client_proof_by_l1_height(batch_proof_l1_height) + .await?; + + let lcp_output = lcp.unwrap().light_client_proof_output; + + // The batch proof should have updated the state root and the last l2 height + assert_eq!(lcp_output.state_root, [1u8; 32]); + assert_eq!(lcp_output.last_l2_height, fork1_height + 1); + assert!(lcp_output.unchained_batch_proofs_info.is_empty()); + + // Now generate another proof but this time: + // Have 4 chunks and 1 aggregate + // First two chunks should be in block n + // Last two chunks should be in block n+1 + // And the aggregate should be in block n+2 + // After the block n+2 is processed we should see the state root updated + let state_diff_130kb = create_random_state_diff(130); + + let verifiable_130kb_batch_proof = create_serialized_fake_receipt_batch_proof( + [1u8; 32], + [2u8; 32], + fork1_height * 2, + method_ids[1].1, + Some(state_diff_130kb), + false, + ); + println!("size of proof: {:?}", verifiable_130kb_batch_proof.len()); + let _ = bitcoin_da_service + .send_transaction_with_fee_rate(DaTxRequest::ZKProof(verifiable_130kb_batch_proof), 1) + .await + .unwrap(); + + // In total 4 chunks and 1 aggregate, each with a commit and a reveal tx, so we should have 10 txs in mempool + da.wait_mempool_len(10, Some(TEN_MINS)).await?; + + // Get txs from mempool + let txs = da.get_raw_mempool().await?; + // Get the first four txs (first two chunks) + let first_two_chunks = txs + .iter() + .take(4) + .map(|tx| tx.to_string()) + .collect::<Vec<String>>(); + let last_two_chunks = txs + .iter() + .skip(4) + .take(4) + .map(|tx| tx.to_string()) + .collect::<Vec<String>>(); + let aggregate = txs + .iter() + .skip(8) + .map(|tx| tx.to_string()) + .collect::<Vec<String>>(); + + da.generate_block( + "03015a7c4d2cc1c771198686e2ebef6fe7004f4136d61f6225b061d1bb9b821b9b".to_owned(), + first_two_chunks, + ) + .await?; + // First two chunks should be in block n + da.wait_mempool_len(6, Some(TEN_MINS)).await?; + + da.generate_block( + "03015a7c4d2cc1c771198686e2ebef6fe7004f4136d61f6225b061d1bb9b821b9b".to_owned(), + last_two_chunks, + ) + .await?; + // Last two chunks should be in block n+1 + da.wait_mempool_len(2, Some(TEN_MINS)).await?; + + da.generate_block( + "03015a7c4d2cc1c771198686e2ebef6fe7004f4136d61f6225b061d1bb9b821b9b".to_owned(), + aggregate, + ) + .await?; + // Aggregate should be in block n+2 + da.wait_mempool_len(0, Some(TEN_MINS)).await?; + + // Finalize the DA block which contains the aggregate txs + da.generate(FINALITY_DEPTH).await?; + + let batch_proof_l1_height = da.get_finalized_height().await?; + + // Wait for light client prover to process verifiable batch proof + light_client_prover + .wait_for_l1_height(batch_proof_l1_height,
Some(TEN_MINS)) + .await + .unwrap(); + + // Expect light client prover to have generated light client proof + let lcp = light_client_prover + .client + .http_client() + .get_light_client_proof_by_l1_height(batch_proof_l1_height) + .await?; + + let lcp_output = lcp.unwrap().light_client_proof_output; + + // The batch proof should have updated the state root and the last l2 height + assert_eq!(lcp_output.state_root, [2u8; 32]); + assert_eq!(lcp_output.last_l2_height, fork1_height * 2); + assert!(lcp_output.unchained_batch_proofs_info.is_empty()); + + // let random_method_id = [1u32; 8]; + // let unverifiable_1mb_batch_proof = create_serialized_fake_receipt_batch_proof( + // genesis_state_root, + // [1u8; 32], + // fork1_height + 1, + // random_method_id, + // Some(state_diff_100kb), + // false, + // ); + // let _ = bitcoin_da_service + // .send_transaction_with_fee_rate(DaTxRequest::ZKProof(unverifiable_1mb_batch_proof), 1) + // .await + // .unwrap(); + + // let verifiable_batch_proof = create_serialized_fake_receipt_batch_proof( + // [2u8; 32], + // [3u8; 32], + // fork1_height * 3, + // method_ids[1].1, + // ); + // let _ = bitcoin_da_service + // .send_transaction_with_fee_rate(DaTxRequest::ZKProof(verifiable_batch_proof), 1) + // .await + // .unwrap(); + + // let verifiable_batch_proof = create_serialized_fake_receipt_batch_proof( + // [1u8; 32], + // [2u8; 32], + // fork1_height * 2, + // method_ids[1].1, + // ); + // let _ = bitcoin_da_service + // .send_transaction_with_fee_rate(DaTxRequest::ZKProof(verifiable_batch_proof), 1) + // .await + // .unwrap(); + + // // Give it a random method id to make it unverifiable + + // let unverifiable_batch_proof = create_serialized_fake_receipt_batch_proof( + // [3u8; 32], + // [4u8; 32], + // fork1_height * 4, + // random_method_id, + // ); + // let _ = bitcoin_da_service + // .send_transaction_with_fee_rate(DaTxRequest::ZKProof(unverifiable_batch_proof), 1) + // .await + // .unwrap(); + + // // Ensure that all four batch proofs is submitted to DA + // da.wait_mempool_len(10, None).await?; + + // // Finalize the DA block which contains the batch proof txs + // da.generate(FINALITY_DEPTH).await?; + + // let batch_proof_l1_height = da.get_finalized_height().await?; + + // // Wait for light client prover to process unverifiable batch proof + // light_client_prover + // .wait_for_l1_height(batch_proof_l1_height, Some(TEN_MINS)) + // .await + // .unwrap(); + + // // Expect light client prover to have generated light client proof without panic but it should not have updated the state root + // let lcp = light_client_prover + // .client + // .http_client() + // .get_light_client_proof_by_l1_height(batch_proof_l1_height) + // .await?; + + // let lcp_output = lcp.unwrap().light_client_proof_output; + + // // The unverifiable batch proof and malformed journal batch proof should not have updated the state root or the last l2 height + // assert_eq!(lcp_output.state_root, [3u8; 32]); + // assert_eq!(lcp_output.last_l2_height, fork1_height * 3); + // assert!(lcp_output.unchained_batch_proofs_info.is_empty()); + + Ok(()) + } +} + +#[tokio::test] +async fn test_verify_chunked_txs_in_light_client() -> Result<()> { + TestCaseRunner::new(VerifyChunkedTxsInLightClient::default()) + .set_citrea_path(get_citrea_path()) + .run() + .await +} + +pub fn create_random_state_diff(size_in_kb: u64) -> BTreeMap, Option>> { + let mut rng = thread_rng(); + let mut map = BTreeMap::new(); + let mut total_size: u64 = 0; + + // Convert size to bytes + let size_in_bytes = size_in_kb 
* 1024; + println!("Size in bytes: {}", size_in_bytes); + + while total_size < size_in_bytes { + // Generate a random 32-byte key + let key: Vec = (0..32).map(|_| rng.gen::()).collect(); + + // Randomly decide if the value is `None` or a `Vec` of random length + let value: Option> = if rng.gen_bool(0.1) { + None + } else { + let value_size: usize = rng.gen_range(1..=2048); + Some((0..value_size).map(|_| rng.gen::()).collect()) + }; + + // Calculate the size of the key and value + let key_size = key.len() as u64; + let value_size = match &value { + Some(v) => v.len() as u64 + 1, + None => 1, + }; + + // Add to the map + map.insert(key, value); + + // Update the total size + total_size += key_size + value_size; + } + println!("Total size: {}", total_size); + + map +} + +#[test] +fn test_random_gen() { + let state_diff = create_random_state_diff(1); + for (key, value) in state_diff { + println!("Key: {:?}, Value: {:?}", key, value); + } } -fn create_serialized_fake_receipt_batch_proof_with_malformed_journal( +fn create_serialized_fake_receipt_batch_proof( initial_state_root: [u8; 32], final_state_root: [u8; 32], last_l2_height: u64, method_id: [u32; 8], + state_diff: Option, Option>>>, + malformed_journal: bool, ) -> Vec { + let sequencer_da_public_key = vec![ + 2, 88, 141, 32, 42, 252, 193, 238, 74, 181, 37, 76, 120, 71, 236, 37, 185, 161, 53, 187, + 218, 15, 43, 198, 158, 225, 167, 20, 116, 159, 215, 125, 201, + ]; + let sequencer_public_key = vec![ + 32, 64, 64, 227, 100, 193, 15, 43, 236, 156, 31, 229, 0, 161, 205, 76, 36, 124, 137, 214, + 80, 160, 30, 215, 232, 44, 171, 168, 103, 135, 124, 33, + ]; let batch_proof_output = BatchProofCircuitOutput:: { initial_state_root, final_state_root, @@ -964,26 +1363,22 @@ fn create_serialized_fake_receipt_batch_proof_with_malformed_journal( da_slot_hash: [0u8; 32].into(), prev_soft_confirmation_hash: [0u8; 32], final_soft_confirmation_hash: [0u8; 32], - state_diff: BTreeMap::new(), + state_diff: state_diff.unwrap_or_default(), sequencer_commitments_range: (0, 0), - sequencer_da_public_key: [0u8; 32].to_vec(), - sequencer_public_key: [0u8; 32].to_vec(), + sequencer_da_public_key, + sequencer_public_key, preproven_commitments: vec![], }; - let output_serialized = borsh::to_vec(&batch_proof_output).unwrap(); + let mut output_serialized = borsh::to_vec(&batch_proof_output).unwrap(); - let mut output_serialized_malformed = vec![1u8]; - output_serialized_malformed.extend(output_serialized.clone()); + // Distorts the output and make it unparsable + if malformed_journal { + output_serialized.push(1u8); + } - let claim = MaybePruned::Value(ReceiptClaim::ok( - method_id, - output_serialized_malformed.clone(), - )); + let claim = MaybePruned::Value(ReceiptClaim::ok(method_id, output_serialized.clone())); let fake_receipt = FakeReceipt::new(claim); // Receipt with verifiable claim - let receipt = Receipt::new( - InnerReceipt::Fake(fake_receipt), - output_serialized_malformed.clone(), - ); + let receipt = Receipt::new(InnerReceipt::Fake(fake_receipt), output_serialized); bincode::serialize(&receipt).unwrap() } diff --git a/crates/bitcoin-da/src/helpers/builders/light_client_proof_namespace.rs b/crates/bitcoin-da/src/helpers/builders/light_client_proof_namespace.rs index be1477cc2..dd41db339 100644 --- a/crates/bitcoin-da/src/helpers/builders/light_client_proof_namespace.rs +++ b/crates/bitcoin-da/src/helpers/builders/light_client_proof_namespace.rs @@ -359,112 +359,151 @@ pub fn create_inscription_type_1( ); } // push end if - let reveal_script = 
reveal_script_builder.push_opcode(OP_ENDIF).into_script(); - - let (control_block, merkle_root, tapscript_hash) = - build_taproot(&reveal_script, public_key, SECP256K1); - - // create commit tx address - let commit_tx_address = Address::p2tr(SECP256K1, public_key, merkle_root, network); + let reveal_script_builder = reveal_script_builder.push_opcode(OP_ENDIF); + + // Start loop to find a 'nonce' i.e. random number that makes the reveal tx hash starting with zeros given length + let mut nonce: i64 = 16; // skip the first digits to avoid OP_PUSHNUM_X + loop { + if nonce % 1000 == 0 { + trace!(nonce, "Trying to find commit & reveal nonce for chunk"); + if nonce > 16384 { + warn!("Too many iterations finding nonce for chunk"); + } + } + // ownerships are moved to the loop + let mut reveal_script_builder = reveal_script_builder.clone(); + + // push nonce + reveal_script_builder = reveal_script_builder + .push_slice(nonce.to_le_bytes()) + // drop the second item, bc there is a big chance it's 0 (tx kind) and nonce is >= 16 + .push_opcode(OP_NIP); + nonce += 1; + + // finalize reveal script + let reveal_script = reveal_script_builder.into_script(); + + let (control_block, merkle_root, tapscript_hash) = + build_taproot(&reveal_script, public_key, SECP256K1); + + // create commit tx address + let commit_tx_address = Address::p2tr(SECP256K1, public_key, merkle_root, network); + + let reveal_value = REVEAL_OUTPUT_AMOUNT; + let fee = get_size_reveal( + change_address.script_pubkey(), + reveal_value, + &reveal_script, + &control_block, + ) as u64 + * reveal_fee_rate; + let reveal_input_value = fee + reveal_value; + + // build commit tx + let (unsigned_commit_tx, leftover_utxos) = build_commit_transaction( + prev_utxo.clone(), + utxos.clone(), + commit_tx_address.clone(), + change_address.clone(), + reveal_input_value, + commit_fee_rate, + )?; + + let output_to_reveal = unsigned_commit_tx.output[0].clone(); + + // If commit + let commit_change = if unsigned_commit_tx.output.len() > 1 { + Some(UTXO { + tx_id: unsigned_commit_tx.compute_txid(), + vout: 1, + address: None, + script_pubkey: unsigned_commit_tx.output[0].script_pubkey.to_hex_string(), + amount: unsigned_commit_tx.output[1].value.to_sat(), + confirmations: 0, + spendable: true, + solvable: true, + }) + } else { + None + }; + + let mut reveal_tx = build_reveal_transaction( + output_to_reveal.clone(), + unsigned_commit_tx.compute_txid(), + 0, + change_address.clone(), + reveal_value, + reveal_fee_rate, + &reveal_script, + &control_block, + )?; + + build_witness( + &unsigned_commit_tx, + &mut reveal_tx, + tapscript_hash, + reveal_script, + control_block, + &key_pair, + SECP256K1, + ); - let reveal_value = REVEAL_OUTPUT_AMOUNT; - let fee = get_size_reveal( - change_address.script_pubkey(), - reveal_value, - &reveal_script, - &control_block, - ) as u64 - * reveal_fee_rate; - let reveal_input_value = fee + reveal_value; + let reveal_wtxid = reveal_tx.compute_wtxid(); + let reveal_hash = reveal_wtxid.as_raw_hash().to_byte_array(); - // build commit tx - let (unsigned_commit_tx, leftover_utxos) = build_commit_transaction( - prev_utxo.clone(), - utxos, - commit_tx_address.clone(), - change_address.clone(), - reveal_input_value, - commit_fee_rate, - )?; + // check if first N bytes equal to the given prefix + if !reveal_hash.starts_with(reveal_tx_prefix) { + // try another nonce + continue; + } - let output_to_reveal = unsigned_commit_tx.output[0].clone(); + // check if inscription locked to the correct address + let recovery_key_pair = 
key_pair.tap_tweak(SECP256K1, merkle_root); + let (x_only_pub_key, _parity) = recovery_key_pair.to_inner().x_only_public_key(); + assert_eq!( + Address::p2tr_tweaked( + TweakedPublicKey::dangerous_assume_tweaked(x_only_pub_key), + network, + ), + commit_tx_address + ); - // If commit - let commit_change = if unsigned_commit_tx.output.len() > 1 { - Some(UTXO { - tx_id: unsigned_commit_tx.compute_txid(), - vout: 1, + // set prev utxo to last reveal tx[0] to chain txs in order + prev_utxo = Some(UTXO { + tx_id: reveal_tx.compute_txid(), + vout: 0, + script_pubkey: reveal_tx.output[0].script_pubkey.to_hex_string(), address: None, - script_pubkey: unsigned_commit_tx.output[0].script_pubkey.to_hex_string(), - amount: unsigned_commit_tx.output[1].value.to_sat(), + amount: reveal_tx.output[0].value.to_sat(), confirmations: 0, spendable: true, solvable: true, - }) - } else { - None - }; + }); - let mut reveal_tx = build_reveal_transaction( - output_to_reveal.clone(), - unsigned_commit_tx.compute_txid(), - 0, - change_address.clone(), - reveal_value, - reveal_fee_rate, - &reveal_script, - &control_block, - )?; + commit_chunks.push(unsigned_commit_tx); + reveal_chunks.push(reveal_tx); - build_witness( - &unsigned_commit_tx, - &mut reveal_tx, - tapscript_hash, - reveal_script, - control_block, - &key_pair, - SECP256K1, - ); - - // check if inscription locked to the correct address - let recovery_key_pair = key_pair.tap_tweak(SECP256K1, merkle_root); - let (x_only_pub_key, _parity) = recovery_key_pair.to_inner().x_only_public_key(); - assert_eq!( - Address::p2tr_tweaked( - TweakedPublicKey::dangerous_assume_tweaked(x_only_pub_key), - network, - ), - commit_tx_address - ); + // Replace utxos with leftovers so we don't use prev utxos in next chunks + utxos = leftover_utxos; + if let Some(change) = commit_change { + utxos.push(change); + } - // set prev utxo to last reveal tx[0] to chain txs in order - prev_utxo = Some(UTXO { - tx_id: reveal_tx.compute_txid(), - vout: 0, - script_pubkey: reveal_tx.output[0].script_pubkey.to_hex_string(), - address: None, - amount: reveal_tx.output[0].value.to_sat(), - confirmations: 0, - spendable: true, - solvable: true, - }); - - commit_chunks.push(unsigned_commit_tx); - reveal_chunks.push(reveal_tx); - - // Replace utxos with leftovers so we don't use prev utxos in next chunks - utxos = leftover_utxos; - if let Some(change) = commit_change { - utxos.push(change); + break; } } - let reveal_tx_ids: Vec<_> = reveal_chunks + let (reveal_tx_ids, reveal_wtx_ids): (Vec<_>, Vec<_>) = reveal_chunks .iter() - .map(|tx| tx.compute_txid().to_byte_array()) + .map(|tx| { + ( + tx.compute_txid().to_byte_array(), + tx.compute_wtxid().to_byte_array(), + ) + }) .collect(); - let aggregate = DaDataLightClient::Aggregate(reveal_tx_ids); + let aggregate = DaDataLightClient::Aggregate(reveal_tx_ids, reveal_wtx_ids); // To sign the list of tx ids we assume they form a contigious list of bytes let reveal_body: Vec = @@ -500,9 +539,9 @@ pub fn create_inscription_type_1( let mut nonce: i64 = 16; // skip the first digits to avoid OP_PUSHNUM_X loop { if nonce % 1000 == 0 { - trace!(nonce, "Trying to find commit & reveal nonce"); + trace!(nonce, "Trying to find commit & reveal nonce for aggr"); if nonce > 16384 { - warn!("Too many iterations finding nonce"); + warn!("Too many iterations finding nonce for aggr"); } } let utxos = utxos.clone(); @@ -515,6 +554,7 @@ pub fn create_inscription_type_1( .push_slice(nonce.to_le_bytes()) // drop the second item, bc there is a big chance it's 0 (tx kind) and 
nonce is >= 16 .push_opcode(OP_NIP); + nonce += 1; // finalize reveal script let reveal_script = reveal_script_builder.into_script(); @@ -609,8 +649,6 @@ pub fn create_inscription_type_1( ); } } - - nonce += 1; } } diff --git a/crates/bitcoin-da/src/helpers/parsers.rs b/crates/bitcoin-da/src/helpers/parsers.rs index 593f5c3bb..fd9ac3730 100644 --- a/crates/bitcoin-da/src/helpers/parsers.rs +++ b/crates/bitcoin-da/src/helpers/parsers.rs @@ -69,6 +69,11 @@ pub trait VerifyParsed { fn signature(&self) -> &[u8]; fn body(&self) -> &[u8]; + /// Returns the hash of the body + fn get_unverified_hash(&self) -> Option<[u8; 32]> { + Some(calculate_sha256(self.body())) + } + /// Verifies the signature of the inscription and returns the hash of the body fn get_sig_verified_hash(&self) -> Option<[u8; 32]> { let public_key = secp256k1::PublicKey::from_slice(self.public_key()); @@ -125,6 +130,18 @@ impl VerifyParsed for ParsedSequencerCommitment { } } +impl VerifyParsed for ParsedChunk { + fn public_key(&self) -> &[u8] { + unimplemented!("public_key call Should not be used with chunks") + } + fn signature(&self) -> &[u8] { + unimplemented!("signature call Should not be used with chunks") + } + fn body(&self) -> &[u8] { + &self.body + } +} + impl VerifyParsed for ParsedBatchProverMethodId { fn public_key(&self) -> &[u8] { &self.public_key @@ -432,6 +449,16 @@ mod light_client { } } + // Nonce + let _nonce = read_push_bytes(instructions)?; + if OP_NIP != read_opcode(instructions)? { + return Err(ParserError::UnexpectedOpcode); + } + // END of transaction + if instructions.next().is_some() { + return Err(ParserError::UnexpectedOpcode); + } + let body_size: usize = chunks.iter().map(|c| c.len()).sum(); let mut body = Vec::with_capacity(body_size); for chunk in chunks { diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs index d7b6aa933..63995ba50 100644 --- a/crates/bitcoin-da/src/service.rs +++ b/crates/bitcoin-da/src/service.rs @@ -18,7 +18,7 @@ use bitcoin::hashes::Hash; use bitcoin::secp256k1::SecretKey; use bitcoin::{Amount, BlockHash, CompactTarget, Transaction, Txid, Wtxid}; use bitcoincore_rpc::json::{SignRawTransactionInput, TestMempoolAcceptResult}; -use bitcoincore_rpc::{Auth, Client, Error, RpcApi, RpcError}; +use bitcoincore_rpc::{Auth, Client, Error as BitcoinError, Error, RpcApi, RpcError}; use borsh::BorshDeserialize; use citrea_primitives::compression::{compress_blob, decompress_blob}; use citrea_primitives::MAX_TXBODY_SIZE; @@ -586,7 +586,7 @@ impl BitcoinService { { bail!( "{}", - reject_reason.unwrap_or("[testmempoolaccept] Unkown rejection".to_string()) + reject_reason.unwrap_or("[testmempoolaccept] Unknown rejection".to_string()) ) } } @@ -754,6 +754,11 @@ impl DaService for BitcoinService { Ok(head_block_header.header) } + fn decompress_chunks(&self, complete_chunks: Vec) -> Result, Self::Error> { + borsh::from_slice(decompress_blob(&complete_chunks).as_slice()) + .map_err(|_| anyhow!("Failed to parse complete chunks")) + } + async fn extract_relevant_zk_proofs( &self, block: &Self::FilteredBlock, @@ -814,7 +819,7 @@ impl DaService for BitcoinService { let mut body = Vec::new(); let data = DaDataLightClient::try_from_slice(&aggregate.body) .map_err(|e| anyhow!("{}: Failed to parse aggregate: {e}", tx_id))?; - let DaDataLightClient::Aggregate(chunk_ids) = data else { + let DaDataLightClient::Aggregate(chunk_ids, _wtx_ids) = data else { error!("{}: Aggregate: unexpected kind", tx_id); continue; }; @@ -830,12 +835,9 @@ impl DaService for BitcoinService { 
self.client .get_raw_transaction(&chunk_id, None) .await - .map_err(|e| { - use bitcoincore_rpc::Error; - match e { - Error::Io(_) => backoff::Error::transient(e), - _ => backoff::Error::permanent(e), - } + .map_err(|e| match e { + BitcoinError::Io(_) => backoff::Error::transient(e), + _ => backoff::Error::permanent(e), }) }) .await; @@ -847,6 +849,7 @@ impl DaService for BitcoinService { } } }; + let wrapped: TransactionWrapper = tx_raw.into(); let parsed = match parse_light_client_transaction(&wrapped) { Ok(r) => r, @@ -989,6 +992,7 @@ impl DaService for BitcoinService { let mut relevant_txs = vec![]; for tx in &completeness_proof { + let wtxid = tx.compute_wtxid(); match namespace { DaNamespace::ToBatchProver => { if let Ok(tx) = parse_batch_proof_transaction(tx) { @@ -999,6 +1003,7 @@ impl DaService for BitcoinService { seq_comm.body, seq_comm.public_key, hash, + None, ); relevant_txs.push(relevant_tx); @@ -1013,9 +1018,12 @@ impl DaService for BitcoinService { ParsedLightClientTransaction::Complete(complete) => { if let Some(hash) = complete.get_sig_verified_hash() { let blob = decompress_blob(&complete.body); - let relevant_tx = - BlobWithSender::new(blob, complete.public_key, hash); - + let relevant_tx = BlobWithSender::new( + blob, + complete.public_key, + hash, + Some(wtxid.to_byte_array()), + ); relevant_txs.push(relevant_tx); } } @@ -1025,13 +1033,19 @@ impl DaService for BitcoinService { aggregate.body, aggregate.public_key, hash, + Some(wtxid.to_byte_array()), ); - relevant_txs.push(relevant_tx); } } - ParsedLightClientTransaction::Chunk(_) => { - // ignore + ParsedLightClientTransaction::Chunk(chunk) => { + let relevant_tx = BlobWithSender::new( + chunk.body, + vec![0], + [0; 32], + Some(wtxid.to_byte_array()), + ); + relevant_txs.push(relevant_tx); } ParsedLightClientTransaction::BatchProverMethodId(method_id) => { if let Some(hash) = method_id.get_sig_verified_hash() { @@ -1039,8 +1053,8 @@ impl DaService for BitcoinService { method_id.body, method_id.public_key, hash, + Some(wtxid.to_byte_array()), ); - relevant_txs.push(relevant_tx); } } @@ -1180,7 +1194,7 @@ pub fn get_relevant_blobs_from_txs( ParsedBatchProofTransaction::SequencerCommitment(seq_comm) => { if let Some(hash) = seq_comm.get_sig_verified_hash() { let relevant_tx = - BlobWithSender::new(seq_comm.body, seq_comm.public_key, hash); + BlobWithSender::new(seq_comm.body, seq_comm.public_key, hash, None); relevant_txs.push(relevant_tx); } @@ -1208,6 +1222,8 @@ impl From for [u8; 32] { fn split_proof(zk_proof: Proof) -> RawLightClientData { let original_blob = borsh::to_vec(&zk_proof).expect("zk::Proof serialize must not fail"); let original_compressed = compress_blob(&original_blob); + println!("original_compressed.len() = {}", original_compressed.len()); + println!("MAX_TXBODY_SIZE = {}", MAX_TXBODY_SIZE); if original_compressed.len() < MAX_TXBODY_SIZE { let data = DaDataLightClient::Complete(zk_proof); let blob = borsh::to_vec(&data).expect("zk::Proof serialize must not fail"); @@ -1220,6 +1236,7 @@ fn split_proof(zk_proof: Proof) -> RawLightClientData { let blob = borsh::to_vec(&data).expect("zk::Proof Chunk serialize must not fail"); chunks.push(blob) } + println!("chunks.len() = {}", chunks.len()); RawLightClientData::Chunks(chunks) } } diff --git a/crates/bitcoin-da/src/spec/blob.rs b/crates/bitcoin-da/src/spec/blob.rs index 3f501ddff..4ed870de8 100644 --- a/crates/bitcoin-da/src/spec/blob.rs +++ b/crates/bitcoin-da/src/spec/blob.rs @@ -13,8 +13,20 @@ pub struct BlobBuf { pub offset: usize, } +// 
BlobWithSender is a wrapper around BlobBuf to implement BlobReaderTrait +#[derive(Clone, Debug, PartialEq, BorshSerialize, BorshDeserialize, Serialize, Deserialize)] +pub struct BlobWithSender { + pub hash: [u8; 32], + + pub sender: AddressWrapper, + + pub blob: CountedBufReader, + + pub wtxid: Option<[u8; 32]>, +} + impl BlobWithSender { - pub fn new(blob: Vec, sender: Vec, hash: [u8; 32]) -> Self { + pub fn new(blob: Vec, sender: Vec, hash: [u8; 32], wtxid: Option<[u8; 32]>) -> Self { Self { blob: CountedBufReader::new(BlobBuf { data: blob, @@ -22,6 +34,7 @@ impl BlobWithSender { }), sender: AddressWrapper(sender), hash, + wtxid, } } } @@ -40,16 +53,6 @@ impl Buf for BlobBuf { } } -// BlobWithSender is a wrapper around BlobBuf to implement BlobReaderTrait -#[derive(Clone, Debug, PartialEq, BorshSerialize, BorshDeserialize, Serialize, Deserialize)] -pub struct BlobWithSender { - pub hash: [u8; 32], - - pub sender: AddressWrapper, - - pub blob: CountedBufReader, -} - impl BlobReaderTrait for BlobWithSender { type Address = AddressWrapper; @@ -61,6 +64,10 @@ impl BlobReaderTrait for BlobWithSender { self.hash } + fn wtxid(&self) -> Option<[u8; 32]> { + self.wtxid + } + fn verified_data(&self) -> &[u8] { self.blob.accumulator() } @@ -74,4 +81,21 @@ impl BlobReaderTrait for BlobWithSender { self.blob.advance(num_bytes); self.verified_data() } + + fn serialize_v1(&self) -> borsh::io::Result> { + let v1 = BlobWithSenderV1 { + hash: self.hash, + sender: self.sender.clone(), + blob: &self.blob, + }; + borsh::to_vec(&v1) + } +} + +#[derive(BorshSerialize)] +/// Internal type to ease serialization process +struct BlobWithSenderV1<'a> { + hash: [u8; 32], + sender: AddressWrapper, + blob: &'a CountedBufReader, } diff --git a/crates/bitcoin-da/src/verifier.rs b/crates/bitcoin-da/src/verifier.rs index a0061226a..2f5e204f8 100644 --- a/crates/bitcoin-da/src/verifier.rs +++ b/crates/bitcoin-da/src/verifier.rs @@ -57,6 +57,7 @@ pub enum ValidationError { InvalidTargetHash, InvalidTimestamp, HeaderInclusionTxCountMismatch, + FailedToDeserializeCompleteChunks, } impl DaVerifier for BitcoinVerifier { @@ -71,6 +72,11 @@ impl DaVerifier for BitcoinVerifier { } } + fn decompress_chunks(&self, complete_chunks: Vec) -> Result, Self::Error> { + borsh::from_slice(decompress_blob(&complete_chunks).as_slice()) + .map_err(|_| ValidationError::FailedToDeserializeCompleteChunks) + } + // Verify that the given list of blob transactions is complete and correct. fn verify_transactions( &self, @@ -144,8 +150,14 @@ impl DaVerifier for BitcoinVerifier { } } } - ParsedLightClientTransaction::Chunk(_chunk) => { - // ignore + ParsedLightClientTransaction::Chunk(chunk) => { + if let Some(blob_content) = + verified_blob_chunk(&chunk, &mut blobs_iter, wtxid)? 
+ { + if blob_content != chunk.body { + return Err(ValidationError::BlobContentWasModified); + } + } } ParsedLightClientTransaction::BatchProverMethodId(method_id) => { if let Some(blob_content) = @@ -525,6 +537,35 @@ impl BitcoinVerifier { } } +fn verified_blob_chunk<'a, T, I>( + tx: &T, + blobs_iter: &mut I, + wtxid: &[u8; 32], +) -> Result, ValidationError> +where + T: VerifyParsed, + I: Iterator, +{ + if let Some(blob_hash) = tx.get_unverified_hash() { + let blob = blobs_iter.next(); + + let Some(blob) = blob else { + return Err(ValidationError::ValidBlobNotFoundInBlobs); + }; + + if blob.hash != blob_hash { + return Err(ValidationError::BlobWasTamperedWith); + } + + if blob.wtxid != Some(*wtxid) { + return Err(ValidationError::BlobWasTamperedWith); + } + + return Ok(Some(blob.verified_data())); + } + Ok(None) +} + // Get associated blob content only if signatures, hashes and public keys match fn verified_blob_content<'a, T, I>( tx: &T, diff --git a/crates/bitcoin-da/tests/test_utils.rs b/crates/bitcoin-da/tests/test_utils.rs index 075224979..bd25922b8 100644 --- a/crates/bitcoin-da/tests/test_utils.rs +++ b/crates/bitcoin-da/tests/test_utils.rs @@ -444,7 +444,7 @@ pub fn get_blob_with_sender(tx: &Transaction, ty: MockData) -> anyhow::Result { DaTxsCouldntBeVerified(DaV::Error), @@ -74,7 +79,22 @@ pub fn run_circuit( .map_err(|err| LightClientVerificationError::DaTxsCouldntBeVerified(err))?; // Mapping from initial state root to final state root and last L2 height - let mut initial_to_final = std::collections::BTreeMap::<[u8; 32], ([u8; 32], u64)>::new(); + let mut initial_to_final = BTreeMap::<[u8; 32], ([u8; 32], u64)>::new(); + + let (mut last_state_root, mut last_l2_height, mut mmr_guest) = + previous_light_client_proof_output.as_ref().map_or_else( + || { + // if no previous proof, we start from genesis state root + (l2_genesis_root, 0, MMRGuest::new()) + }, + |prev_journal| { + ( + prev_journal.state_root, + prev_journal.last_l2_height, + prev_journal.mmr_guest.clone(), + ) + }, + ); // If we have a previous light client proof, check they can be chained // If not, skip for now @@ -91,14 +111,8 @@ pub fn run_circuit( } } - let (mut last_state_root, mut last_l2_height) = - previous_light_client_proof_output.as_ref().map_or_else( - || { - // if no previous proof, we start from genesis state root - (l2_genesis_root, 0) - }, - |prev_journal| (prev_journal.state_root, prev_journal.last_l2_height), - ); + let mut in_memory_chunks: BTreeMap> = Default::default(); + let mut mmr_hints = input.mmr_hints.clone(); // index only incremented on processing of a complete or aggregate DA tx let mut current_proof_index = 0u32; @@ -111,84 +125,81 @@ pub fn run_circuit( if let Ok(data) = data { match data { DaDataLightClient::Complete(proof) => { - let Ok(journal) = G::extract_raw_output(&proof) else { - // cannot parse the output, skip - continue; - }; - - let ( - batch_proof_output_initial_state_root, - batch_proof_output_final_state_root, - batch_proof_output_last_l2_height, - ) = if let Ok(output) = G::deserialize_output::< - BatchProofCircuitOutput, - >(&journal) - { - ( - output.initial_state_root, - output.final_state_root, - output.last_l2_height, - ) - } else if let Ok(output) = G::deserialize_output::< - OldBatchProofCircuitOutput, - >(&journal) - { - (output.initial_state_root, output.final_state_root, 0) - } else { - continue; // cannot parse the output, skip - }; - - // Do not add if last l2 height is smaller or equal to previous output - // This is to defend against replay attacks, for 
example if somehow there is the script of batch proof 1 we do not need to go through it again - if batch_proof_output_last_l2_height <= last_l2_height - && last_l2_height != 0 - { - current_proof_index += 1; - continue; + let expected_to_fail = expected_to_fail_hints + .next_if(|&x| x == current_proof_index) + .is_some(); + match process_complete_proof::( + proof, + &batch_proof_method_ids, + last_l2_height, + &mut initial_to_final, + expected_to_fail, + ) { + Ok(()) => current_proof_index += 1, + Err(e) => println!("Error processing complete proof: {e}"), } + } + DaDataLightClient::Aggregate(_tx_ids, wtx_ids) => { + let mut aggregate_chunks = vec![]; + for wtxid in &wtx_ids { + if let Some((wtxid, chunk)) = in_memory_chunks.remove_entry(wtxid) { + // If the wtxid belongs to a chunk that we've seen in the same L1 block, + // We add it to the aggregate. + aggregate_chunks.push(MMRChunk::new(wtxid, chunk)); + } else { + // If the wtxid belongs to a chunk that we've seen in a previous L1 block, + // We use the hints to verify the existence of the chunk. + let hint = mmr_hints.pop_front().expect("No more hints left"); - let batch_proof_method_id = if batch_proof_method_ids.len() == 1 { - // Check if last l2 height is greater than or equal to the only batch proof method id activation height - batch_proof_method_ids[0].1 - } else { - let idx = match batch_proof_method_ids - // Returns err and the index to be inserted, which is the index of the first element greater than the key - // That is why we need to subtract 1 to get the last element smaller than the key - .binary_search_by_key( - &batch_proof_output_last_l2_height, - |(height, _)| *height, - ) { - Ok(idx) => idx, - Err(idx) => idx.saturating_sub(1), - }; - batch_proof_method_ids[idx].1 - }; - - if expected_to_fail_hints - .next_if(|&x| x == current_proof_index) - .is_some() - { - // if index is in the expected to fail hints, then it should fail - G::verify_expected_to_fail(&proof, &batch_proof_method_id.into()) - .expect_err("Proof hinted to fail passed"); - } else { - // if index is not in the expected to fail hints, then it should pass - G::verify(&journal, &batch_proof_method_id.into()) - .expect("Proof hinted to pass failed"); - recursive_match_state_roots( - &mut initial_to_final, - &BatchProofInfo::new( - batch_proof_output_initial_state_root, - batch_proof_output_final_state_root, - batch_proof_output_last_l2_height, - ), - ); + // If the hint was provided as None, which could happen due to the non-existence of the chunk + // in the same block as aggregate, we skip trying to prove the aggregate. + // TODO: This is an issue we must solve in the future. Since the prover can provide a hint as None, + // it can ignore proofs, opening a censorship attack vector. 
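// A `None` hint means the native prover did not have this chunk when building the
// circuit input, so the aggregate cannot be reassembled and is effectively skipped
// (see the TODO above). A `Some((chunk, proof))` hint carries the chunk body plus an
// MMR membership proof; the chunk is only accepted into `aggregate_chunks` after
// `mmr_guest.verify_proof` confirms it was appended to the MMR in an earlier block.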
+ let Some((chunk, proof)) = hint else { + continue; // ignore this aggregate + }; + + if *wtxid != chunk.wtxid { + panic!("Hint wtxid does not match chunk wtxid!"); + } + + if mmr_guest.verify_proof(&chunk, &proof) { + aggregate_chunks.push(chunk); + } else { + panic!("Failed to verify MMR proof for hint"); + } + } } + // Concatenate complete proof + // TODO: Continue on error + let complete_proof = da_verifier + .decompress_chunks( + aggregate_chunks + .into_iter() + .flat_map(|n| n.body) + .collect(), + ) + .expect("Should decompress and borsh deserialize"); - current_proof_index += 1; + let expected_to_fail = expected_to_fail_hints + .next_if(|&x| x == current_proof_index) + .is_some(); + match process_complete_proof::( + complete_proof, + &batch_proof_method_ids, + last_l2_height, + &mut initial_to_final, + expected_to_fail, + ) { + Ok(()) => current_proof_index += 1, + Err(e) => println!("Error processing aggregated proof: {e}"), + } + } + DaDataLightClient::Chunk(chunk) => { + // Store the chunk in memory + in_memory_chunks + .insert(blob.wtxid().expect("Chunk should have a wtxid"), chunk); } - DaDataLightClient::Aggregate(_) => todo!(), - DaDataLightClient::Chunk(_) => todo!(), DaDataLightClient::BatchProofMethodId(_) => {} // if coming from batch prover, ignore } } @@ -227,6 +238,10 @@ pub fn run_circuit( // Collect unchained outputs let unchained_outputs = collect_unchained_outputs(&initial_to_final, last_l2_height); + for (wtxid, chunk) in in_memory_chunks { + mmr_guest.append(MMRChunk::new(wtxid, chunk)); + } + Ok(LightClientCircuitOutput { state_root: last_state_root, light_client_proof_method_id: input.light_client_proof_method_id, @@ -234,5 +249,77 @@ pub fn run_circuit( unchained_batch_proofs_info: unchained_outputs, last_l2_height, batch_proof_method_ids, + mmr_guest, }) } + +fn process_complete_proof( + proof: Vec, + batch_proof_method_ids: &InitialBatchProofMethodIds, + last_l2_height: u64, + initial_to_final: &mut std::collections::BTreeMap<[u8; 32], ([u8; 32], u64)>, + expected_to_fail: bool, +) -> Result<(), CircuitError> { + let Ok(journal) = G::extract_raw_output(&proof) else { + return Err("Failed to extract output from proof"); + }; + + let ( + batch_proof_output_initial_state_root, + batch_proof_output_final_state_root, + batch_proof_output_last_l2_height, + ) = if let Ok(output) = + G::deserialize_output::>(&journal) + { + ( + output.initial_state_root, + output.final_state_root, + output.last_l2_height, + ) + } else if let Ok(output) = + G::deserialize_output::>(&journal) + { + (output.initial_state_root, output.final_state_root, 0) + } else { + return Err("Failed to parse proof"); + }; + + // Do not add if last l2 height is smaller or equal to previous output + // This is to defend against replay attacks, for example if somehow there is the script of batch proof 1 we do not need to go through it again + if batch_proof_output_last_l2_height <= last_l2_height && last_l2_height != 0 { + return Err("Last L2 height is less than proof's last l2 height"); + } + + let batch_proof_method_id = if batch_proof_method_ids.len() == 1 { + batch_proof_method_ids[0].1 + } else { + let idx = match batch_proof_method_ids + // Returns err and the index to be inserted, which is the index of the first element greater than the key + // That is why we need to subtract 1 to get the last element smaller than the key + .binary_search_by_key(&batch_proof_output_last_l2_height, |(height, _)| *height) + { + Ok(idx) => idx, + Err(idx) => idx.saturating_sub(1), + }; + 
batch_proof_method_ids[idx].1 + }; + + if expected_to_fail { + // if index is in the expected to fail hints, then it should fail + G::verify_expected_to_fail(&proof, &batch_proof_method_id.into()) + .expect_err("Proof hinted to fail passed"); + } else { + // if index is not in the expected to fail hints, then it should pass + G::verify(&journal, &batch_proof_method_id.into()).expect("Proof hinted to pass failed"); + recursive_match_state_roots( + initial_to_final, + &BatchProofInfo::new( + batch_proof_output_initial_state_root, + batch_proof_output_final_state_root, + batch_proof_output_last_l2_height, + ), + ); + } + + Ok(()) +} diff --git a/crates/light-client-prover/src/da_block_handler.rs b/crates/light-client-prover/src/da_block_handler.rs index 60edffbb6..9deaf13e5 100644 --- a/crates/light-client-prover/src/da_block_handler.rs +++ b/crates/light-client-prover/src/da_block_handler.rs @@ -1,4 +1,4 @@ -use std::collections::{HashMap, VecDeque}; +use std::collections::{BTreeMap, HashMap, VecDeque}; use std::sync::Arc; use borsh::BorshDeserialize; @@ -7,9 +7,11 @@ use citrea_common::da::get_da_block_at_height; use citrea_common::LightClientProverConfig; use citrea_primitives::forks::fork_from_block_number; use sov_db::ledger_db::{LightClientProverLedgerOps, SharedLedgerOps}; +use sov_db::mmr_db::MmrDB; use sov_db::schema::types::{SlotNumber, StoredLightClientProofOutput}; use sov_modules_api::{BatchProofCircuitOutput, BlobReaderTrait, DaSpec, Zkvm}; use sov_rollup_interface::da::{BlockHeaderTrait, DaDataLightClient, DaNamespace}; +use sov_rollup_interface::mmr::{MMRChunk, MMRNative, Wtxid}; use sov_rollup_interface::services::da::{DaService, SlotData}; use sov_rollup_interface::spec::SpecId; use sov_rollup_interface::zk::{ @@ -20,7 +22,7 @@ use tokio::select; use tokio::sync::{mpsc, Mutex}; use tokio::time::{sleep, Duration}; use tokio_util::sync::CancellationToken; -use tracing::{error, info}; +use tracing::{error, info, warn}; use crate::metrics::LIGHT_CLIENT_METRICS; @@ -41,6 +43,7 @@ where light_client_proof_elfs: HashMap>, l1_block_cache: Arc>>, queued_l1_blocks: VecDeque<::FilteredBlock>, + mmr_native: MMRNative, } impl L1BlockHandler @@ -60,7 +63,9 @@ where batch_proof_code_commitments: HashMap, light_client_proof_code_commitments: HashMap, light_client_proof_elfs: HashMap>, + mmr_db: MmrDB, ) -> Self { + let mmr_native = MMRNative::new(mmr_db); Self { _prover_config: prover_config, prover_service, @@ -72,6 +77,7 @@ where light_client_proof_elfs, l1_block_cache: Arc::new(Mutex::new(L1BlockCache::new())), queued_l1_blocks: VecDeque::new(), + mmr_native, } } @@ -122,7 +128,8 @@ where let l1_block = self .queued_l1_blocks .front() - .expect("Pending l1 blocks cannot be empty"); + .expect("Pending l1 blocks cannot be empty") + .clone(); self.process_l1_block(l1_block).await?; @@ -132,7 +139,7 @@ where Ok(()) } - async fn process_l1_block(&self, l1_block: &Da::FilteredBlock) -> anyhow::Result<()> { + async fn process_l1_block(&mut self, l1_block: Da::FilteredBlock) -> anyhow::Result<()> { let l1_hash = l1_block.header().hash().into(); let l1_height = l1_block.header().height(); @@ -143,7 +150,7 @@ where let (mut da_data, inclusion_proof, completeness_proof) = self .da_service - .extract_relevant_blobs_with_proof(l1_block, DaNamespace::ToLightClientProver); + .extract_relevant_blobs_with_proof(&l1_block, DaNamespace::ToLightClientProver); // Even though following extract_batch_proofs call does full_data on batch proofs, // we also need to do it for BatchProofMethodId txs @@ -186,58 
+193,125 @@ where batch_proofs.len() ); + let mut unused_chunks = BTreeMap::>::new(); + let mut mmr_hints = vec![]; // index only incremented for complete and aggregated proofs, in line with the circuit let mut proof_index = 0u32; let mut expected_to_fail_hint = vec![]; - for batch_proof in batch_proofs { - // TODO handle aggreagates - if let DaDataLightClient::Complete(proof) = batch_proof { - let batch_proof_last_l2_height = match Vm::extract_output::< - BatchProofCircuitOutput<::Spec, [u8; 32]>, - >(&proof) - { - Ok(output) => output.last_l2_height, - Err(e) => { - info!("Failed to extract post fork 1 output from proof: {:?}. Trying to extract pre fork 1 output", e); - if Vm::extract_output::< - OldBatchProofCircuitOutput<::Spec, [u8; 32]>, - >(&proof) - .is_err() - { - tracing::info!( - "Failed to extract pre fork1 and fork1 output from proof" - ); - continue; + for (wtxid, batch_proof) in batch_proofs.clone() { + match batch_proof { + DaDataLightClient::Chunk(body) => { + tracing::warn!("Chunk wtxid: {:?}", wtxid); + tracing::warn!("Chunk body len: {}", body.len()); + // For now, this chunk is unused by any aggregate in the block. + // unused_chunks.insert(wtxid, body); + } + DaDataLightClient::Aggregate(_, _) => { + tracing::warn!("Aggregate wtxid: {:?}", wtxid) + } + _ => tracing::warn!("Other wtxid: {:?}", wtxid), + } + } + + for (wtxid, batch_proof) in batch_proofs { + match batch_proof { + DaDataLightClient::Complete(proof) => { + match self.verify_complete_proof(&proof, l2_last_height) { + Ok(is_valid) => { + if is_valid { + assumptions.push(proof); + } else { + expected_to_fail_hint.push(proof_index); + } + + proof_index += 1; + } + Err(err) => { + error!("Batch proof verification failed: {err}"); } - // If this is a pre fork 1 proof, then we need to convert it to post fork 1 proof - 0 } - }; - - if batch_proof_last_l2_height <= l2_last_height && l2_last_height != 0 { - proof_index += 1; - continue; } + DaDataLightClient::Aggregate(_txids, wtxids) => { + // For each of the chunks, if the chunk is not contained by any + // aggregate in the current block, this tells us that it has been already seen in a previous + // L1 block. + // Given that we've updated MMR native with existing chunks, we now have a consistent MMR tree + // from which we can generate a hint for the guest MMR. + let mut complete_proof = vec![]; + tracing::warn!("wtxids count: {}", wtxids.len()); + for wtxid in wtxids { + tracing::warn!("wtxid: {:?}", wtxid); + // Cleanup unused_chunks from wtxids which are actually used by the current aggregate + if let Some(chunk) = unused_chunks.remove(&wtxid) { + tracing::warn!("chunk len: {}", chunk.len()); + complete_proof.push(chunk); + } else { + let hint = self.mmr_native.generate_proof(wtxid)?; + if let Some((chunk, _)) = hint.as_ref() { + complete_proof.push(chunk.body.clone()); + mmr_hints.push(hint); + } else { + // This aggregate is not provable since we don't have all the chunks yet. 
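// `generate_proof(wtxid)` can only return `Some((chunk, proof))` when the chunk was
// already appended to the MMR DB while processing an earlier L1 block; that proof is
// the membership witness the guest re-checks with `mmr_guest.verify_proof`. A `None`
// result means the chunk has not been seen yet, so a `None` hint is recorded and this
// aggregate is not proven in the current run.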
+ // Push None and continue next proof + mmr_hints.push(None); + continue; + } + } + } - let current_spec = fork_from_block_number(batch_proof_last_l2_height).spec_id; - let batch_proof_method_id = self - .batch_proof_code_commitments - .get(¤t_spec) - .expect("Batch proof code commitment not found"); - if let Err(e) = Vm::verify(proof.as_slice(), batch_proof_method_id) { - tracing::error!("Failed to verify batch proof: {:?}", e); - expected_to_fail_hint.push(proof_index); - } else { - assumptions.push(proof); - } + tracing::warn!( + "complete_proof chunk count before decompress: {}", + complete_proof.len() + ); + + // TODO: Handle error + let complete_proof = self + .da_service + .decompress_chunks(complete_proof.into_iter().flatten().collect()) + .unwrap(); + + tracing::warn!( + "complete_proof total len after decompress: {}", + complete_proof.len() + ); - proof_index += 1; + match self.verify_complete_proof(&complete_proof, l2_last_height) { + Ok(is_valid) => { + if is_valid { + assumptions.push(complete_proof); + } else { + expected_to_fail_hint.push(proof_index); + } + + proof_index += 1; + } + Err(err) => { + error!("Aggregated batch proof verification failed: {err}"); + } + } + } + DaDataLightClient::Chunk(body) => { + tracing::warn!("Chunk wtxid: {:?}", wtxid); + tracing::warn!("Chunk body len: {}", body.len()); + // For now, this chunk is unused by any aggregate in the block. + unused_chunks.insert(wtxid, body); + } + _ => { + continue; + } } } tracing::debug!("assumptions len: {:?}", assumptions.len()); + // Add unused chunks to MMR native. + // Up until this point, the proof has been generated by aggregates in the block, + // so it's okay to update the MMR tree now. + for (wtxid, body) in unused_chunks.into_iter() { + self.mmr_native.append(MMRChunk::new(wtxid, body))?; + } + let current_fork = fork_from_block_number(l2_last_height); let light_client_proof_code_commitment = self .light_client_proof_code_commitments @@ -256,6 +330,7 @@ where da_block_header: l1_block.header().clone(), light_client_proof_method_id: light_client_proof_code_commitment.clone().into(), previous_light_client_proof_journal: light_client_proof_journal, + mmr_hints: mmr_hints.into(), expected_to_fail_hint, }; @@ -288,28 +363,83 @@ where Ok(()) } + fn verify_complete_proof( + &self, + proof: &Vec, + light_client_l2_height: u64, + ) -> anyhow::Result { + let batch_proof_last_l2_height = match Vm::extract_output::< + BatchProofCircuitOutput<::Spec, [u8; 32]>, + >(proof) + { + Ok(output) => output.last_l2_height, + Err(e) => { + warn!("Failed to extract post fork 1 output from proof: {:?}. 
Trying to extract pre fork 1 output", e); + if Vm::extract_output::< + OldBatchProofCircuitOutput<::Spec, [u8; 32]>, + >(proof) + .is_err() + { + return Err(anyhow::anyhow!("Failed to extract both pre-fork1 and fork1 output from proof")); + } + 0 + } + }; + + if batch_proof_last_l2_height <= light_client_l2_height && light_client_l2_height != 0 { + return Err(anyhow::anyhow!( + "Batch proof l2 height is less than latest light client proof l2 height" + )); + } + + let current_spec = fork_from_block_number(batch_proof_last_l2_height).spec_id; + let batch_proof_method_id = self + .batch_proof_code_commitments + .get(¤t_spec) + .expect("Batch proof code commitment not found"); + + if let Err(e) = Vm::verify(proof.as_slice(), batch_proof_method_id) { + warn!("Failed to verify batch proof: {:?}", e); + Ok(false) + } else { + Ok(true) + } + } + async fn extract_batch_proofs( &self, da_data: &mut [<::Spec as DaSpec>::BlobTransaction], da_slot_hash: [u8; 32], // passing this as an argument is not clever - ) -> Vec { + ) -> Vec<(Wtxid, DaDataLightClient)> { let mut batch_proofs = Vec::new(); da_data.iter_mut().for_each(|tx| { - // Check for commitment - if tx.sender().as_ref() == self.batch_prover_da_pub_key.as_slice() { - let data = DaDataLightClient::try_from_slice(tx.full_data()); - - if let Ok(proof) = data { - batch_proofs.push(proof); - } else { - tracing::warn!( - "Found broken DA data in block 0x{}: {:?}", - hex::encode(da_slot_hash), - data - ); + if let Ok(data) = DaDataLightClient::try_from_slice(tx.full_data()) { + match data { + DaDataLightClient::Chunk(_) => { + tracing::warn!( + "Chunk wtxid: {:?}", + tx.wtxid().expect("Blob should have wtxid") + ); + batch_proofs.push((tx.wtxid().expect("Blob should have wtxid"), data)) + } + _ => { + tracing::warn!( + "other wtxid: {:?}", + tx.wtxid().expect("Blob should have wtxid") + ); + if tx.sender().as_ref() == self.batch_prover_da_pub_key.as_slice() { + batch_proofs.push((tx.wtxid().expect("Blob should have wtxid"), data)); + } + } } + } else { + tracing::warn!( + "Found broken DA data in block 0x{}", + hex::encode(da_slot_hash) + ); } + // Check for commitment }); batch_proofs } diff --git a/crates/light-client-prover/src/runner.rs b/crates/light-client-prover/src/runner.rs index a6040f688..1dcec51ff 100644 --- a/crates/light-client-prover/src/runner.rs +++ b/crates/light-client-prover/src/runner.rs @@ -7,6 +7,7 @@ use citrea_common::{LightClientProverConfig, RollupPublicKeys, RpcConfig, Runner use jsonrpsee::server::{BatchRequestConfig, ServerBuilder}; use jsonrpsee::RpcModule; use sov_db::ledger_db::{LightClientProverLedgerOps, SharedLedgerOps}; +use sov_db::mmr_db::MmrDB; use sov_db::schema::types::SlotNumber; use sov_rollup_interface::services::da::DaService; use sov_rollup_interface::spec::SpecId; @@ -37,6 +38,7 @@ where batch_proof_commitments_by_spec: HashMap, light_client_proof_commitment: HashMap, light_client_proof_elfs: HashMap>, + mmr_db: MmrDB, } impl CitreaLightClientProver @@ -58,6 +60,7 @@ where batch_proof_commitments_by_spec: HashMap, light_client_proof_commitment: HashMap, light_client_proof_elfs: HashMap>, + mmr_db: MmrDB, task_manager: TaskManager<()>, ) -> Result { Ok(Self { @@ -72,6 +75,7 @@ where batch_proof_commitments_by_spec, light_client_proof_commitment, light_client_proof_elfs, + mmr_db, }) } @@ -150,6 +154,7 @@ where let prover_config = self.prover_config.clone(); let prover_service = self.prover_service.clone(); let ledger_db = self.ledger_db.clone(); + let mmr_db = self.mmr_db.clone(); let da_service = 
self.da_service.clone(); let batch_prover_da_pub_key = self.public_keys.prover_da_pub_key.clone(); let batch_proof_commitments_by_spec = self.batch_proof_commitments_by_spec.clone(); @@ -166,6 +171,7 @@ where batch_proof_commitments_by_spec, light_client_proof_commitment, light_client_proof_elfs, + mmr_db, ); l1_block_handler .run(last_l1_height_scanned.0, cancellation_token) diff --git a/crates/light-client-prover/src/tests/mod.rs b/crates/light-client-prover/src/tests/mod.rs index a4876944a..d792b7a40 100644 --- a/crates/light-client-prover/src/tests/mod.rs +++ b/crates/light-client-prover/src/tests/mod.rs @@ -28,6 +28,7 @@ fn test_light_client_circuit_valid_da_valid_data() { da_data: vec![blob_1, blob_2], inclusion_proof: [1u8; 32], completeness_proof: (), + mmr_hints: Default::default(), expected_to_fail_hint: vec![], }; @@ -66,6 +67,7 @@ fn test_light_client_circuit_valid_da_valid_data() { light_client_proof_method_id, inclusion_proof: [1u8; 32], completeness_proof: (), + mmr_hints: Default::default(), expected_to_fail_hint: vec![], }; @@ -103,6 +105,7 @@ fn test_wrong_order_da_blocks_should_still_work() { da_data: vec![blob_2, blob_1], inclusion_proof: [1u8; 32], completeness_proof: (), + mmr_hints: Default::default(), expected_to_fail_hint: vec![], }; @@ -144,6 +147,7 @@ fn create_unchainable_outputs_then_chain_them_on_next_block() { da_data: vec![blob_2, blob_1], inclusion_proof: [1u8; 32], completeness_proof: (), + mmr_hints: Default::default(), expected_to_fail_hint: vec![], }; @@ -193,6 +197,7 @@ fn create_unchainable_outputs_then_chain_them_on_next_block() { da_data: vec![blob_1], inclusion_proof: [1u8; 32], completeness_proof: (), + mmr_hints: Default::default(), expected_to_fail_hint: vec![], }; @@ -231,6 +236,7 @@ fn test_header_chain_proof_height_and_hash() { da_data: vec![blob_1, blob_2], inclusion_proof: [1u8; 32], completeness_proof: (), + mmr_hints: Default::default(), expected_to_fail_hint: vec![], }; @@ -269,6 +275,7 @@ fn test_header_chain_proof_height_and_hash() { light_client_proof_method_id, inclusion_proof: [1u8; 32], completeness_proof: (), + mmr_hints: Default::default(), expected_to_fail_hint: vec![], }; @@ -307,6 +314,7 @@ fn test_unverifiable_batch_proofs() { da_data: vec![blob_1, blob_2], inclusion_proof: [1u8; 32], completeness_proof: (), + mmr_hints: Default::default(), expected_to_fail_hint: vec![1], }; @@ -350,6 +358,7 @@ fn test_unverifiable_prev_light_client_proof() { da_data: vec![blob_1, blob_2], inclusion_proof: [1u8; 32], completeness_proof: (), + mmr_hints: Default::default(), expected_to_fail_hint: vec![1], }; @@ -386,6 +395,7 @@ fn test_unverifiable_prev_light_client_proof() { light_client_proof_method_id, inclusion_proof: [1u8; 32], completeness_proof: (), + mmr_hints: Default::default(), expected_to_fail_hint: vec![], }; @@ -425,6 +435,7 @@ fn test_new_method_id_txs() { da_data: vec![blob_1, blob_2], inclusion_proof: [1u8; 32], completeness_proof: (), + mmr_hints: Default::default(), expected_to_fail_hint: vec![], }; @@ -457,6 +468,7 @@ fn test_new_method_id_txs() { da_data: vec![blob_2], inclusion_proof: [1u8; 32], completeness_proof: (), + mmr_hints: Default::default(), expected_to_fail_hint: vec![], }; @@ -491,6 +503,7 @@ fn test_new_method_id_txs() { da_data: vec![blob_1, blob_2], inclusion_proof: [1u8; 32], completeness_proof: (), + mmr_hints: Default::default(), expected_to_fail_hint: vec![], }; @@ -535,6 +548,7 @@ fn test_expect_to_fail_on_correct_proof() { da_data: vec![blob_1, blob_2], inclusion_proof: [1u8; 32], completeness_proof: 
(), + mmr_hints: Default::default(), expected_to_fail_hint: vec![1], }; @@ -572,6 +586,7 @@ fn test_expected_to_fail_proof_not_hinted() { da_data: vec![blob_1, blob_2], inclusion_proof: [1u8; 32], completeness_proof: (), + mmr_hints: Default::default(), expected_to_fail_hint: vec![], }; diff --git a/crates/light-client-prover/src/tests/test_utils.rs b/crates/light-client-prover/src/tests/test_utils.rs index 688b67738..23bff2fba 100644 --- a/crates/light-client-prover/src/tests/test_utils.rs +++ b/crates/light-client-prover/src/tests/test_utils.rs @@ -45,7 +45,7 @@ pub(crate) fn create_mock_batch_proof( let da_data = DaDataLightClient::Complete(mock_serialized); let da_data_ser = borsh::to_vec(&da_data).expect("should serialize"); - let mut blob = MockBlob::new(da_data_ser, MockAddress::new([9u8; 32]), [0u8; 32]); + let mut blob = MockBlob::new(da_data_ser, MockAddress::new([9u8; 32]), [0u8; 32], None); blob.full_data(); blob @@ -74,7 +74,7 @@ pub(crate) fn create_new_method_id_tx( let da_data_ser = borsh::to_vec(&da_data).expect("should serialize"); - let mut blob = MockBlob::new(da_data_ser, MockAddress::new(pub_key), [0u8; 32]); + let mut blob = MockBlob::new(da_data_ser, MockAddress::new(pub_key), [0u8; 32], None); blob.full_data(); blob diff --git a/crates/sovereign-sdk/adapters/mock-da/src/db_connector.rs b/crates/sovereign-sdk/adapters/mock-da/src/db_connector.rs index c6703d4ce..4df38144f 100644 --- a/crates/sovereign-sdk/adapters/mock-da/src/db_connector.rs +++ b/crates/sovereign-sdk/adapters/mock-da/src/db_connector.rs @@ -150,8 +150,8 @@ mod tests { header: MockBlockHeader::from_height(at_height), is_valid: true, blobs: vec![ - MockBlob::new(vec![2; 44], MockAddress::new([1; 32]), [2; 32]), - MockBlob::new(vec![3; 12], MockAddress::new([2; 32]), [5; 32]), + MockBlob::new(vec![2; 44], MockAddress::new([1; 32]), [2; 32], None), + MockBlob::new(vec![3; 12], MockAddress::new([2; 32]), [5; 32], None), ], } } diff --git a/crates/sovereign-sdk/adapters/mock-da/src/service.rs b/crates/sovereign-sdk/adapters/mock-da/src/service.rs index a96a4035e..cbe476ba7 100644 --- a/crates/sovereign-sdk/adapters/mock-da/src/service.rs +++ b/crates/sovereign-sdk/adapters/mock-da/src/service.rs @@ -223,6 +223,7 @@ impl MockDaService { zkp_proof, self.sequencer_da_address.clone(), data_hash, + None, ); let header = MockBlockHeader { prev_hash: previous_block_hash, @@ -349,6 +350,11 @@ impl DaService for MockDaService { type Error = anyhow::Error; type BlockHash = [u8; 32]; + /// Decompress and deserialize chunks + fn decompress_chunks(&self, complete_chunks: Vec) -> Result, Self::Error> { + Ok(complete_chunks) + } + /// Gets block at given height /// If block is not available, waits until it is /// It is possible to read non-finalized and last finalized blocks multiple times diff --git a/crates/sovereign-sdk/adapters/mock-da/src/types/mod.rs b/crates/sovereign-sdk/adapters/mock-da/src/types/mod.rs index 9d981dd1c..0f5eb1260 100644 --- a/crates/sovereign-sdk/adapters/mock-da/src/types/mod.rs +++ b/crates/sovereign-sdk/adapters/mock-da/src/types/mod.rs @@ -177,6 +177,7 @@ pub struct MockDaVerifier {} pub struct MockBlob { pub(crate) address: MockAddress, pub(crate) hash: [u8; 32], + pub(crate) wtxid: Option<[u8; 32]>, /// Actual data from the blob. Public for testing purposes. pub data: CountedBufReader, // Data for the aggregated ZK proof. @@ -185,12 +186,18 @@ pub struct MockBlob { impl MockBlob { /// Creates a new mock blob with the given data, claiming to have been published by the provided address. 
- pub fn new(data: Vec, address: MockAddress, hash: [u8; 32]) -> Self { + pub fn new( + data: Vec, + address: MockAddress, + hash: [u8; 32], + wtxid: Option<[u8; 32]>, + ) -> Self { Self { address, data: CountedBufReader::new(Bytes::from(data)), zk_proofs_data: Default::default(), hash, + wtxid, } } @@ -200,12 +207,14 @@ impl MockBlob { zk_proofs_data: Vec, address: MockAddress, hash: [u8; 32], + wtxid: Option<[u8; 32]>, ) -> Self { Self { address, hash, data: CountedBufReader::new(Bytes::from(data)), zk_proofs_data, + wtxid, } } } diff --git a/crates/sovereign-sdk/adapters/mock-da/src/verifier.rs b/crates/sovereign-sdk/adapters/mock-da/src/verifier.rs index 74416cedd..bcf9c4b54 100644 --- a/crates/sovereign-sdk/adapters/mock-da/src/verifier.rs +++ b/crates/sovereign-sdk/adapters/mock-da/src/verifier.rs @@ -17,6 +17,10 @@ impl BlobReaderTrait for MockBlob { self.hash } + fn wtxid(&self) -> Option<[u8; 32]> { + self.wtxid + } + fn verified_data(&self) -> &[u8] { self.data.accumulator() } @@ -30,6 +34,10 @@ impl BlobReaderTrait for MockBlob { self.data.advance(num_bytes); self.verified_data() } + + fn serialize_v1(&self) -> borsh::io::Result> { + borsh::to_vec(self) + } } /// A [`sov_rollup_interface::da::DaSpec`] suitable for testing. @@ -51,6 +59,10 @@ impl DaVerifier for MockDaVerifier { type Error = anyhow::Error; + fn decompress_chunks(&self, complete_chunks: Vec) -> Result, Self::Error> { + Ok(complete_chunks) + } + fn new(_params: ::ChainParams) -> Self { Self {} } diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/lib.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/lib.rs index b1e99aec4..00ad4ff27 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/lib.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/lib.rs @@ -19,6 +19,8 @@ pub mod schema; /// This is primarily used as the backing store for the [JMT(JellyfishMerkleTree)](https://docs.rs/jmt/latest/jmt/). pub mod state_db; +pub mod mmr_db; + /// Implements a wrapper around RocksDB meant for storing state only accessible /// outside of the zkVM execution environment, as this data is not included in /// the JMT and does not contribute to proofs of execution. diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/mmr_db.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/mmr_db.rs new file mode 100644 index 000000000..4f10637cd --- /dev/null +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/mmr_db.rs @@ -0,0 +1,90 @@ +#![allow(missing_docs)] +use std::sync::Arc; + +use sov_rollup_interface::mmr::NodeStore; +use sov_schema_db::DB; +use tracing::instrument; + +use crate::rocks_db_config::RocksdbConfig; +use crate::schema::tables::{MMRChunks, MMRNodes, MMRTreeSize, MMR_TABLES}; + +#[derive(Clone, Debug)] +pub struct MmrDB { + db: Arc, +} + +impl MmrDB { + const DB_PATH_SUFFIX: &'static str = "mmr"; + const DB_NAME: &'static str = "mmr-db"; + + /// Initialize [`sov_schema_db::DB`] that should be used by snapshots. + pub fn setup_schema_db(cfg: &RocksdbConfig) -> anyhow::Result { + let raw_options = cfg.as_raw_options(false); + let mmr_db_path = cfg.path.join(Self::DB_PATH_SUFFIX); + sov_schema_db::DB::open( + mmr_db_path, + Self::DB_NAME, + MMR_TABLES.iter().copied(), + &raw_options, + ) + } + + /// Open a [`MMRDB`] (backed by RocksDB) at the specified path. 
+    #[instrument(level = "trace", skip_all, err)]
+    pub fn new(cfg: &RocksdbConfig) -> Result<Self, anyhow::Error> {
+        let path = cfg.path.join(Self::DB_PATH_SUFFIX);
+        let raw_options = cfg.as_raw_options(false);
+        let tables: Vec<_> = MMR_TABLES.iter().map(|e| e.to_string()).collect();
+        let inner = DB::open(path, Self::DB_NAME, tables, &raw_options)?;
+
+        Ok(Self {
+            db: Arc::new(inner),
+        })
+    }
+}
+
+impl NodeStore for MmrDB {
+    fn save_node(
+        &mut self,
+        level: u32,
+        index: u32,
+        node_hash: sov_rollup_interface::mmr::MMRNodeHash,
+    ) -> anyhow::Result<()> {
+        self.db.put::<MMRNodes>(&(level, index), &node_hash)
+    }
+
+    fn load_node(
+        &self,
+        level: u32,
+        index: u32,
+    ) -> anyhow::Result<Option<sov_rollup_interface::mmr::MMRNodeHash>> {
+        self.db.get::<MMRNodes>(&(level, index))
+    }
+
+    fn get_tree_size(&self) -> u32 {
+        self.db
+            .get::<MMRTreeSize>(&())
+            .ok()
+            .flatten()
+            .unwrap_or_default()
+    }
+
+    fn set_tree_size(&mut self, size: u32) -> anyhow::Result<()> {
+        self.db.put::<MMRTreeSize>(&(), &size)
+    }
+
+    fn save_chunk(
+        &mut self,
+        wtxid: sov_rollup_interface::mmr::Wtxid,
+        chunk: sov_rollup_interface::mmr::MMRChunk,
+    ) -> anyhow::Result<()> {
+        self.db.put::<MMRChunks>(&wtxid, &chunk)
+    }
+
+    fn load_chunk(
+        &self,
+        wtxid: sov_rollup_interface::mmr::Wtxid,
+    ) -> anyhow::Result<Option<sov_rollup_interface::mmr::MMRChunk>> {
+        self.db.get::<MMRChunks>(&wtxid)
+    }
+}
diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs
index f8d1554c0..244ee4c37 100644
--- a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs
+++ b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs
@@ -13,6 +13,7 @@ use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
 use jmt::storage::{NibblePath, Node, NodeKey};
 use jmt::Version;
 use sov_rollup_interface::da::SequencerCommitment;
+use sov_rollup_interface::mmr::{MMRChunk, MMRNodeHash, Wtxid};
 use sov_rollup_interface::stf::StateDiff;
 use sov_schema_db::schema::{KeyDecoder, KeyEncoder, ValueCodec};
 use sov_schema_db::{CodecError, SeekKeyEncoder};
@@ -23,6 +24,14 @@ use super::types::{
     StoredSoftConfirmation, StoredVerifiedProof,
 };

+/// A list of all tables used by the MmrDB. These tables store the MMR nodes,
+/// the tree size, and the chunk bodies keyed by wtxid.
+pub const MMR_TABLES: &[&str] = &[
+    MMRNodes::table_name(),
+    MMRTreeSize::table_name(),
+    MMRChunks::table_name(),
+];
+
 /// A list of all tables used by the StateDB. These tables store rollup state - meaning
 /// account balances, nonces, etc.
pub const STATE_TABLES: &[&str] = &[ @@ -321,6 +330,21 @@ define_table_with_seek_key_codec!( (LastPrunedBlock) () => u64 ); +define_table_with_seek_key_codec!( + /// Stores the chunk's hash of an MMR + (MMRNodes) (u32, u32) => MMRNodeHash +); + +define_table_with_seek_key_codec!( + /// Stores the chunk's content by hash + (MMRChunks) Wtxid => MMRChunk +); + +define_table_with_seek_key_codec!( + /// Stores the MMR tree size + (MMRTreeSize) () => u32 +); + #[cfg(test)] define_table_with_seek_key_codec!( /// Test table old diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types.rs index 2a40ae9c5..91e1746d5 100644 --- a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types.rs +++ b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/types.rs @@ -3,6 +3,7 @@ use std::sync::Arc; use borsh::{BorshDeserialize, BorshSerialize}; use sov_rollup_interface::da::LatestDaState; +use sov_rollup_interface::mmr::MMRGuest; use sov_rollup_interface::rpc::{ BatchProofOutputRpcResponse, BatchProofResponse, HexTx, LatestDaStateRpcResponse, LightClientProofOutputRpcResponse, LightClientProofResponse, SoftConfirmationResponse, @@ -94,6 +95,8 @@ pub struct StoredLightClientProofOutput { pub last_l2_height: u64, /// L2 activation height of the fork and the Method ids of the batch proofs that were verified in the light client proof pub batch_proof_method_ids: Vec<(u64, [u32; 8])>, + /// A list of unprocessed chunks + pub mmr_guest: MMRGuest, } impl From for LightClientProofOutputRpcResponse { @@ -133,6 +136,7 @@ impl From for StoredLightClientProofOutput { unchained_batch_proofs_info: circuit_output.unchained_batch_proofs_info, last_l2_height: circuit_output.last_l2_height, batch_proof_method_ids: circuit_output.batch_proof_method_ids, + mmr_guest: circuit_output.mmr_guest, } } } @@ -154,6 +158,7 @@ impl From for LightClientCircuitOutput { unchained_batch_proofs_info: db_output.unchained_batch_proofs_info, last_l2_height: db_output.last_l2_height, batch_proof_method_ids: db_output.batch_proof_method_ids, + mmr_guest: db_output.mmr_guest, } } } diff --git a/crates/sovereign-sdk/module-system/sov-modules-api/src/reexport_macros.rs b/crates/sovereign-sdk/module-system/sov-modules-api/src/reexport_macros.rs index a72a43077..af1eed527 100644 --- a/crates/sovereign-sdk/module-system/sov-modules-api/src/reexport_macros.rs +++ b/crates/sovereign-sdk/module-system/sov-modules-api/src/reexport_macros.rs @@ -2,6 +2,9 @@ /// type. #[cfg(feature = "macros")] pub use sov_modules_macros::DispatchCall; +/// Implements ForkCodec trait. Requires the type to be enum. +#[cfg(feature = "macros")] +pub use sov_modules_macros::ForkCodec; /// Derives the [`Genesis`](trait.Genesis.html) trait for the underlying runtime /// `struct`. #[cfg(feature = "macros")] diff --git a/crates/sovereign-sdk/module-system/sov-modules-macros/src/fork_codec.rs b/crates/sovereign-sdk/module-system/sov-modules-macros/src/fork_codec.rs new file mode 100644 index 000000000..6bd5f1b5e --- /dev/null +++ b/crates/sovereign-sdk/module-system/sov-modules-macros/src/fork_codec.rs @@ -0,0 +1,67 @@ +use proc_macro2::Span; +use quote::quote; +use syn::{Data, DeriveInput, Error}; + +pub fn derive_fork_codec(input: DeriveInput) -> Result { + // Extract the name of the enum + let name = &input.ident; + + // Ensure it's an enum + let Data::Enum(data_enum) = &input.data else { + return Err(Error::new( + Span::call_site(), + "ForkCodec can only be derived for enums. 
Use borsh derive directly instead.", + )); + }; + + // Extract variants + let variants = &data_enum.variants; + + // Generate match arms for encode and decode + let encode_arms = variants.iter().map(|variant| { + let variant_name = &variant.ident; + quote! { + Self::#variant_name(inner) => borsh::to_vec(inner).map_err(|e| e.into()), + } + }); + + let decode_arms = variants.iter().enumerate().map(|(index, variant)| { + let variant_name = &variant.ident; + quote! { + #index => { + let inner = borsh::from_slice(slice)?; + Ok(Self::#variant_name(inner)) + } + } + }); + + // Fallback for remaining SpecId variants + let fallback_variant = &variants.last().unwrap().ident; + let fallback_arm = quote! { + _ => { + let inner = borsh::from_slice(slice)?; + Ok(Self::#fallback_variant(inner)) + } + }; + + // Generate the implementation + let expanded = quote! { + impl sov_rollup_interface::fork::ForkCodec for #name { + fn encode(&self) -> anyhow::Result> { + match self { + #(#encode_arms)* + } + } + + fn decode(bytes: impl AsRef<[u8]>, spec: sov_rollup_interface::spec::SpecId) -> anyhow::Result { + let slice = bytes.as_ref(); + match spec as u8 as usize { + #(#decode_arms)* + #fallback_arm + } + } + } + }; + + Ok(proc_macro::TokenStream::from(expanded)) +} diff --git a/crates/sovereign-sdk/module-system/sov-modules-macros/src/lib.rs b/crates/sovereign-sdk/module-system/sov-modules-macros/src/lib.rs index e7dafae87..c94e8dca5 100644 --- a/crates/sovereign-sdk/module-system/sov-modules-macros/src/lib.rs +++ b/crates/sovereign-sdk/module-system/sov-modules-macros/src/lib.rs @@ -14,6 +14,7 @@ mod cli_parser; mod common; mod default_runtime; mod dispatch; +mod fork_codec; mod make_constants; mod manifest; mod module_call_json_schema; @@ -27,6 +28,7 @@ use default_runtime::DefaultRuntimeMacro; use dispatch::dispatch_call::DispatchCallMacro; use dispatch::genesis::GenesisMacro; use dispatch::message_codec::MessageCodec; +use fork_codec::derive_fork_codec; use make_constants::{make_const, PartialItemConst}; use module_call_json_schema::derive_module_call_json_schema; use module_info::ModuleType; @@ -212,9 +214,16 @@ pub fn cli_parser(input: TokenStream) -> TokenStream { let cli_parser = CliParserMacro::new("Cmd"); handle_macro_error(cli_parser.cli_macro(input)) } + #[cfg(feature = "native")] #[proc_macro_derive(CliWalletArg)] pub fn custom_enum_clap(input: TokenStream) -> TokenStream { let input: syn::DeriveInput = parse_macro_input!(input); handle_macro_error(derive_cli_wallet_arg(input)) } + +#[proc_macro_derive(ForkCodec)] +pub fn fork_codec_derive(input: TokenStream) -> TokenStream { + let input: syn::DeriveInput = parse_macro_input!(input); + handle_macro_error(derive_fork_codec(input)) +} diff --git a/crates/sovereign-sdk/rollup-interface/Cargo.toml b/crates/sovereign-sdk/rollup-interface/Cargo.toml index 5342b08c9..92dffae8d 100644 --- a/crates/sovereign-sdk/rollup-interface/Cargo.toml +++ b/crates/sovereign-sdk/rollup-interface/Cargo.toml @@ -21,10 +21,10 @@ borsh = { workspace = true } bytes = { workspace = true, optional = true, default-features = true } digest = { workspace = true } futures = { workspace = true, optional = true } -jmt = { workspace = true, optional = true } hex = { workspace = true } +jmt = { workspace = true, optional = true } serde = { workspace = true } -sha2 = { workspace = true, optional = true } +sha2 = { workspace = true } thiserror = { workspace = true, optional = true } # TODO: Remove tokio when https://github.com/Sovereign-Labs/sovereign-sdk/issues/1161 is resolved tokio = 
{ workspace = true, optional = true } @@ -46,6 +46,5 @@ std = [ "hex/default", "jmt", "serde/default", - "sha2", "thiserror", ] diff --git a/crates/sovereign-sdk/rollup-interface/src/fork/mod.rs b/crates/sovereign-sdk/rollup-interface/src/fork/mod.rs index c7e03fb9b..b4f536593 100644 --- a/crates/sovereign-sdk/rollup-interface/src/fork/mod.rs +++ b/crates/sovereign-sdk/rollup-interface/src/fork/mod.rs @@ -5,6 +5,8 @@ mod migration; #[cfg(test)] mod tests; +use alloc::vec::Vec; + pub use manager::*; pub use migration::*; @@ -79,3 +81,55 @@ pub fn fork_pos_from_block_number(forks: &[Fork], block_number: u64) -> usize { Err(idx) => idx.saturating_sub(1), } } + +/// ForkCodec is the serialization trait for types that require forking when changed. +/// Optimal usecase would be the type to be versioned enum, and do untagged enum ser/de. +/// +/// Example: +/// +/// ``` +/// use sov_rollup_interface::fork::ForkCodec; +/// use sov_rollup_interface::spec::SpecId; +/// +/// #[derive(borsh::BorshSerialize, borsh::BorshDeserialize)] +/// struct InputV1 {} +/// +/// #[derive(borsh::BorshSerialize, borsh::BorshDeserialize)] +/// struct InputV2 {} +/// +/// enum Input { +/// V1(InputV1), +/// V2(InputV2), +/// } +/// +/// impl Input { +/// pub fn new_v1(v1: InputV1) -> Self { +/// Self::V1(v1) +/// } +/// +/// pub fn new_v2(v2: InputV2) -> Self { +/// Self::V2(v2) +/// } +/// } +/// +/// impl ForkCodec for Input { +/// fn encode(&self) -> anyhow::Result> { +/// match self { +/// Self::V1(v1) => Ok(borsh::to_vec(v1)?), +/// Self::V2(v2) => Ok(borsh::to_vec(v2)?), +/// } +/// } +/// +/// fn decode(bytes: impl AsRef<[u8]>, spec: SpecId) -> anyhow::Result { +/// let slice = bytes.as_ref(); +/// match spec { +/// SpecId::Genesis => Ok(Self::new_v1(borsh::from_slice(slice)?)), +/// SpecId::Fork1 => Ok(Self::new_v2(borsh::from_slice(slice)?)), +/// } +/// } +/// } +/// ``` +pub trait ForkCodec: Sized { + fn encode(&self) -> anyhow::Result>; + fn decode(bytes: impl AsRef<[u8]>, spec: SpecId) -> anyhow::Result; +} diff --git a/crates/sovereign-sdk/rollup-interface/src/lib.rs b/crates/sovereign-sdk/rollup-interface/src/lib.rs index 2d5e35369..605bdb3b8 100644 --- a/crates/sovereign-sdk/rollup-interface/src/lib.rs +++ b/crates/sovereign-sdk/rollup-interface/src/lib.rs @@ -14,24 +14,21 @@ extern crate alloc; #[cfg(feature = "native")] pub const CITREA_VERSION: &str = "v0.5.5"; -mod state_machine; -pub use state_machine::*; - +/// Fork module +pub mod fork; +pub mod mmr; mod network; -pub use network::*; - mod node; +/// Specs module +pub mod spec; +mod state_machine; #[cfg(not(target_has_atomic = "ptr"))] pub use alloc::rc::Rc as RefCount; #[cfg(target_has_atomic = "ptr")] pub use alloc::sync::Arc as RefCount; +pub use network::*; pub use node::*; +pub use state_machine::*; pub use {anyhow, digest}; - -/// Fork module -pub mod fork; - -/// Specs module -pub mod spec; diff --git a/crates/sovereign-sdk/rollup-interface/src/mmr/guest.rs b/crates/sovereign-sdk/rollup-interface/src/mmr/guest.rs new file mode 100644 index 000000000..91612cc99 --- /dev/null +++ b/crates/sovereign-sdk/rollup-interface/src/mmr/guest.rs @@ -0,0 +1,55 @@ +use alloc::vec::Vec; + +use borsh::{BorshDeserialize, BorshSerialize}; +use serde::{Deserialize, Serialize}; + +use super::{hash_pair, MMRChunk, MMRInclusionProof}; + +#[derive( + Default, Serialize, Deserialize, Eq, PartialEq, Clone, Debug, BorshDeserialize, BorshSerialize, +)] +pub struct MMRGuest { + pub subroots: Vec<[u8; 32]>, + pub size: u32, +} + +impl MMRGuest { + pub fn new() -> Self 
{ + MMRGuest { + subroots: Vec::new(), + size: 0, + } + } + + pub fn append(&mut self, chunk: MMRChunk) { + let mut current = chunk.wtxid; + let mut size = self.size; + + while size % 2 == 1 { + let sibling = self.subroots.pop().unwrap(); + current = hash_pair(sibling, current); + size /= 2; + } + + self.subroots.push(current); + self.size += 1; + } + + pub fn verify_proof(&self, chunk: &MMRChunk, mmr_proof: &MMRInclusionProof) -> bool { + let mut current_hash = chunk.wtxid; + + for (i, sibling) in mmr_proof.inclusion_proof.iter().enumerate() { + if mmr_proof.internal_idx & (1 << i) == 0 { + current_hash = hash_pair(current_hash, *sibling); + } else { + current_hash = hash_pair(*sibling, current_hash); + } + } + + if mmr_proof.subroot_idx >= self.subroots.len() as u32 { + return false; // Subroot index is out of bounds + } + + self.subroots[mmr_proof.subroot_idx as usize] == current_hash + } +} diff --git a/crates/sovereign-sdk/rollup-interface/src/mmr/mod.rs b/crates/sovereign-sdk/rollup-interface/src/mmr/mod.rs new file mode 100644 index 000000000..e2735473f --- /dev/null +++ b/crates/sovereign-sdk/rollup-interface/src/mmr/mod.rs @@ -0,0 +1,78 @@ +#![allow(missing_docs)] + +use alloc::vec::Vec; + +use anyhow::Result; +use borsh::{BorshDeserialize, BorshSerialize}; +use serde::{Deserialize, Serialize}; +use sha2::{Digest, Sha256}; + +mod guest; +#[cfg(any(feature = "native", feature = "testing"))] +mod native; +#[cfg(test)] +mod tests; + +pub use guest::*; +#[cfg(any(feature = "native", feature = "testing"))] +pub use native::*; + +pub type MMRNodeHash = [u8; 32]; +pub type Wtxid = [u8; 32]; + +pub trait NodeStore { + fn save_node(&mut self, level: u32, index: u32, hash: MMRNodeHash) -> Result<()>; + fn load_node(&self, level: u32, index: u32) -> Result>; + fn save_chunk(&mut self, wtxid: Wtxid, chunk: MMRChunk) -> Result<()>; + fn load_chunk(&self, wtxid: Wtxid) -> Result>; + fn get_tree_size(&self) -> u32; + fn set_tree_size(&mut self, size: u32) -> Result<()>; +} + +#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Debug, BorshDeserialize, BorshSerialize)] +pub struct MMRInclusionProof { + pub subroot_idx: u32, + pub internal_idx: u32, + pub inclusion_proof: Vec, +} + +impl MMRInclusionProof { + pub fn new(subroot_idx: u32, internal_idx: u32, inclusion_proof: Vec) -> Self { + MMRInclusionProof { + subroot_idx, + internal_idx, + inclusion_proof, + } + } + + pub fn get_subroot(&self, leaf: MMRNodeHash) -> MMRNodeHash { + let mut current_hash = leaf; + for (i, sibling) in self.inclusion_proof.iter().enumerate() { + if self.internal_idx & (1 << i) == 0 { + current_hash = hash_pair(current_hash, *sibling); + } else { + current_hash = hash_pair(*sibling, current_hash); + } + } + current_hash + } +} + +#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Debug, BorshDeserialize, BorshSerialize)] +pub struct MMRChunk { + pub wtxid: Wtxid, + pub body: Vec, +} + +impl MMRChunk { + pub fn new(wtxid: Wtxid, body: Vec) -> Self { + MMRChunk { wtxid, body } + } +} + +pub fn hash_pair(left: [u8; 32], right: [u8; 32]) -> [u8; 32] { + let mut hasher = Sha256::default(); + hasher.update(left); + hasher.update(right); + hasher.finalize().into() +} diff --git a/crates/sovereign-sdk/rollup-interface/src/mmr/native.rs b/crates/sovereign-sdk/rollup-interface/src/mmr/native.rs new file mode 100644 index 000000000..60982ee69 --- /dev/null +++ b/crates/sovereign-sdk/rollup-interface/src/mmr/native.rs @@ -0,0 +1,157 @@ +use alloc::vec; +use alloc::vec::Vec; +use std::collections::BTreeMap; + +use 
anyhow::Result; +use borsh::{BorshDeserialize, BorshSerialize}; +use serde::{Deserialize, Serialize}; + +use super::{hash_pair, MMRChunk, MMRInclusionProof, MMRNodeHash, NodeStore, Wtxid}; + +#[derive( + Default, Serialize, Deserialize, Eq, PartialEq, Clone, Debug, BorshDeserialize, BorshSerialize, +)] +pub struct MMRNative { + pub store: S, + pub cache: BTreeMap<(u32, u32), MMRNodeHash>, +} + +impl MMRNative { + pub fn new(store: S) -> Self { + let mut mmr = MMRNative { + store, + cache: BTreeMap::new(), + }; + mmr.recalculate_peaks().unwrap(); + mmr + } + + pub fn append(&mut self, chunk: MMRChunk) -> Result<()> { + let wtxid = chunk.wtxid; + self.store.save_chunk(wtxid, chunk)?; + let current_size = self.store.get_tree_size(); + self.store.save_node(0, current_size, wtxid)?; + self.cache.insert((0, current_size), wtxid); + self.store.set_tree_size(current_size + 1)?; + self.recalculate_peaks()?; + Ok(()) + } + + pub fn contains(&self, wtxid: Wtxid) -> Result { + self.store.load_chunk(wtxid).map(|chunk| chunk.is_some()) + } + + fn recalculate_peaks(&mut self) -> Result<()> { + let mut size = self.store.get_tree_size(); + let mut level = 0; + + while size > 1 { + if size % 2 == 0 { + let left = self.load_node(level, size - 2)?.unwrap(); + let right = self.load_node(level, size - 1)?.unwrap(); + let parent = hash_pair(left, right); + + self.store.save_node(level + 1, size / 2 - 1, parent)?; + self.cache.insert((level + 1, size / 2 - 1), parent); + } + size /= 2; + level += 1; + } + Ok(()) + } + + pub fn generate_proof( + &mut self, + wtxid: Wtxid, + ) -> Result> { + let Some(chunk) = self.store.load_chunk(wtxid)? else { + return Ok(None); + }; + let index = self + .find_chunk_index(chunk.wtxid)? + .expect("Found chunk in db but could not find its index"); + + let mut proof: Vec = vec![]; + let mut current_index = index; + let mut current_level = 0; + + while current_index % 2 == 1 || self.load_node(current_level, current_index + 1)?.is_some() + { + let sibling_index = if current_index % 2 == 0 { + current_index + 1 + } else { + current_index - 1 + }; + proof.push(self.load_node(current_level, sibling_index)?.unwrap()); + current_index /= 2; + current_level += 1; + } + + let (subroot_idx, internal_idx) = self.get_helpers_from_index(index); + let mmr_proof = MMRInclusionProof::new(subroot_idx, internal_idx, proof); + Ok(Some((chunk, mmr_proof))) + } + + fn load_node(&mut self, level: u32, index: u32) -> Result> { + if let Some(&hash) = self.cache.get(&(level, index)) { + Ok(Some(hash)) + } else { + let Some(node) = self.store.load_node(level, index)? else { + return Ok(None); + }; + + self.cache.insert((level, index), node); + + Ok(Some(node)) + } + } + + fn find_chunk_index(&mut self, hash: MMRNodeHash) -> Result> { + let size = self.store.get_tree_size(); + for i in 0..size { + if let Some(node_hash) = self.load_node(0, i)? 
{ + if node_hash == hash { + return Ok(Some(i)); + } + } + } + Ok(None) + } + + fn get_helpers_from_index(&self, index: u32) -> (u32, u32) { + let xor = self.store.get_tree_size() ^ index; + let xor_leading_digit = 31 - xor.leading_zeros(); + let internal_idx = index & ((1 << xor_leading_digit) - 1); + let leading_zeros_size = 31 - self.store.get_tree_size().leading_zeros(); + let mut subtree_idx = 0; + for i in xor_leading_digit + 1..=leading_zeros_size { + if self.store.get_tree_size() & (1 << i) != 0 { + subtree_idx += 1; + } + } + (subtree_idx, internal_idx) + } + + pub fn verify_proof(&mut self, chunk: MMRChunk, mmr_proof: &MMRInclusionProof) -> bool { + let subroot = mmr_proof.get_subroot(chunk.wtxid); + let subroots = self.get_subroots(); + subroots[mmr_proof.subroot_idx as usize] == subroot + } + + pub(crate) fn get_subroots(&mut self) -> Vec { + let mut subroots: Vec = vec![]; + let mut size = self.store.get_tree_size(); + let mut level = 0; + + while size > 0 { + if size % 2 == 1 { + let subroot = self.load_node(level, size - 1).ok().flatten().unwrap(); + subroots.push(subroot); + } + size /= 2; + level += 1; + } + subroots.reverse(); + subroots + } +} diff --git a/crates/sovereign-sdk/rollup-interface/src/mmr/tests.rs b/crates/sovereign-sdk/rollup-interface/src/mmr/tests.rs new file mode 100644 index 000000000..ed5021993 --- /dev/null +++ b/crates/sovereign-sdk/rollup-interface/src/mmr/tests.rs @@ -0,0 +1,186 @@ +use std::collections::BTreeMap; + +use super::*; + +#[derive(Clone)] +struct InMemoryStore { + storage: BTreeMap<(u32, u32), MMRNodeHash>, + chunks: BTreeMap, + tree_size: u32, +} + +impl InMemoryStore { + fn new() -> Self { + InMemoryStore { + storage: BTreeMap::new(), + chunks: BTreeMap::new(), + tree_size: 0, + } + } +} + +impl NodeStore for InMemoryStore { + fn save_node(&mut self, level: u32, index: u32, hash: MMRNodeHash) -> Result<()> { + self.storage.insert((level, index), hash); + Ok(()) + } + + fn load_node(&self, level: u32, index: u32) -> Result> { + Ok(self.storage.get(&(level, index)).cloned()) + } + + fn save_chunk(&mut self, hash: MMRNodeHash, chunk: MMRChunk) -> Result<()> { + self.chunks.insert(hash, chunk); + Ok(()) + } + + fn load_chunk(&self, hash: MMRNodeHash) -> Result> { + Ok(self.chunks.get(&hash).cloned()) + } + + fn get_tree_size(&self) -> u32 { + self.tree_size + } + + fn set_tree_size(&mut self, size: u32) -> Result<()> { + self.tree_size = size; + Ok(()) + } +} + +#[test] +fn test_mmr_native() { + let mut mmr = MMRNative::new(InMemoryStore::new()); + let mut nodes = vec![]; + + for i in 0..42 { + let wtxid = [i as u8; 32]; + let body = vec![i as u8; 8]; + let node = MMRChunk::new(wtxid, body); + nodes.push(node.clone()); + + mmr.append(node).unwrap(); + + for j in 0..=i { + let proof_node = nodes[j as usize].clone(); + let (node, mmr_proof) = mmr.generate_proof(proof_node.wtxid).ok().flatten().unwrap(); + assert!(mmr.verify_proof(node.clone(), &mmr_proof)); + } + } +} + +#[test] +fn test_mmr_native_simple() { + let store = InMemoryStore::new(); + let mut mmr = MMRNative::new(store.clone()); + + let chunk1 = MMRChunk::new([1; 32], vec![10, 20, 30]); + let chunk2 = MMRChunk::new([2; 32], vec![40, 50, 60]); + let chunk3 = MMRChunk::new([3; 32], vec![70, 80, 90]); + + mmr.append(chunk1.clone()).unwrap(); + mmr.append(chunk2.clone()).unwrap(); + mmr.append(chunk3.clone()).unwrap(); + + let proof = mmr.generate_proof([1; 32]).unwrap(); + assert!(proof.is_some()); + let (chunk, mmr_proof) = proof.unwrap(); + assert_eq!(chunk, chunk1); + 
assert!(mmr.verify_proof(chunk, &mmr_proof)); +} + +#[test] +fn test_native_proof_with_guest_verification() { + let mut mmr_native = MMRNative::new(InMemoryStore::new()); + let mut mmr_guest = MMRGuest::new(); + + for i in 0..42 { + let wtxid = [i as u8; 32]; + let body = vec![i as u8; 8]; + let node = MMRChunk::new(wtxid, body); + + // Append to both Native and Guest + mmr_native.append(node.clone()).unwrap(); + mmr_guest.append(node.clone()); + + // Generate proof in Native and verify in Guest + for j in 0..=i { + let proof_node = MMRChunk::new([j as u8; 32], vec![j as u8; 8]); + let (_, mmr_proof) = mmr_native + .generate_proof(proof_node.wtxid) + .ok() + .flatten() + .unwrap(); + + // Verify proof using Guest + assert!(mmr_guest.verify_proof(&proof_node, &mmr_proof)); + } + } +} + +#[test] +fn test_consistency_between_native_and_guest() { + let mut mmr_native = MMRNative::new(InMemoryStore::new()); + let mut mmr_guest = MMRGuest::new(); + + for i in 0..10 { + let wtxid = [i as u8; 32]; + let body = vec![i as u8; 8]; + let node = MMRChunk::new(wtxid, body); + + mmr_native.append(node.clone()).unwrap(); + mmr_guest.append(node.clone()); + } + + // Check subroots consistency + let native_subroots = mmr_native.get_subroots(); + assert_eq!(native_subroots, mmr_guest.subroots); +} + +#[test] +fn test_large_dataset_verification() { + let mut mmr_native = MMRNative::new(InMemoryStore::new()); + let mut mmr_guest = MMRGuest::new(); + let mut nodes = vec![]; + + for i in 0..100 { + let wtxid = [i as u8; 32]; + let body = vec![i as u8; 16]; + let node = MMRChunk::new(wtxid, body); + nodes.push(node.clone()); + + mmr_native.append(node.clone()).unwrap(); + mmr_guest.append(node.clone()); + } + + for node in nodes { + let (_, mmr_proof) = mmr_native + .generate_proof(node.wtxid) + .ok() + .flatten() + .unwrap(); + assert!(mmr_guest.verify_proof(&node, &mmr_proof)); + } +} + +#[test] +fn test_mmr_with_store() { + let store = InMemoryStore::new(); + let mut mmr = MMRNative::new(store); + + for i in 0..42 { + let wtxid = [i as u8; 32]; + let body = vec![i as u8; 8]; + let node = MMRChunk::new(wtxid, body); + mmr.append(node).unwrap(); + } + + let mut mmr = MMRNative::new(mmr.store.clone()); + for i in 0..42 { + let wtxid = [i as u8; 32]; + let body = vec![i as u8; 8]; + let node = MMRChunk::new(wtxid, body); + let (_, proof) = mmr.generate_proof(wtxid).ok().flatten().unwrap(); + assert!(mmr.verify_proof(node, &proof)); + } +} diff --git a/crates/sovereign-sdk/rollup-interface/src/node/services/da.rs b/crates/sovereign-sdk/rollup-interface/src/node/services/da.rs index 002c67b3c..64ad9fb39 100644 --- a/crates/sovereign-sdk/rollup-interface/src/node/services/da.rs +++ b/crates/sovereign-sdk/rollup-interface/src/node/services/da.rs @@ -107,6 +107,9 @@ pub trait DaService: Send + Sync + 'static { ::CompletenessProof, ); + /// Decompress and deserialize the chunks into a single complete proof. + fn decompress_chunks(&self, complete_chunks: Vec) -> Result, Self::Error>; + /// Send a transaction directly to the DA layer. /// blob is the serialized and signed transaction. /// Returns nothing if the transaction was successfully sent. 
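A minimal sketch of how the pieces above fit together, assuming only the items introduced in this diff (`MMRNative`, `NodeStore`, `MMRChunk`, `MMRInclusionProof`, `Wtxid`, `DaService::decompress_chunks`); the function name `reassemble_aggregate`, its exact signature, and its error handling are illustrative rather than taken from the handler code. Chunk bodies referenced by an `Aggregate` are looked up via MMR inclusion proofs, concatenated, and decompressed back into the original complete batch proof, while the proofs themselves are collected as hints for the guest.

use sov_rollup_interface::mmr::{MMRChunk, MMRInclusionProof, MMRNative, NodeStore, Wtxid};
use sov_rollup_interface::services::da::DaService;

// Illustrative only: a simplified version of the aggregate branch in the L1 block
// handler, without the same-block `unused_chunks` shortcut.
fn reassemble_aggregate<S: NodeStore, Da: DaService>(
    mmr_native: &mut MMRNative<S>,
    da_service: &Da,
    wtxids: &[Wtxid],
    mmr_hints: &mut Vec<Option<(MMRChunk, MMRInclusionProof)>>,
) -> anyhow::Result<Option<Vec<u8>>>
where
    Da::Error: std::fmt::Debug,
{
    let mut bodies: Vec<Vec<u8>> = Vec::new();
    for wtxid in wtxids {
        match mmr_native.generate_proof(*wtxid)? {
            Some((chunk, proof)) => {
                // The chunk body contributes to the reassembled blob; the inclusion
                // proof becomes a hint the guest checks against its MMRGuest subroots.
                bodies.push(chunk.body.clone());
                mmr_hints.push(Some((chunk, proof)));
            }
            None => {
                // A chunk we have never seen: the aggregate cannot be proven yet.
                mmr_hints.push(None);
                return Ok(None);
            }
        }
    }
    // Concatenate the chunk bodies and let the DA layer undo the chunk-level
    // compression, yielding the original complete proof bytes.
    let complete = da_service
        .decompress_chunks(bodies.into_iter().flatten().collect())
        .map_err(|e| anyhow::anyhow!("decompress_chunks failed: {:?}", e))?;
    Ok(Some(complete))
}

In the handler itself the same flow runs inline, with chunks already observed in the current L1 block taken from `unused_chunks` instead of the MMR.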
diff --git a/crates/sovereign-sdk/rollup-interface/src/spec.rs b/crates/sovereign-sdk/rollup-interface/src/spec.rs index bf303ed88..a5c1e6da7 100644 --- a/crates/sovereign-sdk/rollup-interface/src/spec.rs +++ b/crates/sovereign-sdk/rollup-interface/src/spec.rs @@ -19,6 +19,7 @@ use serde::{Deserialize, Serialize}; Deserialize, Hash, )] +#[repr(u8)] #[borsh(use_discriminant = true)] pub enum SpecId { /// Genesis spec diff --git a/crates/sovereign-sdk/rollup-interface/src/state_machine/da.rs b/crates/sovereign-sdk/rollup-interface/src/state_machine/da.rs index eb15938b0..720a4553a 100644 --- a/crates/sovereign-sdk/rollup-interface/src/state_machine/da.rs +++ b/crates/sovereign-sdk/rollup-interface/src/state_machine/da.rs @@ -60,7 +60,7 @@ pub enum DaDataLightClient { /// A zk proof and state diff Complete(Proof), /// A list of tx ids - Aggregate(Vec<[u8; 32]>), + Aggregate(Vec<[u8; 32]>, Vec<[u8; 32]>), /// A chunk of an aggregate Chunk(Vec), /// A new batch proof method_id @@ -178,6 +178,9 @@ pub trait DaVerifier: Send + Sync { block_header: &::BlockHeader, network: Network, ) -> Result; + + /// Decompress chunks to complete + fn decompress_chunks(&self, complete_chunks: Vec) -> Result, Self::Error>; } #[cfg(feature = "std")] @@ -260,6 +263,9 @@ pub trait BlobReaderTrait: /// Returns the hash of the blob as it appears on the DA layer fn hash(&self) -> [u8; 32]; + /// Returns the witness transaction ID of the blob as it appears on the DA layer + fn wtxid(&self) -> Option<[u8; 32]>; + /// Returns a slice containing all the data accessible to the rollup at this point in time. /// When running in native mode, the rollup can extend this slice by calling `advance`. In zk-mode, /// the rollup is limited to only the verified data. @@ -290,6 +296,9 @@ pub trait BlobReaderTrait: fn full_data(&mut self) -> &[u8] { self.advance(self.total_len()) } + + /// Weird method to serialize blob as v1. Should be removed when a better way is introduced in the future. + fn serialize_v1(&self) -> borsh::io::Result>; } /// Trait with collection of trait bounds for a block hash. diff --git a/crates/sovereign-sdk/rollup-interface/src/state_machine/zk/mod.rs b/crates/sovereign-sdk/rollup-interface/src/state_machine/zk/mod.rs index a5c28d210..cada817ad 100644 --- a/crates/sovereign-sdk/rollup-interface/src/state_machine/zk/mod.rs +++ b/crates/sovereign-sdk/rollup-interface/src/state_machine/zk/mod.rs @@ -18,8 +18,10 @@ use borsh::{BorshDeserialize, BorshSerialize}; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; +use super::da::BlobReaderTrait; use super::soft_confirmation::SignedSoftConfirmationV1; use crate::da::{DaSpec, LatestDaState}; +use crate::mmr::{MMRChunk, MMRGuest, MMRInclusionProof}; use crate::soft_confirmation::SignedSoftConfirmation; use crate::spec::SpecId; @@ -296,7 +298,14 @@ impl, + /// A map from tx hash to chunk data. + /// MMRGuest is an impl. MMR, which only needs to hold considerably small amount of data. + /// like 32 hashes and some u64 + pub mmr_guest: MMRGuest, } /// The input of light client proof @@ -425,13 +438,13 @@ pub struct LightClientCircuitInput { pub completeness_proof: Da::CompletenessProof, /// DA block header that the batch proofs were found in. pub da_block_header: Da::BlockHeader, - /// Light client proof method id pub light_client_proof_method_id: [u32; 8], /// Light client proof output /// Optional because the first light client proof doesn't have a previous proof pub previous_light_client_proof_journal: Option>, - + /// Hints for the guest MMR tree. 
+    pub mmr_hints: VecDeque<Option<(MMRChunk, MMRInclusionProof)>>,
     /// Hint for which proofs are expected to fail
     ///
     /// Note: Indices are u32 even though we don't expect that many proofs