diff --git a/bin/citrea/provers/risc0/guest-bitcoin/Cargo.lock b/bin/citrea/provers/risc0/guest-bitcoin/Cargo.lock index bd747c648..785ce103f 100644 --- a/bin/citrea/provers/risc0/guest-bitcoin/Cargo.lock +++ b/bin/citrea/provers/risc0/guest-bitcoin/Cargo.lock @@ -3852,9 +3852,9 @@ dependencies = [ [[package]] name = "zerovec" -version = "0.10.2" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2cc8827d6c0994478a15c53f374f46fbd41bea663d809b14744bc42e6b109c" +checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" dependencies = [ "yoke", "zerofrom", @@ -3863,9 +3863,9 @@ dependencies = [ [[package]] name = "zerovec-derive" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97cf56601ee5052b4417d90c8755c6683473c926039908196cf35d99f893ebe7" +checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", diff --git a/bin/citrea/provers/risc0/guest-mock/Cargo.lock b/bin/citrea/provers/risc0/guest-mock/Cargo.lock index 5fc6d7a1c..20f3c7897 100644 --- a/bin/citrea/provers/risc0/guest-mock/Cargo.lock +++ b/bin/citrea/provers/risc0/guest-mock/Cargo.lock @@ -3640,9 +3640,9 @@ dependencies = [ [[package]] name = "zerovec" -version = "0.10.2" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2cc8827d6c0994478a15c53f374f46fbd41bea663d809b14744bc42e6b109c" +checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" dependencies = [ "yoke", "zerofrom", @@ -3651,9 +3651,9 @@ dependencies = [ [[package]] name = "zerovec-derive" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97cf56601ee5052b4417d90c8755c6683473c926039908196cf35d99f893ebe7" +checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", diff --git a/bin/citrea/src/lib.rs b/bin/citrea/src/lib.rs index 7055d09a3..7c0f4e2ea 100644 --- a/bin/citrea/src/lib.rs +++ b/bin/citrea/src/lib.rs @@ -23,6 +23,7 @@ pub fn initialize_logging(level: Level) { "alloy_transport_http=info".to_owned(), // Limit output as much as possible, use WARN. "risc0_zkvm=warn".to_owned(), + "risc0_circuit_rv32im=info".to_owned(), "guest_execution=info".to_owned(), "jsonrpsee-server=info".to_owned(), "reqwest=info".to_owned(), diff --git a/bin/citrea/tests/e2e/mod.rs b/bin/citrea/tests/e2e/mod.rs index ed38cce30..2dc753b16 100644 --- a/bin/citrea/tests/e2e/mod.rs +++ b/bin/citrea/tests/e2e/mod.rs @@ -1106,20 +1106,9 @@ async fn test_soft_confirmations_status_one_l1() -> Result<(), anyhow::Error> { assert_eq!(SoftConfirmationStatus::Trusted, status_node.unwrap()); } - // publish new da block - // - // This will trigger the sequencer's DA monitor to see a newly published - // block and will therefore initiate a commitment submission to the MockDA. - // Therefore, creating yet another DA block. - da_service.publish_test_block().await.unwrap(); - - // The above L1 block has been created, - // we wait until the block is actually received by the DA monitor. - wait_for_l1_block(&da_service, 2, None).await; - - // Wait for DA block #3 containing the commitment + // Wait for DA block #2 containing the commitment // submitted by sequencer. 
- wait_for_l1_block(&da_service, 3, None).await; + wait_for_l1_block(&da_service, 2, None).await; // now retrieve confirmation status from the sequencer and full node and check if they are the same for i in 1..=6 { @@ -1159,41 +1148,32 @@ async fn test_soft_confirmations_status_two_l1() -> Result<(), anyhow::Error> { .await; // first publish a few blocks fast make it land in the same da block - for _ in 1..=2 { + for _ in 1..=3 { seq_test_client.send_publish_batch_request().await; } - wait_for_l2_block(&seq_test_client, 2, None).await; - - // publish new da block - da_service.publish_test_block().await.unwrap(); + wait_for_l2_block(&seq_test_client, 3, None).await; + // L2 blocks 1-3 would create an L1 block with commitment wait_for_l1_block(&da_service, 2, None).await; - for _ in 2..=6 { + for _ in 4..=6 { seq_test_client.send_publish_batch_request().await; } - wait_for_l2_block(&full_node_test_client, 7, None).await; + wait_for_l2_block(&full_node_test_client, 6, None).await; + // L2 blocks 4-6 would create an L1 block with commitment + wait_for_l1_block(&da_service, 3, None).await; // now retrieve confirmation status from the sequencer and full node and check if they are the same - for i in 1..=2 { + for i in 1..=3 { let status_node = full_node_test_client .ledger_get_soft_confirmation_status(i) .await .unwrap(); - assert_eq!(SoftConfirmationStatus::Trusted, status_node.unwrap()); + assert_eq!(SoftConfirmationStatus::Finalized, status_node.unwrap()); } - // publish new da block - da_service.publish_test_block().await.unwrap(); - wait_for_l1_block(&da_service, 3, None).await; - - seq_test_client.send_publish_batch_request().await; - seq_test_client.send_publish_batch_request().await; - - wait_for_l2_block(&full_node_test_client, 9, None).await; - // Check that these L2 blocks are bounded on different L1 block let mut batch_infos = vec![]; for i in 1..=6 { @@ -1203,10 +1183,16 @@ async fn test_soft_confirmations_status_two_l1() -> Result<(), anyhow::Error> { .unwrap(); batch_infos.push(full_node_soft_conf); } - assert_eq!(batch_infos[0].da_slot_height, batch_infos[1].da_slot_height); - assert!(batch_infos[2..] + + // First three blocks got created on L1 height 1. 
+ assert!(batch_infos[0..3] + .iter() + .all(|x| { x.da_slot_height == batch_infos[0].da_slot_height })); + + // Blocks 4, 5, 6 were created on L1 height 2 + assert!(batch_infos[3..6] .iter() - .all(|x| x.da_slot_height == batch_infos[2].da_slot_height)); + .all(|x| { x.da_slot_height == batch_infos[3].da_slot_height })); assert_ne!(batch_infos[0].da_slot_height, batch_infos[5].da_slot_height); // now retrieve confirmation status from the sequencer and full node and check if they are the same @@ -1381,7 +1367,7 @@ async fn test_prover_sync_with_commitments() -> Result<(), anyhow::Error> { #[tokio::test(flavor = "multi_thread")] async fn test_reopen_prover() -> Result<(), anyhow::Error> { - citrea::initialize_logging(tracing::Level::INFO); + // citrea::initialize_logging(tracing::Level::INFO); let storage_dir = tempdir_with_children(&["DA", "sequencer", "prover"]); let da_db_dir = storage_dir.path().join("DA").to_path_buf(); @@ -2070,7 +2056,7 @@ async fn sequencer_crash_and_replace_full_node() -> Result<(), anyhow::Error> { assert_eq!(commitments[0].l2_start_height, 1); assert_eq!(commitments[0].l2_end_height, 4); assert_eq!(commitments[1].l2_start_height, 5); - assert_eq!(commitments[1].l2_end_height, 9); + assert_eq!(commitments[1].l2_end_height, 8); seq_task.abort(); @@ -2421,11 +2407,8 @@ async fn test_db_get_proof() { test_client.send_publish_batch_request().await; wait_for_l2_block(&test_client, 4, None).await; - da_service.publish_test_block().await.unwrap(); // Commitment wait_for_l1_block(&da_service, 3, None).await; - // Proof - wait_for_l1_block(&da_service, 4, None).await; // wait here until we see from prover's rpc that it finished proving wait_for_prover_l1_height( @@ -2438,7 +2421,7 @@ async fn test_db_get_proof() { wait_for_postgres_proofs(&db_test_client, 1, Some(Duration::from_secs(60))).await; let ledger_proof = prover_node_test_client - .ledger_get_proof_by_slot_height(4) + .ledger_get_proof_by_slot_height(3) .await; let db_proofs = db_test_client.get_all_proof_data().await.unwrap(); @@ -2570,27 +2553,23 @@ async fn full_node_verify_proof_and_store() { test_client.send_publish_batch_request().await; wait_for_l2_block(&full_node_test_client, 4, None).await; - // submits with new da block, triggers commitment submission. - da_service.publish_test_block().await.unwrap(); - // This is the above block created. 
- wait_for_l1_block(&da_service, 3, None).await; // Commitment submitted - wait_for_l1_block(&da_service, 4, None).await; + wait_for_l1_block(&da_service, 3, None).await; // Full node sync commitment block test_client.send_publish_batch_request().await; - wait_for_l2_block(&full_node_test_client, 6, None).await; + wait_for_l2_block(&full_node_test_client, 5, None).await; // wait here until we see from prover's rpc that it finished proving wait_for_prover_l1_height( &prover_node_test_client, - 5, + 4, Some(Duration::from_secs(DEFAULT_PROOF_WAIT_DURATION)), ) .await; let commitments = prover_node_test_client - .ledger_get_sequencer_commitments_on_slot_by_number(4) + .ledger_get_sequencer_commitments_on_slot_by_number(3) .await .unwrap() .unwrap(); @@ -2599,37 +2578,37 @@ async fn full_node_verify_proof_and_store() { assert_eq!(commitments[0].l2_start_block_number, 1); assert_eq!(commitments[0].l2_end_block_number, 4); - assert_eq!(commitments[0].found_in_l1, 4); + assert_eq!(commitments[0].found_in_l1, 3); - let fourth_block_hash = da_service.get_block_at(4).await.unwrap().header.hash; + let third_block_hash = da_service.get_block_at(3).await.unwrap().header.hash; let commitments_hash = prover_node_test_client - .ledger_get_sequencer_commitments_on_slot_by_hash(fourth_block_hash.0) + .ledger_get_sequencer_commitments_on_slot_by_hash(third_block_hash.0) .await .unwrap() .unwrap(); assert_eq!(commitments_hash, commitments); let prover_proof = prover_node_test_client - .ledger_get_proof_by_slot_height(4) + .ledger_get_proof_by_slot_height(3) .await; - // The proof will be in l1 block #5 because prover publishes it after the commitment and + // The proof will be in l1 block #4 because prover publishes it after the commitment and // in mock da submitting proof and commitments creates a new block. - // For full node to see the proof, we publish another l2 block and now it will check #5 l1 block - wait_for_l1_block(&da_service, 5, None).await; + // For full node to see the proof, we publish another l2 block and now it will check #4 l1 block + wait_for_l1_block(&da_service, 4, None).await; // Up until this moment, Full node has only seen 2 DA blocks. - // We need to force it to sync up to 5th DA block. - for i in 7..=8 { + // We need to force it to sync up to 4th DA block. 
+ for i in 6..=7 {
test_client.send_publish_batch_request().await;
wait_for_l2_block(&full_node_test_client, i, None).await;
}
- // So the full node should see the proof in block 5
- wait_for_proof(&full_node_test_client, 5, Some(Duration::from_secs(60))).await;
+ // So the full node should see the proof in block 4
+ wait_for_proof(&full_node_test_client, 4, Some(Duration::from_secs(60))).await;
let full_node_proof = full_node_test_client
- .ledger_get_verified_proofs_by_slot_height(5)
+ .ledger_get_verified_proofs_by_slot_height(4)
.await
.unwrap();
assert_eq!(prover_proof.proof, full_node_proof[0].proof);
@@ -2783,18 +2762,8 @@ async fn test_all_flow() {
test_client.send_publish_batch_request().await;
wait_for_l2_block(&test_client, 4, None).await;
- // Submit commitment
- da_service.publish_test_block().await.unwrap();
// Commitment
wait_for_l1_block(&da_service, 3, None).await;
- // Proof
- wait_for_l1_block(&da_service, 4, None).await;
- // Full node sync - commitment DA
- test_client.send_publish_batch_request().await;
- wait_for_l2_block(&full_node_test_client, 5, None).await;
- // Full node sync - Proof DA
- test_client.send_publish_batch_request().await;
- wait_for_l2_block(&full_node_test_client, 6, None).await;
// wait here until we see from prover's rpc that it finished proving
wait_for_prover_l1_height(
@@ -2805,7 +2774,7 @@
.await;
let commitments = prover_node_test_client
- .ledger_get_sequencer_commitments_on_slot_by_number(4)
+ .ledger_get_sequencer_commitments_on_slot_by_number(3)
.await
.unwrap()
.unwrap();
@@ -2814,19 +2783,19 @@
assert_eq!(commitments[0].l2_start_block_number, 1);
assert_eq!(commitments[0].l2_end_block_number, 4);
- assert_eq!(commitments[0].found_in_l1, 4);
+ assert_eq!(commitments[0].found_in_l1, 3);
- let fourth_block_hash = da_service.get_block_at(4).await.unwrap().header.hash;
+ let third_block_hash = da_service.get_block_at(3).await.unwrap().header.hash;
let commitments_hash = prover_node_test_client
- .ledger_get_sequencer_commitments_on_slot_by_hash(fourth_block_hash.0)
+ .ledger_get_sequencer_commitments_on_slot_by_hash(third_block_hash.0)
.await
.unwrap()
.unwrap();
assert_eq!(commitments_hash, commitments);
let prover_proof = prover_node_test_client
- .ledger_get_proof_by_slot_height(4)
+ .ledger_get_proof_by_slot_height(3)
.await;
let db_proofs = db_test_client.get_all_proof_data().await.unwrap();
@@ -2842,17 +2811,17 @@
);
assert_eq!(db_proofs[0].l1_tx_id, prover_proof.l1_tx_id);
- // the proof will be in l1 block #5 because prover publishes it after the commitment and in mock da submitting proof and commitments creates a new block
- // For full node to see the proof, we publish another l2 block and now it will check #5 l1 block
- // 7th soft batch
- wait_for_l1_block(&da_service, 5, None).await;
+ // the proof will be in l1 block #4 because prover publishes it after the commitment and in mock da submitting proof and commitments creates a new block
+ // For full node to see the proof, we publish another l2 block and now it will check #4 l1 block
+ // 6th soft batch
+ wait_for_l1_block(&da_service, 4, None).await;
test_client.send_publish_batch_request().await;
- wait_for_l2_block(&full_node_test_client, 7, None).await;
+ wait_for_l2_block(&full_node_test_client, 6, None).await;
- // So the full node should see the proof in block 5
- wait_for_proof(&full_node_test_client, 5, Some(Duration::from_secs(120))).await;
+ // So the full node should see the proof in block 4
+ wait_for_proof(&full_node_test_client, 4,
Some(Duration::from_secs(120))).await; let full_node_proof = full_node_test_client - .ledger_get_verified_proofs_by_slot_height(5) + .ledger_get_verified_proofs_by_slot_height(4) .await .unwrap(); @@ -2901,34 +2870,32 @@ async fn test_all_flow() { .send_eth(addr, None, None, None, 1e18 as u128) .await .unwrap(); - // 8th soft batch - test_client.send_publish_batch_request().await; - wait_for_l2_block(&full_node_test_client, 8, None).await; - // Submit a commitment - da_service.publish_test_block().await.unwrap(); + for i in 7..=8 { + test_client.send_publish_batch_request().await; + wait_for_l2_block(&full_node_test_client, i, None).await; + } + // Commitment - wait_for_l1_block(&da_service, 6, None).await; - // Proof - wait_for_l1_block(&da_service, 7, None).await; + wait_for_l1_block(&da_service, 5, None).await; // wait here until we see from prover's rpc that it finished proving wait_for_prover_l1_height( &prover_node_test_client, - 7, + 5, Some(Duration::from_secs(DEFAULT_PROOF_WAIT_DURATION)), ) .await; let commitments = prover_node_test_client - .ledger_get_sequencer_commitments_on_slot_by_number(7) + .ledger_get_sequencer_commitments_on_slot_by_number(5) .await .unwrap() .unwrap(); assert_eq!(commitments.len(), 1); let prover_proof_data = prover_node_test_client - .ledger_get_proof_by_slot_height(7) + .ledger_get_proof_by_slot_height(5) .await; let db_proofs = db_test_client.get_all_proof_data().await.unwrap(); @@ -2943,15 +2910,9 @@ async fn test_all_flow() { prover_proof_data.state_transition.sequencer_public_key ); - // let full node see the proof - for i in 9..13 { - test_client.send_publish_batch_request().await; - wait_for_l2_block(&full_node_test_client, i, None).await; - } - - wait_for_proof(&full_node_test_client, 8, Some(Duration::from_secs(120))).await; + wait_for_proof(&full_node_test_client, 6, Some(Duration::from_secs(120))).await; let full_node_proof_data = full_node_test_client - .ledger_get_verified_proofs_by_slot_height(8) + .ledger_get_verified_proofs_by_slot_height(6) .await .unwrap(); @@ -2984,16 +2945,13 @@ async fn test_all_flow() { assert_eq!(status, SoftConfirmationStatus::Proven); } - wait_for_l2_block(&test_client, 14, None).await; - assert_eq!(test_client.eth_block_number().await, 14); - // Synced up to the latest block - wait_for_l2_block(&full_node_test_client, 14, Some(Duration::from_secs(60))).await; - assert!(full_node_test_client.eth_block_number().await >= 14); + wait_for_l2_block(&full_node_test_client, 8, Some(Duration::from_secs(60))).await; + assert!(full_node_test_client.eth_block_number().await == 8); // Synced up to the latest commitment - wait_for_l2_block(&prover_node_test_client, 9, Some(Duration::from_secs(60))).await; - assert!(prover_node_test_client.eth_block_number().await >= 9); + wait_for_l2_block(&prover_node_test_client, 8, Some(Duration::from_secs(60))).await; + assert!(prover_node_test_client.eth_block_number().await == 8); seq_task.abort(); prover_node_task.abort(); @@ -3301,9 +3259,9 @@ async fn test_full_node_sync_status() { #[tokio::test(flavor = "multi_thread")] async fn test_sequencer_commitment_threshold() { - // citrea::initialize_logging(tracing::Level::DEBUG); + citrea::initialize_logging(tracing::Level::DEBUG); - let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let storage_dir = tempdir_with_children(&["DA", "sequencer"]); let da_db_dir = storage_dir.path().join("DA").to_path_buf(); let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); @@ -3313,7 +3271,10 @@ async fn 
test_sequencer_commitment_threshold() {
.await
.unwrap();
- let mut sequencer_config = create_default_sequencer_config(4, Some(true), 10);
+ // Put a large number for commitment threshold
+ let min_soft_confirmations_per_commitment = 1_000_000;
+ let mut sequencer_config =
+ create_default_sequencer_config(min_soft_confirmations_per_commitment, Some(true), 10);
sequencer_config.db_config = Some(SharedBackupDbConfig::default().set_db_name(psql_db_name));
sequencer_config.mempool_conf = SequencerMempoolConfig {
@@ -3324,7 +3285,7 @@
let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel();
let da_db_dir_cloned = da_db_dir.clone();
- let seq_task = tokio::spawn(async {
+ let seq_task = tokio::spawn(async move {
start_rollup(
seq_port_tx,
GenesisPaths::from_dir(TEST_DATA_GENESIS_PATH),
@@ -3332,7 +3293,7 @@
NodeMode::SequencerNode,
sequencer_db_dir,
da_db_dir_cloned,
- 1_000_000, // Put a large number for commitment threshold
+ min_soft_confirmations_per_commitment,
true,
None,
Some(sequencer_config),
diff --git a/bin/citrea/tests/sequencer_commitments/mod.rs b/bin/citrea/tests/sequencer_commitments/mod.rs
index a71dcd1d0..a595032d8 100644
--- a/bin/citrea/tests/sequencer_commitments/mod.rs
+++ b/bin/citrea/tests/sequencer_commitments/mod.rs
@@ -90,12 +90,9 @@
test_client.send_publish_batch_request().await;
wait_for_l2_block(&test_client, 4, None).await;
- // Trigger a commitment
- da_service.publish_test_block().await.unwrap();
+ // The previous L2 block triggers a commitment
+ // which will create a new L1 block.
wait_for_l1_block(&da_service, 3, None).await;
- // The previous L1 block triggers a commitment
- // which will create yet another L1 block.
- wait_for_l1_block(&da_service, 4, None).await;
let start_l2_block: u64 = 1;
let end_l2_block: u64 = 4;
@@ -113,15 +110,10 @@
test_client.send_publish_batch_request().await;
}
wait_for_l2_block(&test_client, 8, None).await;
-
- da_service.publish_test_block().await.unwrap();
wait_for_l1_block(&da_service, 4, None).await;
- wait_for_l1_block(&da_service, 5, None).await;
-
- wait_for_l2_block(&test_client, 9, None).await;
let start_l2_block: u64 = end_l2_block + 1;
- let end_l2_block: u64 = end_l2_block + 5; // can only be the block before the one comitment landed in
+ let end_l2_block: u64 = end_l2_block + 4; // can only be the block before the one the commitment landed in
check_sequencer_commitment(
test_client.as_ref(),
@@ -331,18 +323,15 @@ async fn test_ledger_get_commitments_on_slot() {
test_client.send_publish_batch_request().await;
test_client.send_publish_batch_request().await;
test_client.send_publish_batch_request().await;
- da_service.publish_test_block().await.unwrap();
wait_for_l1_block(&da_service, 3, None).await;
- // Commit
- wait_for_l1_block(&da_service, 4, None).await;
// full node gets the commitment
test_client.send_publish_batch_request().await;
- wait_for_l2_block(&full_node_test_client, 6, None).await;
+ wait_for_l2_block(&full_node_test_client, 5, None).await;
let commitments = full_node_test_client
- .ledger_get_sequencer_commitments_on_slot_by_number(4)
+ .ledger_get_sequencer_commitments_on_slot_by_number(3)
.await
.unwrap()
.unwrap();
@@ -351,12 +340,12 @@
assert_eq!(commitments[0].l2_start_block_number, 1);
assert_eq!(commitments[0].l2_end_block_number, 4);
- assert_eq!(commitments[0].found_in_l1, 4);
+ assert_eq!(commitments[0].found_in_l1, 3);
- let fourth_block_hash = da_service.get_block_at(4).await.unwrap().header.hash;
+ let third_block_hash = da_service.get_block_at(3).await.unwrap().header.hash;
let commitments_hash = full_node_test_client
- .ledger_get_sequencer_commitments_on_slot_by_hash(fourth_block_hash.0)
+ .ledger_get_sequencer_commitments_on_slot_by_hash(third_block_hash.0)
.await
.unwrap()
.unwrap();
@@ -436,21 +425,19 @@ async fn test_ledger_get_commitments_on_slot_prover() {
test_client.send_publish_batch_request().await;
wait_for_l2_block(&test_client, 4, None).await;
- da_service.publish_test_block().await.unwrap();
-
- wait_for_l1_block(&da_service, 3, None).await;
// Commitment
- wait_for_l1_block(&da_service, 4, None).await;
+ wait_for_l1_block(&da_service, 3, None).await;
// wait here until we see from prover's rpc that it finished proving
wait_for_prover_l1_height(
&prover_node_test_client,
- 5,
+ 4,
Some(Duration::from_secs(DEFAULT_PROOF_WAIT_DURATION)),
)
.await;
let commitments = prover_node_test_client
- .ledger_get_sequencer_commitments_on_slot_by_number(4)
+ .ledger_get_sequencer_commitments_on_slot_by_number(3)
.await
.unwrap()
.unwrap();
@@ -459,12 +446,12 @@
assert_eq!(commitments[0].l2_start_block_number, 1);
assert_eq!(commitments[0].l2_end_block_number, 4);
- assert_eq!(commitments[0].found_in_l1, 4);
+ assert_eq!(commitments[0].found_in_l1, 3);
- let fourth_block_hash = da_service.get_block_at(4).await.unwrap().header.hash;
+ let third_block_hash = da_service.get_block_at(3).await.unwrap().header.hash;
let commitments_hash = prover_node_test_client
- .ledger_get_sequencer_commitments_on_slot_by_hash(fourth_block_hash.0)
+ .ledger_get_sequencer_commitments_on_slot_by_hash(third_block_hash.0)
.await
.unwrap()
.unwrap();
diff --git a/crates/sequencer/src/commitment_controller.rs b/crates/sequencer/src/commitment_controller.rs
index 2a894d93e..18e79f39f 100644
--- a/crates/sequencer/src/commitment_controller.rs
+++ b/crates/sequencer/src/commitment_controller.rs
@@ -1,21 +1,17 @@
use std::ops::RangeInclusive;
-use anyhow::{anyhow, bail};
+use anyhow::anyhow;
use rs_merkle::algorithms::Sha256;
use rs_merkle::MerkleTree;
use sov_db::ledger_db::LedgerDB;
-use sov_db::schema::types::{BatchNumber, SlotNumber};
+use sov_db::schema::types::BatchNumber;
use sov_rollup_interface::da::SequencerCommitment;
-use sov_rollup_interface::rpc::LedgerRpcProvider;
use tracing::{debug, instrument};
#[derive(Clone, Debug)]
pub struct CommitmentInfo {
/// L2 heights to commit
pub l2_height_range: RangeInclusive<BatchNumber>,
- /// Respectfully, the L1 heights to commit.
- /// (L2 blocks were created with these L1 blocks.)
- pub l1_height_range: RangeInclusive<BatchNumber>,
}
/// Checks if the sequencer should commit
@@ -25,107 +21,46 @@
pub fn get_commitment_info(
ledger_db: &LedgerDB,
min_soft_confirmations_per_commitment: u64,
- prev_l1_height: u64,
state_diff_threshold_reached: bool,
) -> anyhow::Result<Option<CommitmentInfo>> {
- // first get when the last merkle root of soft confirmations was submitted
- let last_commitment_l1_height = ledger_db
- .get_last_sequencer_commitment_l1_height()
- .map_err(|e| {
- anyhow!(
- "Sequencer: Failed to get last sequencer commitment L1 height: {}",
- e
- )
- })?;
-
- debug!("Last commitment L1 height: {:?}", last_commitment_l1_height);
-
- // if none then we never submitted a commitment, start from prev_l1_height and go back as far as you can go
- // if there is a height then start from height + 1 and go to prev_l1_height
- let (l2_range_to_submit, l1_height_range) = match last_commitment_l1_height {
- Some(last_commitment_l1_height) => {
- let l1_start = last_commitment_l1_height.0 + 1;
- let mut l1_end = l1_start;
-
- let Some((l2_start, mut l2_end)) =
- ledger_db.get_l2_range_by_l1_height(SlotNumber(l1_start))?
- else {
- return Ok(None);
- };
-
- // Take while sum of l2 ranges <= min_soft_confirmations_per_commitment
- for l1_i in l1_start..=prev_l1_height {
- l1_end = l1_i;
-
- let Some((_, l2_end_new)) =
- ledger_db.get_l2_range_by_l1_height(SlotNumber(l1_end))?
- else {
- bail!("Sequencer: Failed to get L1 L2 connection");
- };
-
- l2_end = l2_end_new;
-
- let l2_range_length = 1 + l2_end.0 - l2_start.0;
- if l2_range_length >= min_soft_confirmations_per_commitment {
- break;
- }
- }
- let l1_height_range = (l1_start, l1_end);
-
- let l2_height_range = (l2_start, l2_end);
-
- (l2_height_range, l1_height_range)
- }
- None => {
- let first_soft_confirmation = match ledger_db.get_soft_batch_by_number::<()>(1)? {
- Some(batch) => batch,
- None => return Ok(None), // not even the first soft confirmation is there, shouldn't happen actually
- };
-
- let l1_height_range = (first_soft_confirmation.da_slot_height, prev_l1_height);
-
- let Some((_, last_soft_confirmation_height)) =
- ledger_db.get_l2_range_by_l1_height(SlotNumber(prev_l1_height))?
- else {
- bail!("Sequencer: Failed to get L1 L2 connection");
- };
-
- let l2_range_to_submit = (BatchNumber(1), last_soft_confirmation_height);
-
- (l2_range_to_submit, l1_height_range)
- }
+ // Based on heights stored in ledger_db, decide which L2 blocks
+ // to commit.
+ let last_committed_l2_height = ledger_db
+ .get_last_sequencer_commitment_l2_height()?
+ .unwrap_or(BatchNumber(0));
+
+ let Some((head_soft_batch_number, _)) = ledger_db.get_head_soft_batch()? else {
+ // No soft batches have been created yet.
+ return Ok(None);
};
- debug!("L2 range to submit: {:?}", l2_range_to_submit);
- debug!("L1 height range: {:?}", l1_height_range);
+ // If the last committed L2 height is at or above the head
+ // soft batch, we have already committed the latest block.
+ if last_committed_l2_height >= head_soft_batch_number {
+ // Already committed.
+ return Ok(None);
+ }
+
+ let l2_start = last_committed_l2_height.0 + 1;
+ let l2_end = head_soft_batch_number.0;
- if !state_diff_threshold_reached
- && (l2_range_to_submit.1 .0 + 1)
- < min_soft_confirmations_per_commitment + l2_range_to_submit.0 .0
- {
+ let l2_range_length = 1 + l2_end - l2_start;
+ if !state_diff_threshold_reached && (l2_range_length < min_soft_confirmations_per_commitment) {
return Ok(None);
}
- let Some(l1_start_hash) = ledger_db
- .get_soft_batch_by_number::<()>(l2_range_to_submit.0 .0)?
- .map(|s| s.da_slot_hash)
- else {
- bail!("Failed to get soft batch");
- };
+ if l2_range_length >= min_soft_confirmations_per_commitment {
+ debug!("Enough soft confirmations to submit commitment");
+ }
- let Some(l1_end_hash) = ledger_db
- .get_soft_batch_by_number::<()>(l2_range_to_submit.1 .0)?
- .map(|s| s.da_slot_hash)
- else {
- bail!("Failed to get soft batch");
- };
+ if state_diff_threshold_reached {
+ debug!("State diff threshold reached. Committing...");
+ }
- debug!("L1 start hash: {:?}", l1_start_hash);
- debug!("L1 end hash: {:?}", l1_end_hash);
+ debug!("L2 range to submit: {}..={}", l2_start, l2_end);
Ok(Some(CommitmentInfo {
- l2_height_range: l2_range_to_submit.0..=l2_range_to_submit.1,
- l1_height_range: BatchNumber(l1_height_range.0)..=BatchNumber(l1_height_range.1),
+ l2_height_range: BatchNumber(l2_start)..=BatchNumber(l2_end),
}))
}
diff --git a/crates/sequencer/src/sequencer.rs b/crates/sequencer/src/sequencer.rs
index 3886b4ded..7eff7f6a9 100644
--- a/crates/sequencer/src/sequencer.rs
+++ b/crates/sequencer/src/sequencer.rs
@@ -1,4 +1,3 @@
-use std::cmp::Ordering;
use std::collections::{HashMap, HashSet};
use std::marker::PhantomData;
use std::net::SocketAddr;
@@ -37,7 +36,6 @@ use sov_modules_api::{
};
use sov_modules_stf_blueprint::StfBlueprintTrait;
use sov_rollup_interface::da::{BlockHeaderTrait, DaData, DaSpec};
-use sov_rollup_interface::rpc::LedgerRpcProvider;
use sov_rollup_interface::services::da::{BlobWithNotifier, DaService};
use sov_rollup_interface::stf::{SoftBatchReceipt, StateTransitionFunction};
use sov_rollup_interface::storage::HierarchicalStorageManager;
@@ -348,8 +346,7 @@
l2_block_mode: L2BlockMode,
pg_pool: &Option,
last_used_l1_height: u64,
- da_commitment_tx: UnboundedSender<(u64, bool)>,
- ) -> anyhow::Result<u64> {
+ ) -> anyhow::Result<(u64, bool)> {
let da_height = da_block.header().height();
let (l2_height, l1_height) = match self
.ledger_db
@@ -480,7 +477,7 @@
tracing::debug!("Finalizing l2 height: {:?}", l2_height);
self.storage_manager.finalize_l2(l2_height)?;
- return Ok(last_used_l1_height);
+ return Ok((last_used_l1_height, false));
}
trace!(
@@ -555,12 +552,9 @@
);
// Serialize the state diff to check size later.
let serialized_state_diff = bincode::serialize(&merged_state_diff)?;
- if serialized_state_diff.len() as u64 > MAX_STATEDIFF_SIZE_COMMITMENT_THRESHOLD {
- // If we exceed the threshold, we should notify the commitment
- // worker to initiate a commitment.
- if da_commitment_tx.unbounded_send((l1_height, true)).is_err() {
- error!("Commitment thread is dead!");
- }
+ let state_diff_threshold_reached =
+ serialized_state_diff.len() as u64 > MAX_STATEDIFF_SIZE_COMMITMENT_THRESHOLD;
+ if state_diff_threshold_reached {
self.last_state_diff.clone_from(&slot_result.state_diff);
self.ledger_db
.set_state_diff(self.last_state_diff.clone())?;
@@ -584,7 +578,7 @@
});
}
- Ok(da_block.header().height())
+ Ok((da_block.header().height(), state_diff_threshold_reached))
}
(Err(err), batch_workspace) => {
warn!(
@@ -602,7 +596,6 @@
async fn submit_commitment(
&mut self,
- prev_l1_height: u64,
state_diff_threshold_reached: bool,
) -> anyhow::Result<()> {
- debug!("Sequencer: new L1 block, checking if commitment should be submitted");
+ debug!("Sequencer: checking if a commitment should be submitted");
@@ -613,12 +606,10 @@
let commitment_info = commitment_controller::get_commitment_info(
&self.ledger_db,
min_soft_confirmations_per_commitment,
- prev_l1_height,
state_diff_threshold_reached,
)?;
if let Some(commitment_info) = commitment_info {
- debug!("Sequencer: enough soft confirmations to submit commitment");
let l2_range_to_submit = commitment_info.l2_height_range.clone();
// calculate exclusive range end
@@ -658,17 +649,9 @@
.map_err(|_| {
anyhow!("Sequencer: Failed to set last sequencer commitment L2 height")
})?;
- self.ledger_db
- .set_last_sequencer_commitment_l1_height(SlotNumber(
- commitment_info.l1_height_range.end().0,
- ))
- .map_err(|_| {
- anyhow!("Sequencer: Failed to set last sequencer commitment L1 height")
- })?;
debug!("Commitment info: {:?}", commitment_info);
- // let l1_start_height = commitment_info.l1_height_range.start().0;
- // let l1_end_height = commitment_info.l1_height_range.end().0;
+
let l2_start = l2_range_to_submit.start().0 as u32;
let l2_end = l2_range_to_submit.end().0 as u32;
if let Some(db_config) = self.config.db_config.clone() {
@@ -764,7 +747,7 @@
// Setup required workers to update our knowledge of the DA layer every X seconds (configurable).
let (da_height_update_tx, mut da_height_update_rx) = mpsc::channel(1);
- let (da_commitment_tx, mut da_commitment_rx) = unbounded::<(u64, bool)>();
+ let (da_commitment_tx, mut da_commitment_rx) = unbounded::<bool>();
let da_monitor = da_block_monitor(
self.da_service.clone(),
da_height_update_tx,
@@ -812,14 +795,10 @@
missed_da_blocks_count = skipped_blocks;
}
}
-
- if let Err(e) = self.maybe_submit_commitment(da_commitment_tx.clone(), last_finalized_height, last_used_l1_height).await {
- error!("Sequencer error: {}", e);
- }
}
},
- (prev_l1_height, force) = da_commitment_rx.select_next_some() => {
- if let Err(e) = self.submit_commitment(prev_l1_height, force).await {
+ force = da_commitment_rx.select_next_some() => {
+ if let Err(e) = self.submit_commitment(force).await {
error!("Failed to submit commitment: {}", e);
}
},
@@ -838,7 +817,7 @@
.map_err(|e| anyhow!(e))?;
debug!("Created an empty L2 for L1={}", needed_da_block_height);
- if let Err(e) = self.produce_l2_block(da_block, l1_fee_rate, L2BlockMode::Empty, &pg_pool, last_used_l1_height, da_commitment_tx.clone()).await {
+ if let Err(e) = self.produce_l2_block(da_block, l1_fee_rate, L2BlockMode::Empty, &pg_pool, last_used_l1_height).await {
error!("Sequencer error: {}", e);
}
}
@@ -854,9 +833,13 @@
}
};
let l1_fee_rate = l1_fee_rate.clamp(*l1_fee_rate_range.start(), *l1_fee_rate_range.end());
- match self.produce_l2_block(last_finalized_block.clone(), l1_fee_rate, L2BlockMode::NotEmpty, &pg_pool, last_used_l1_height, da_commitment_tx.clone()).await {
- Ok(l1_block_number) => {
+ match self.produce_l2_block(last_finalized_block.clone(), l1_fee_rate, L2BlockMode::NotEmpty, &pg_pool, last_used_l1_height).await {
+ Ok((l1_block_number, state_diff_threshold_reached)) => {
last_used_l1_height = l1_block_number;
+
+ if da_commitment_tx.unbounded_send(state_diff_threshold_reached).is_err() {
+ error!("Commitment thread is dead!");
+ }
},
Err(e) => {
error!("Sequencer error: {}", e);
}
@@ -882,7 +865,7 @@
.map_err(|e| anyhow!(e))?;
debug!("Created an empty L2 for L1={}", needed_da_block_height);
- if let Err(e) = self.produce_l2_block(da_block, l1_fee_rate, L2BlockMode::Empty, &pg_pool, last_used_l1_height, da_commitment_tx.clone()).await {
+ if let Err(e) = self.produce_l2_block(da_block, l1_fee_rate, L2BlockMode::Empty, &pg_pool, last_used_l1_height).await {
error!("Sequencer error: {}", e);
}
}
@@ -900,8 +883,8 @@
let l1_fee_rate = l1_fee_rate.clamp(*l1_fee_rate_range.start(), *l1_fee_rate_range.end());
let instant = Instant::now();
- match self.produce_l2_block(da_block, l1_fee_rate, L2BlockMode::NotEmpty, &pg_pool, last_used_l1_height, da_commitment_tx.clone()).await {
- Ok(l1_block_number) => {
+ match self.produce_l2_block(da_block, l1_fee_rate, L2BlockMode::NotEmpty, &pg_pool, last_used_l1_height).await {
+ Ok((l1_block_number, state_diff_threshold_reached)) => {
// Set the next iteration's wait time to produce a block based on the
// previous block's execution time.
// This is mainly to make sure we account for the execution time to
@@ -909,11 +892,15 @@
parent_block_exec_time = instant.elapsed();
last_used_l1_height = l1_block_number;
+
+ if da_commitment_tx.unbounded_send(state_diff_threshold_reached).is_err() {
+ error!("Commitment thread is dead!");
+ }
},
Err(e) => {
error!("Sequencer error: {}", e);
}
- }
+ };
}
}
}
@@ -1072,41 +1059,6 @@
self.ledger_db
.set_last_sequencer_commitment_l2_height(BatchNumber(db_commitment.l2_end_height))?;
- let l2_end_batch = self
- .ledger_db
- .get_soft_batch_by_number::<()>(db_commitment.l2_end_height)?
- .unwrap();
- self.ledger_db
- .set_last_sequencer_commitment_l1_height(SlotNumber(l2_end_batch.da_slot_height))?;
-
- Ok(())
- }
-
- async fn maybe_submit_commitment(
- &self,
- da_commitment_tx: UnboundedSender<(u64, bool)>,
- last_finalized_height: u64,
- last_used_l1_height: u64,
- ) -> anyhow::Result<()> {
- let commit_up_to = match last_finalized_height.cmp(&last_used_l1_height) {
- Ordering::Less => {
- panic!("DA L1 height is less than Ledger finalized height. DA L1 height: {}, Finalized height: {}", last_finalized_height, last_used_l1_height);
- }
- Ordering::Equal => None,
- Ordering::Greater => {
- let commit_up_to = last_finalized_height - 1;
- Some(commit_up_to)
- }
- };
-
- if let Some(commit_up_to) = commit_up_to {
- if da_commitment_tx
- .unbounded_send((commit_up_to, false))
- .is_err()
- {
- error!("Commitment thread is dead!");
- }
- }
Ok(())
}
diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs
index d354c727a..be4eed580 100644
--- a/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs
+++ b/crates/sovereign-sdk/full-node/db/sov-db/src/ledger_db/mod.rs
@@ -13,10 +13,9 @@ use tracing::instrument;
use crate::rocks_db_config::gen_rocksdb_options;
use crate::schema::tables::{
BatchByHash, BatchByNumber, CommitmentsByNumber, EventByKey, EventByNumber, L2RangeByL1Height,
- L2Witness, LastSequencerCommitmentSent, LastSequencerCommitmentSentL2, LastStateDiff,
- ProofBySlotNumber, ProverLastScannedSlot, SlotByHash, SlotByNumber, SoftBatchByHash,
- SoftBatchByNumber, SoftConfirmationStatus, TxByHash, TxByNumber, VerifiedProofsBySlotNumber,
- LEDGER_TABLES,
+ L2Witness, LastSequencerCommitmentSent, LastStateDiff, ProofBySlotNumber,
+ ProverLastScannedSlot, SlotByHash, SlotByNumber, SoftBatchByHash, SoftBatchByNumber,
+ SoftConfirmationStatus, TxByHash, TxByNumber, VerifiedProofsBySlotNumber, LEDGER_TABLES,
};
use crate::schema::types::{
split_tx_for_storage, BatchNumber, EventNumber, L2HeightRange, SlotNumber, StoredBatch,
@@ -463,23 +462,7 @@
let mut schema_batch = SchemaBatch::new();
schema_batch
- .put::<LastSequencerCommitmentSentL2>(&(), &l2_height)
- .unwrap();
- self.db.write_schemas(schema_batch)?;
-
- Ok(())
- }
-
- /// Used by the sequencer to record that it has committed to soft confirmations on a given L2 height
- #[instrument(level = "trace", skip(self), err, ret)]
- pub fn set_last_sequencer_commitment_l1_height(
- &self,
- l1_height: SlotNumber,
- ) -> Result<(), anyhow::Error> {
- let mut schema_batch = SchemaBatch::new();
-
- schema_batch
- .put::<LastSequencerCommitmentSent>(&(), &l1_height)
+ .put::<LastSequencerCommitmentSent>(&(), &l2_height)
.unwrap();
self.db.write_schemas(schema_batch)?;
Ok(())
}
@@ -543,20 +526,11 @@
}
}
- /// Get the most recent committed batch
- /// Returns L1 height, which means the corresponding L2 heights
- /// were committed.
- /// Called by the sequencer.
- #[instrument(level = "trace", skip(self), err, ret)]
- pub fn get_last_sequencer_commitment_l1_height(&self) -> anyhow::Result<Option<SlotNumber>> {
- self.db.get::<LastSequencerCommitmentSent>(&())
- }
-
/// Get the most recent committed batch
/// Returns L2 height.
#[instrument(level = "trace", skip(self), err, ret)]
pub fn get_last_sequencer_commitment_l2_height(&self) -> anyhow::Result<Option<BatchNumber>> {
- self.db.get::<LastSequencerCommitmentSentL2>(&())
+ self.db.get::<LastSequencerCommitmentSent>(&())
}
/// Get L2 height range for a given L1 height.
diff --git a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs
index 66fca858a..157ddfc21 100644
--- a/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs
+++ b/crates/sovereign-sdk/full-node/db/sov-db/src/schema/tables.rs
@@ -59,7 +59,6 @@ pub const LEDGER_TABLES: &[&str] = &[
L2Witness::table_name(),
LastStateDiff::table_name(),
LastSequencerCommitmentSent::table_name(),
- LastSequencerCommitmentSentL2::table_name(),
ProverLastScannedSlot::table_name(),
BatchByHash::table_name(),
BatchByNumber::table_name(),
@@ -264,12 +263,7 @@
define_table_with_seek_key_codec!(
/// Sequencer uses this table to store the last commitment it sent
- (LastSequencerCommitmentSent) () => SlotNumber
-);
-
-define_table_with_seek_key_codec!(
- /// Sequencer uses this table to store the last commitment it sent
- (LastSequencerCommitmentSentL2) () => BatchNumber
+ (LastSequencerCommitmentSent) () => BatchNumber
);
define_table_with_seek_key_codec!(
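
For reference, the commitment trigger logic that this patch introduces in commitment_controller.rs reduces to a single rule over L2 heights. Below is a minimal standalone sketch of that rule, with plain u64 values standing in for the LedgerDB lookups and the BatchNumber newtype; the helper name commitment_range is illustrative and not part of the patch.

// Standalone sketch of the new commitment decision rule in
// `get_commitment_info` (plain u64s stand in for LedgerDB lookups
// and the BatchNumber newtype; the name is hypothetical).
use std::ops::RangeInclusive;

/// Decide which L2 (soft confirmation) heights to commit, if any.
fn commitment_range(
    last_committed_l2_height: u64, // 0 if nothing has been committed yet
    head_soft_batch_number: u64,
    min_soft_confirmations_per_commitment: u64,
    state_diff_threshold_reached: bool,
) -> Option<RangeInclusive<u64>> {
    // Already committed up to (or past) the head: nothing to do.
    if last_committed_l2_height >= head_soft_batch_number {
        return None;
    }

    let l2_start = last_committed_l2_height + 1;
    let l2_end = head_soft_batch_number;

    // Commit only once enough soft confirmations have accumulated,
    // unless the state diff size forces an early commitment.
    let l2_range_length = 1 + l2_end - l2_start;
    if !state_diff_threshold_reached
        && l2_range_length < min_soft_confirmations_per_commitment
    {
        return None;
    }

    Some(l2_start..=l2_end)
}

fn main() {
    // 4 soft confirmations with a threshold of 4: commit blocks 1..=4.
    assert_eq!(commitment_range(0, 4, 4, false), Some(1..=4));
    // Only 3 new blocks since the last commitment: wait...
    assert_eq!(commitment_range(4, 7, 4, false), None);
    // ...unless the state diff threshold forces a commitment.
    assert_eq!(commitment_range(4, 7, 4, true), Some(5..=7));
}

With the L1-height bookkeeping removed, the sequencer decides purely on L2 heights after every produced L2 block, which is why the tests above no longer publish an extra DA block just to trigger a commitment.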