diff --git a/bin/citrea/tests/all_tests.rs b/bin/citrea/tests/all_tests.rs
index 17d6e06d3..f42b8d590 100644
--- a/bin/citrea/tests/all_tests.rs
+++ b/bin/citrea/tests/all_tests.rs
@@ -10,3 +10,4 @@ mod test_helpers;
 
 const DEFAULT_MIN_SOFT_CONFIRMATIONS_PER_COMMITMENT: u64 = 1000;
 const DEFAULT_DEPOSIT_MEMPOOL_FETCH_LIMIT: usize = 10;
+const DEFAULT_PROOF_WAIT_DURATION: u64 = 300; // 5 minutes
diff --git a/bin/citrea/tests/e2e/mod.rs b/bin/citrea/tests/e2e/mod.rs
index 5f6822bcb..3c75fffc0 100644
--- a/bin/citrea/tests/e2e/mod.rs
+++ b/bin/citrea/tests/e2e/mod.rs
@@ -25,9 +25,13 @@ use tokio::time::sleep;
 use crate::evm::{init_test_rollup, make_test_client};
 use crate::test_client::TestClient;
 use crate::test_helpers::{
-    create_default_sequencer_config, start_rollup, tempdir_with_children, NodeMode,
+    create_default_sequencer_config, start_rollup, tempdir_with_children, wait_for_l1_block,
+    wait_for_l2_block, wait_for_postgres_commitment, wait_for_prover_l1_height, NodeMode,
+};
+use crate::{
+    DEFAULT_DEPOSIT_MEMPOOL_FETCH_LIMIT, DEFAULT_MIN_SOFT_CONFIRMATIONS_PER_COMMITMENT,
+    DEFAULT_PROOF_WAIT_DURATION,
 };
-use crate::{DEFAULT_DEPOSIT_MEMPOOL_FETCH_LIMIT, DEFAULT_MIN_SOFT_CONFIRMATIONS_PER_COMMITMENT};
 
 struct TestConfig {
     seq_min_soft_confirmations: u64,
@@ -208,7 +212,7 @@ async fn test_soft_batch_save() -> Result<(), anyhow::Error> {
 
     let _ = execute_blocks(&seq_test_client, &full_node_test_client, &da_db_dir.clone()).await;
 
-    sleep(Duration::from_secs(10)).await;
+    wait_for_l2_block(&full_node_test_client_2, 504, None).await;
 
     let seq_block = seq_test_client
         .eth_get_block_by_number(Some(BlockNumberOrTag::Latest))
@@ -257,7 +261,8 @@ async fn test_full_node_send_tx() -> Result<(), anyhow::Error> {
 
     seq_test_client.send_publish_batch_request().await;
 
-    sleep(Duration::from_millis(2000)).await;
+    wait_for_l2_block(&seq_test_client, 1, None).await;
+    wait_for_l2_block(&full_node_test_client, 1, None).await;
 
     let sq_block = seq_test_client
         .eth_get_block_by_number(Some(BlockNumberOrTag::Latest))
@@ -320,6 +325,8 @@ async fn test_delayed_sync_ten_blocks() -> Result<(), anyhow::Error> {
         seq_test_client.send_publish_batch_request().await;
     }
 
+    wait_for_l2_block(&seq_test_client, 10, None).await;
+
     let (full_node_port_tx, full_node_port_rx) = tokio::sync::oneshot::channel();
 
     let da_db_dir_cloned = da_db_dir.clone();
@@ -344,7 +351,7 @@
     let full_node_port = full_node_port_rx.await.unwrap();
     let full_node_test_client = make_test_client(full_node_port).await;
 
-    sleep(Duration::from_secs(10)).await;
+    wait_for_l2_block(&full_node_test_client, 10, None).await;
 
     let seq_block = seq_test_client
         .eth_get_block_by_number(Some(BlockNumberOrTag::Number(10)))
@@ -459,7 +466,7 @@ async fn test_close_and_reopen_full_node() -> Result<(), anyhow::Error> {
     }
 
     // wait for full node to sync
-    sleep(Duration::from_secs(5)).await;
+    wait_for_l2_block(&full_node_test_client, 10, None).await;
 
     // check if latest blocks are the same
     let seq_last_block = seq_test_client
@@ -479,8 +486,6 @@
     // close full node
     rollup_task.abort();
 
-    sleep(Duration::from_secs(2)).await;
-
     // create 100 more blocks
     for _ in 0..100 {
         seq_test_client
@@ -500,8 +505,6 @@
     // the lock is not released on the db directory even though the task is aborted
     let _ = copy_dir_recursive(&fullnode_db_dir, &storage_dir.path().join("fullnode_copy"));
 
-    sleep(Duration::from_secs(5)).await;
-
     let da_db_dir_cloned = da_db_dir.clone();
     let fullnode_db_dir = storage_dir.path().join("fullnode_copy");
     // spin up the full node again with the same data where it left of only with different path to not stuck on lock
@@ -523,13 +526,13 @@
         .await;
     });
 
-    // TODO: There should be a better way to test this?
-    sleep(Duration::from_secs(10)).await;
-
     let full_node_port = full_node_port_rx.await.unwrap();
     let full_node_test_client = make_test_client(full_node_port).await;
 
+    wait_for_l2_block(&seq_test_client, 110, None).await;
+    wait_for_l2_block(&full_node_test_client, 110, None).await;
+
     // check if the latest block state roots are same
     let seq_last_block = seq_test_client
         .eth_get_block_by_number_with_detail(Some(BlockNumberOrTag::Latest))
@@ -654,7 +657,7 @@ async fn test_get_transaction_by_hash() -> Result<(), anyhow::Error> {
     seq_test_client.send_publish_batch_request().await;
 
     // wait for the full node to sync
-    sleep(Duration::from_millis(2000)).await;
+    wait_for_l2_block(&full_node_test_client, 1, None).await;
 
     // make sure txs are in the block
     let seq_block = seq_test_client
@@ -745,7 +748,8 @@ async fn test_soft_confirmations_on_different_blocks() -> Result<(), anyhow::Err
         seq_test_client.send_publish_batch_request().await;
     }
 
-    sleep(Duration::from_secs(2)).await;
+    wait_for_l2_block(&seq_test_client, 6, None).await;
+    wait_for_l2_block(&full_node_test_client, 6, None).await;
 
     let mut last_da_slot_height = 0;
     let mut last_da_slot_hash = <MockDaSpec as DaSpec>::SlotHash::from([0u8; 32]);
@@ -784,7 +788,8 @@
         seq_test_client.spam_publish_batch_request().await.unwrap();
     }
 
-    sleep(Duration::from_secs(2)).await;
+    wait_for_l2_block(&seq_test_client, 12, None).await;
+    wait_for_l2_block(&full_node_test_client, 12, None).await;
 
     for i in 7..=12 {
         let seq_soft_conf = seq_test_client
@@ -862,8 +867,6 @@ async fn test_reopen_sequencer() -> Result<(), anyhow::Error> {
     // close sequencer
     seq_task.abort();
 
-    sleep(Duration::from_secs(1)).await;
-
     let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel();
 
     // Copy the db to a new path with the same contents because
@@ -876,7 +879,7 @@
     let da_service = MockDaService::new(MockAddress::from([0; 32]), &da_db_dir);
     da_service.publish_test_block().await.unwrap();
 
-    sleep(Duration::from_secs(1)).await;
+    wait_for_l1_block(&da_service, 1, None).await;
 
     let sequencer_db_dir = storage_dir.path().join("sequencer_copy");
     let da_db_dir_cloned = da_db_dir.clone();
@@ -916,6 +919,8 @@
     seq_test_client.send_publish_batch_request().await;
     seq_test_client.send_publish_batch_request().await;
 
+    wait_for_l2_block(&seq_test_client, 2, None).await;
+
     assert_eq!(
         seq_test_client
             .eth_get_block_by_number(Some(BlockNumberOrTag::Latest))
@@ -995,7 +1000,7 @@ async fn execute_blocks(
             sequencer_client.spam_publish_batch_request().await.unwrap();
         }
 
-        sleep(Duration::from_secs(1)).await;
+        wait_for_l2_block(sequencer_client, 204, None).await;
     }
 
     let da_service = MockDaService::new(MockAddress::from([0; 32]), da_db_dir);
@@ -1013,7 +1018,8 @@
         }
     }
 
-    sleep(Duration::from_millis(5000)).await;
+    wait_for_l2_block(sequencer_client, 504, None).await;
+    wait_for_l2_block(full_node_client, 504, None).await;
 
     let seq_last_block = sequencer_client
        .eth_get_block_by_number_with_detail(Some(BlockNumberOrTag::Latest))
@@ -1060,14 +1066,14 @@ async fn test_soft_confirmations_status_one_l1() -> Result<(), anyhow::Error> {
 
     // TODO check status=trusted
 
-    sleep(Duration::from_secs(2)).await;
+    wait_for_l2_block(&full_node_test_client, 6, None).await;
 
     // publish new da block
     da_service.publish_test_block().await.unwrap();
 
     seq_test_client.send_publish_batch_request().await; // TODO https://github.com/chainwayxyz/citrea/issues/214
     seq_test_client.send_publish_batch_request().await; // TODO https://github.com/chainwayxyz/citrea/issues/214
 
-    sleep(Duration::from_secs(2)).await;
+    wait_for_l2_block(&full_node_test_client, 8, None).await;
 
     // now retrieve confirmation status from the sequencer and full node and check if they are the same
     for i in 1..=6 {
@@ -1111,7 +1117,7 @@ async fn test_soft_confirmations_status_two_l1() -> Result<(), anyhow::Error> {
         seq_test_client.send_publish_batch_request().await;
     }
 
-    sleep(Duration::from_secs(2)).await;
+    wait_for_l2_block(&seq_test_client, 2, None).await;
 
     // publish new da block
     da_service.publish_test_block().await.unwrap();
@@ -1120,6 +1126,8 @@
         seq_test_client.send_publish_batch_request().await;
     }
 
+    wait_for_l2_block(&full_node_test_client, 7, None).await;
+
     // now retrieve confirmation status from the sequencer and full node and check if they are the same
     for i in 1..=2 {
         let status_node = full_node_test_client
@@ -1135,7 +1143,7 @@
     seq_test_client.send_publish_batch_request().await;
     seq_test_client.send_publish_batch_request().await;
 
-    sleep(Duration::from_secs(2)).await;
+    wait_for_l2_block(&full_node_test_client, 9, None).await;
 
     // Check that these L2 blocks are bounded on different L1 block
     let mut batch_infos = vec![];
@@ -1243,8 +1251,6 @@ async fn test_prover_sync_with_commitments() -> Result<(), anyhow::Error> {
         seq_test_client.send_publish_batch_request().await;
     }
 
-    sleep(Duration::from_secs(2)).await;
-
     // prover should not have any blocks saved
     assert_eq!(prover_node_test_client.eth_block_number().await, 0);
@@ -1258,28 +1264,30 @@
     seq_test_client.send_publish_batch_request().await;
 
     // wait here until we see from prover's rpc that it finished proving
-    while prover_node_test_client
-        .prover_get_last_scanned_l1_height()
-        .await
-        != 3
-    {
-        // sleep 2
-        sleep(Duration::from_secs(2)).await;
-    }
-    sleep(Duration::from_secs(4)).await;
+    wait_for_prover_l1_height(
+        &prover_node_test_client,
+        3,
+        Some(Duration::from_secs(DEFAULT_PROOF_WAIT_DURATION)),
+    )
+    .await;
+
     // prover should have synced all 4 l2 blocks
     assert_eq!(prover_node_test_client.eth_block_number().await, 4);
 
     seq_test_client.send_publish_batch_request().await;
 
-    sleep(Duration::from_secs(3)).await;
-    // Still should have 4 blokcs there are no commitments yet
+    wait_for_prover_l1_height(
+        &prover_node_test_client,
+        4,
+        Some(Duration::from_secs(DEFAULT_PROOF_WAIT_DURATION)),
+    )
+    .await;
     assert_eq!(prover_node_test_client.eth_block_number().await, 4);
 
     seq_test_client.send_publish_batch_request().await;
     seq_test_client.send_publish_batch_request().await;
 
-    sleep(Duration::from_secs(3)).await;
+    // Still should have 4 blocks there are no commitments yet
     assert_eq!(prover_node_test_client.eth_block_number().await, 4);
 
     da_service.publish_test_block().await.unwrap();
@@ -1288,15 +1296,13 @@ async fn test_prover_sync_with_commitments() -> Result<(), anyhow::Error> {
     seq_test_client.send_publish_batch_request().await;
 
     // wait here until we see from prover's rpc that it finished proving
-    while prover_node_test_client
-        .prover_get_last_scanned_l1_height()
-        .await
-        != 8
-    {
-        // sleep 2
-        sleep(Duration::from_secs(2)).await;
-    }
-    sleep(Duration::from_secs(4)).await;
+    wait_for_prover_l1_height(
+        &prover_node_test_client,
+        8,
+        Some(Duration::from_secs(DEFAULT_PROOF_WAIT_DURATION)),
+    )
+    .await;
+
     // Should now have 8 blocks = 2 commitments of blocks 1-4 and 5-9
     // there is an extra soft confirmation due to the prover publishing a proof. This causes
     // a new MockDa block, which in turn causes the sequencer to publish an extra soft confirmation
@@ -1388,8 +1394,6 @@ async fn test_reopen_prover() -> Result<(), anyhow::Error> {
         seq_test_client.send_publish_batch_request().await;
     }
 
-    sleep(Duration::from_secs(2)).await;
-
     // prover should not have any blocks saved
     assert_eq!(prover_node_test_client.eth_block_number().await, 0);
@@ -1403,14 +1407,12 @@
     seq_test_client.send_publish_batch_request().await;
 
     // wait here until we see from prover's rpc that it finished proving
-    while prover_node_test_client
-        .prover_get_last_scanned_l1_height()
-        .await
-        != 5
-    {
-        // sleep 2
-        sleep(Duration::from_secs(2)).await;
-    }
+    wait_for_prover_l1_height(
+        &prover_node_test_client,
+        5,
+        Some(Duration::from_secs(DEFAULT_PROOF_WAIT_DURATION)),
+    )
+    .await;
 
     // prover should have synced all 4 l2 blocks
     assert_eq!(prover_node_test_client.eth_block_number().await, 4);
@@ -1445,19 +1447,13 @@
     let prover_node_port = prover_node_port_rx.await.unwrap();
     let prover_node_test_client = make_test_client(prover_node_port).await;
 
-    sleep(Duration::from_secs(2)).await;
-
     seq_test_client.send_publish_batch_request().await;
 
-    sleep(Duration::from_secs(3)).await;
-
-    // Still should have 4 blokcs there are no commitments yet
+    // Still should have 4 blocks there are no commitments yet
     assert_eq!(prover_node_test_client.eth_block_number().await, 4);
 
     prover_node_task.abort();
 
-    sleep(Duration::from_secs(2)).await;
-
     seq_test_client.send_publish_batch_request().await;
     seq_test_client.send_publish_batch_request().await;
 
@@ -1489,7 +1485,6 @@
     let prover_node_port = prover_node_port_rx.await.unwrap();
     let prover_node_test_client = make_test_client(prover_node_port).await;
 
-    sleep(Duration::from_secs(3)).await;
     // Still should have 4 blokcs there are no commitments yet
     assert_eq!(prover_node_test_client.eth_block_number().await, 4);
     da_service.publish_test_block().await.unwrap();
@@ -1498,14 +1493,12 @@
     seq_test_client.send_publish_batch_request().await;
 
     // wait here until we see from prover's rpc that it finished proving
-    while prover_node_test_client
-        .prover_get_last_scanned_l1_height()
-        .await
-        != 8
-    {
-        // sleep 2
-        sleep(Duration::from_secs(2)).await;
-    }
+    wait_for_prover_l1_height(
+        &prover_node_test_client,
+        8,
+        Some(Duration::from_secs(DEFAULT_PROOF_WAIT_DURATION)),
+    )
+    .await;
 
     // Should now have 8 blocks = 2 commitments of blocks 1-4 and 5-9
     // there is an extra soft confirmation due to the prover publishing a proof. This causes
@@ -1515,7 +1508,6 @@ async fn test_reopen_prover() -> Result<(), anyhow::Error> {
     // TODO: Also test with multiple commitments in single Mock DA Block
     seq_task.abort();
     prover_node_task.abort();
-
     Ok(())
 }
@@ -1550,17 +1542,17 @@ async fn test_system_transactons() -> Result<(), anyhow::Error> {
     .await;
 
     // publish some blocks with system transactions
-    for _ in 0..10 {
+    for i in 0..10 {
         for _ in 0..5 {
             seq_test_client.spam_publish_batch_request().await.unwrap();
         }
+        wait_for_l2_block(&seq_test_client, 5 * (i + 1), None).await;
 
         da_service.publish_test_block().await.unwrap();
     }
 
     seq_test_client.send_publish_batch_request().await;
-
-    sleep(Duration::from_secs(5)).await;
+    wait_for_l2_block(&full_node_test_client, 51, None).await;
 
     // check block 1-6-11-16-21-26-31-36-41-46-51 has system transactions
     for i in 0..=10 {
@@ -1741,7 +1733,7 @@ async fn test_system_tx_effect_on_block_gas_limit() -> Result<(), anyhow::Error>
 
     let last_in_receipt = last_in_tx.unwrap().await.unwrap().unwrap();
 
-    sleep(Duration::from_secs(2)).await;
+    wait_for_l2_block(&seq_test_client, 1, None).await;
 
     let initial_soft_batch = seq_test_client
         .ledger_get_soft_batch_by_number::<MockDaSpec>(1)
@@ -1874,8 +1866,8 @@ async fn sequencer_crash_and_replace_full_node() -> Result<(), anyhow::Error> {
     let (full_node_port_tx, full_node_port_rx) = tokio::sync::oneshot::channel();
 
     let config1 = sequencer_config.clone();
-    let fullnode_db_dir_cloned = fullnode_db_dir.clone();
     let da_db_dir_cloned = da_db_dir.clone();
+    let fullnode_db_dir_cloned = fullnode_db_dir.clone();
 
     let full_node_task = tokio::spawn(async move {
         start_rollup(
@@ -1912,22 +1904,19 @@
     seq_test_client.send_publish_batch_request().await;
 
     // wait for sync
-    sleep(Duration::from_secs(2)).await;
+    wait_for_l2_block(&full_node_test_client, 5, None).await;
 
     // should be synced
     assert_eq!(full_node_test_client.eth_block_number().await, 5);
 
     // assume sequencer craashed
     seq_task.abort();
-    sleep(Duration::from_secs(2)).await;
 
     let commitments = db_test_client.get_all_commitments().await.unwrap();
     assert_eq!(commitments.len(), 1);
 
     full_node_task.abort();
 
-    sleep(Duration::from_secs(2)).await;
-
     let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel();
 
     // Copy the db to a new path with the same contents because
@@ -1935,7 +1924,6 @@
     let _ = copy_dir_recursive(&fullnode_db_dir, &storage_dir.path().join("full_node_copy"));
     let sequencer_db_dir = storage_dir.path().join("full_node_copy");
 
-    sleep(Duration::from_secs(1)).await;
 
     let config1 = sequencer_config.clone();
     // Start the full node as sequencer
@@ -1961,7 +1949,7 @@
 
     let seq_test_client = make_test_client(seq_port).await;
 
-    sleep(Duration::from_secs(5)).await;
+    wait_for_l2_block(&seq_test_client, 5, None).await;
 
     assert_eq!(seq_test_client.eth_block_number().await as u64, 5);
 
@@ -1973,7 +1961,13 @@
     // new commitment will be sent here, it should send between 2 and 3 should not include 1
     seq_test_client.send_publish_batch_request().await;
 
-    sleep(Duration::from_secs(5)).await;
+    wait_for_postgres_commitment(
+        &db_test_client,
+        2,
+        Some(Duration::from_secs(DEFAULT_PROOF_WAIT_DURATION)),
+    )
+    .await;
+
     let commitments = db_test_client.get_all_commitments().await.unwrap();
     assert_eq!(commitments.len(), 2);
     assert_eq!(commitments[0].l1_start_height, 1);
@@ -2073,7 +2067,7 @@ async fn transaction_failing_on_l1_is_removed_from_mempool() -> Result<(), anyho
     assert!(tx_from_mempool.is_none());
     assert_eq!(soft_confirmation.txs.unwrap().len(), 1); // TODO: if we can also remove the tx from soft confirmation, that'd be very efficient
 
-    sleep(Duration::from_secs(2)).await;
+    wait_for_l2_block(&full_node_test_client, block.number.unwrap().as_u64(), None).await;
 
     let block_from_full_node = full_node_test_client
         .eth_get_block_by_number_with_detail(Some(BlockNumberOrTag::Latest))
@@ -2239,7 +2233,6 @@ async fn sequencer_crash_restore_mempool() -> Result<(), anyhow::Error> {
 
     assert_eq!(txs.len(), 0);
 
     seq_task.abort();
-
     Ok(())
 }
@@ -2252,7 +2245,8 @@ async fn test_db_get_proof() {
     let prover_db_dir = storage_dir.path().join("prover").to_path_buf();
     let da_db_dir = storage_dir.path().join("DA").to_path_buf();
 
-    let db_test_client = PostgresConnector::new_test_client("test_db_get_proof".to_owned())
+    let psql_db_name = "test_db_get_proof".to_string();
+    let db_test_client = PostgresConnector::new_test_client(psql_db_name.clone())
         .await
         .unwrap();
 
@@ -2291,9 +2285,7 @@ async fn test_db_get_proof() {
         Some(ProverConfig {
             proving_mode: sov_stf_runner::ProverGuestRunConfig::Execute,
             proof_sampling_number: 0,
-            db_config: Some(
-                SharedBackupDbConfig::default().set_db_name("test_db_get_proof".to_owned()),
-            ),
+            db_config: Some(SharedBackupDbConfig::default().set_db_name(psql_db_name)),
         }),
         NodeMode::Prover(seq_port),
         prover_db_dir,
@@ -2312,7 +2304,6 @@ async fn test_db_get_proof() {
     let prover_node_test_client = make_test_client(prover_node_port).await;
 
     da_service.publish_test_block().await.unwrap();
-    sleep(Duration::from_secs(1)).await;
 
     test_client.send_publish_batch_request().await;
     test_client.send_publish_batch_request().await;
@@ -2326,22 +2317,17 @@ async fn test_db_get_proof() {
     // da_service.publish_test_block().await.unwrap();
 
     // wait here until we see from prover's rpc that it finished proving
-    while prover_node_test_client
-        .prover_get_last_scanned_l1_height()
-        .await
-        != 5
-    {
-        // sleep 2
-        sleep(Duration::from_secs(2)).await;
-    }
-    sleep(Duration::from_secs(4)).await;
+    wait_for_prover_l1_height(
+        &prover_node_test_client,
+        5,
+        Some(Duration::from_secs(DEFAULT_PROOF_WAIT_DURATION)),
+    )
+    .await;
 
     let ledger_proof = prover_node_test_client
         .ledger_get_proof_by_slot_height(4)
         .await;
 
-    sleep(Duration::from_secs(4)).await;
-
     let db_proofs = db_test_client.get_all_proof_data().await.unwrap();
 
     assert_eq!(db_proofs.len(), 1);
@@ -2463,7 +2449,7 @@ async fn full_node_verify_proof_and_store() {
     let full_node_test_client = make_test_client(full_node_port).await;
 
     da_service.publish_test_block().await.unwrap();
-    sleep(Duration::from_secs(1)).await;
+    wait_for_l1_block(&da_service, 1, None).await;
 
     test_client.send_publish_batch_request().await;
     test_client.send_publish_batch_request().await;
@@ -2477,15 +2463,12 @@ async fn full_node_verify_proof_and_store() {
     // da_service.publish_test_block().await.unwrap();
 
     // wait here until we see from prover's rpc that it finished proving
-    while prover_node_test_client
-        .prover_get_last_scanned_l1_height()
-        .await
-        != 5
-    {
-        // sleep 2
-        sleep(Duration::from_secs(2)).await;
-    }
-    sleep(Duration::from_secs(4)).await;
+    wait_for_prover_l1_height(
+        &prover_node_test_client,
+        5,
+        Some(Duration::from_secs(DEFAULT_PROOF_WAIT_DURATION)),
+    )
+    .await;
 
     let commitments = prover_node_test_client
         .ledger_get_sequencer_commitments_on_slot_by_number(4)
@@ -2519,11 +2502,13 @@
         .ledger_get_proof_by_slot_height(4)
         .await;
 
-    // the proof will be in l1 block #5 because prover publishes it after the commitment and in mock da submitting proof and commitments creates a new block
+    // The proof will be in l1 block #5 because prover publishes it after the commitment and
+    // in mock da, submitting proof and commitments creates a new block.
     // For full node to see the proof, we publish another l2 block and now it will check #5 l1 block
     test_client.send_publish_batch_request().await;
 
-    sleep(Duration::from_secs(2)).await;
+    wait_for_l2_block(&full_node_test_client, 7, None).await;
+    wait_for_l1_block(&da_service, 5, None).await;
 
     // So the full node should see the proof in block 5
     let full_node_proof = full_node_test_client
@@ -2563,11 +2548,13 @@ async fn test_all_flow() {
     // citrea::initialize_logging();
 
     let storage_dir = tempdir_with_children(&["DA", "sequencer", "prover", "full-node"]);
+    let da_db_dir = storage_dir.path().join("DA").to_path_buf();
     let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf();
     let prover_db_dir = storage_dir.path().join("prover").to_path_buf();
     let fullnode_db_dir = storage_dir.path().join("full-node").to_path_buf();
-    let da_db_dir = storage_dir.path().join("DA").to_path_buf();
-    let db_test_client = PostgresConnector::new_test_client("test_all_flow".to_owned())
+
+    let psql_db_name = "test_all_flow".to_owned();
+    let db_test_client = PostgresConnector::new_test_client(psql_db_name.clone())
         .await
         .unwrap();
@@ -2606,9 +2593,7 @@ async fn test_all_flow() {
         Some(ProverConfig {
             proving_mode: sov_stf_runner::ProverGuestRunConfig::Execute,
             proof_sampling_number: 0,
-            db_config: Some(
-                SharedBackupDbConfig::default().set_db_name("test_all_flow".to_owned()),
-            ),
+            db_config: Some(SharedBackupDbConfig::default().set_db_name(psql_db_name)),
         }),
         NodeMode::Prover(seq_port),
         prover_db_dir,
@@ -2654,7 +2639,7 @@ async fn test_all_flow() {
     let full_node_test_client = make_test_client(full_node_port).await;
 
     da_service.publish_test_block().await.unwrap();
-    sleep(Duration::from_secs(1)).await;
+    wait_for_l1_block(&da_service, 1, None).await;
 
     test_client.send_publish_batch_request().await;
 
@@ -2676,6 +2661,7 @@ async fn test_all_flow() {
         .await
         .unwrap();
     test_client.send_publish_batch_request().await;
+    da_service.publish_test_block().await.unwrap();
 
     // submits with new da block
     test_client.send_publish_batch_request().await;
@@ -2684,15 +2670,12 @@ async fn test_all_flow() {
     // da_service.publish_test_block().await.unwrap();
 
     // wait here until we see from prover's rpc that it finished proving
-    while prover_node_test_client
-        .prover_get_last_scanned_l1_height()
-        .await
-        != 5
-    {
-        // sleep 2
-        sleep(Duration::from_secs(2)).await;
-    }
-    sleep(Duration::from_secs(4)).await;
+    wait_for_prover_l1_height(
+        &prover_node_test_client,
+        5,
+        Some(Duration::from_secs(DEFAULT_PROOF_WAIT_DURATION)),
+    )
+    .await;
 
     let commitments = prover_node_test_client
         .ledger_get_sequencer_commitments_on_slot_by_number(4)
@@ -2758,6 +2741,8 @@ async fn test_all_flow() {
         full_node_proof[0].state_transition
     );
 
+    wait_for_l2_block(&full_node_test_client, 5, None).await;
+
     full_node_test_client
         .ledger_get_soft_confirmation_status(5)
         .await
@@ -2806,15 +2791,12 @@ async fn test_all_flow() {
     test_client.send_publish_batch_request().await;
 
     // wait here until we see from prover's rpc that it finished proving
-    while prover_node_test_client
-        .prover_get_last_scanned_l1_height()
-        .await
-        != 8
-    {
-        // sleep 2
-        sleep(Duration::from_secs(2)).await;
-    }
-    sleep(Duration::from_secs(4)).await;
+    wait_for_prover_l1_height(
+        &prover_node_test_client,
+        8,
+        Some(Duration::from_secs(DEFAULT_PROOF_WAIT_DURATION)),
+    )
+    .await;
 
     let commitments = prover_node_test_client
         .ledger_get_sequencer_commitments_on_slot_by_number(7)
@@ -2842,6 +2824,8 @@ async fn test_all_flow() {
     // let full node see the proof
     test_client.send_publish_batch_request().await;
 
+    wait_for_l2_block(&full_node_test_client, 8, None).await;
+
     sleep(Duration::from_secs(2)).await;
 
     let full_node_proof_data = full_node_test_client
diff --git a/bin/citrea/tests/sequencer_commitments/mod.rs b/bin/citrea/tests/sequencer_commitments/mod.rs
index 24faf5c53..46b021e43 100644
--- a/bin/citrea/tests/sequencer_commitments/mod.rs
+++ b/bin/citrea/tests/sequencer_commitments/mod.rs
@@ -10,18 +10,18 @@ use sov_modules_api::{BlobReaderTrait, SignedSoftConfirmationBatch};
 use sov_rollup_interface::da::DaData;
 use sov_rollup_interface::services::da::DaService;
 use sov_stf_runner::ProverConfig;
-use tokio::time::sleep;
 
 use crate::evm::make_test_client;
 use crate::test_client::TestClient;
 use crate::test_helpers::{
-    create_default_sequencer_config, start_rollup, tempdir_with_children, NodeMode,
+    create_default_sequencer_config, start_rollup, tempdir_with_children, wait_for_l1_block,
+    wait_for_l2_block, wait_for_postgres_commitment, wait_for_prover_l1_height, NodeMode,
 };
-use crate::DEFAULT_DEPOSIT_MEMPOOL_FETCH_LIMIT;
+use crate::{DEFAULT_DEPOSIT_MEMPOOL_FETCH_LIMIT, DEFAULT_PROOF_WAIT_DURATION};
 
 #[tokio::test]
 async fn sequencer_sends_commitments_to_da_layer() {
-    // citrea::initialize_logging();
+    citrea::initialize_logging();
 
     let db_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]);
     let da_db_dir = db_dir.path().join("DA").to_path_buf();
@@ -105,7 +105,8 @@
     da_service.publish_test_block().await.unwrap();
 
     test_client.send_publish_batch_request().await;
-    sleep(Duration::from_secs(1)).await;
+
+    wait_for_l2_block(&test_client, 5, None).await;
 
     let start_l2_block: u64 = end_l2_block + 1;
     let end_l2_block: u64 = end_l2_block + 5; // can only be the block before the one comitment landed in
@@ -252,10 +253,15 @@ async fn check_commitment_in_offchain_db() {
     // new da block
     da_service.publish_test_block().await.unwrap();
 
-    // commtiment should be published with this call
+    // commitment should be published with this call
     test_client.send_publish_batch_request().await;
 
-    sleep(Duration::from_secs(5)).await;
+    wait_for_postgres_commitment(
+        &db_test_client,
+        1,
+        Some(Duration::from_secs(DEFAULT_PROOF_WAIT_DURATION)),
+    )
+    .await;
 
     let commitments = db_test_client.get_all_commitments().await.unwrap();
     assert_eq!(commitments.len(), 1);
@@ -323,7 +329,6 @@ async fn test_ledger_get_commitments_on_slot() {
     let full_node_test_client = make_test_client(full_node_port).await;
 
     da_service.publish_test_block().await.unwrap();
-    sleep(Duration::from_secs(1)).await;
 
     test_client.send_publish_batch_request().await;
     test_client.send_publish_batch_request().await;
@@ -335,7 +340,8 @@ async fn test_ledger_get_commitments_on_slot() {
     // full node gets the commitment
     test_client.send_publish_batch_request().await;
     // da_service.publish_test_block().await.unwrap();
-    sleep(Duration::from_secs(4)).await;
+
+    wait_for_l2_block(&full_node_test_client, 6, None).await;
 
     let commitments = full_node_test_client
         .ledger_get_sequencer_commitments_on_slot_by_number(4)
@@ -371,7 +377,7 @@
 
 #[tokio::test]
 async fn test_ledger_get_commitments_on_slot_prover() {
-    // citrea::initialize_logging();
+    citrea::initialize_logging();
 
     let db_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]);
     let da_db_dir = db_dir.path().join("DA").to_path_buf();
@@ -431,7 +437,7 @@ async fn test_ledger_get_commitments_on_slot_prover() {
     let prover_node_test_client = make_test_client(prover_node_port).await;
 
     da_service.publish_test_block().await.unwrap();
-    sleep(Duration::from_secs(1)).await;
+    wait_for_l1_block(&da_service, 1, None).await;
 
     test_client.send_publish_batch_request().await;
     test_client.send_publish_batch_request().await;
@@ -445,15 +451,12 @@ async fn test_ledger_get_commitments_on_slot_prover() {
     // da_service.publish_test_block().await.unwrap();
 
     // wait here until we see from prover's rpc that it finished proving
-    while prover_node_test_client
-        .prover_get_last_scanned_l1_height()
-        .await
-        != 5
-    {
-        // sleep 2
-        sleep(Duration::from_secs(2)).await;
-    }
-    sleep(Duration::from_secs(4)).await;
+    wait_for_prover_l1_height(
+        &prover_node_test_client,
+        5,
+        Some(Duration::from_secs(DEFAULT_PROOF_WAIT_DURATION)),
+    )
+    .await;
 
     let commitments = prover_node_test_client
         .ledger_get_sequencer_commitments_on_slot_by_number(4)
diff --git a/bin/citrea/tests/test_helpers.rs b/bin/citrea/tests/test_helpers/mod.rs
similarity index 64%
rename from bin/citrea/tests/test_helpers.rs
rename to bin/citrea/tests/test_helpers/mod.rs
index a5ca9e844..8016dd867 100644
--- a/bin/citrea/tests/test_helpers.rs
+++ b/bin/citrea/tests/test_helpers/mod.rs
@@ -1,11 +1,14 @@
 use std::net::SocketAddr;
 use std::path::{Path, PathBuf};
+use std::time::{Duration, SystemTime};
 
 use citrea::MockDemoRollup;
 use citrea_sequencer::SequencerConfig;
 use citrea_stf::genesis_config::GenesisPaths;
+use reth_rpc_types::BlockNumberOrTag;
 use rollup_constants::TEST_PRIVATE_KEY;
-use sov_mock_da::{MockAddress, MockDaConfig};
+use shared_backup_db::PostgresConnector;
+use sov_mock_da::{MockAddress, MockDaConfig, MockDaService};
 use sov_modules_api::default_signature::private_key::DefaultPrivateKey;
 use sov_modules_api::PrivateKey;
 use sov_modules_rollup_blueprint::RollupBlueprint;
@@ -14,7 +17,10 @@ use sov_stf_runner::{
 };
 use tempfile::TempDir;
 use tokio::sync::oneshot;
-use tracing::warn;
+use tokio::time::sleep;
+use tracing::{debug, warn};
+
+use crate::test_client::TestClient;
 
 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
 pub enum NodeMode {
@@ -156,8 +162,96 @@ pub fn tempdir_with_children(children: &[&str]) -> TempDir {
     let db_dir = tempfile::tempdir().expect("Could not create temporary directory for test");
     for child in children {
         let p = db_dir.path().join(child);
-        std::fs::create_dir(p).unwrap();
+        if !std::path::Path::new(&p).exists() {
+            std::fs::create_dir(p).unwrap();
+        }
     }
 
     db_dir
 }
+
+pub async fn wait_for_l2_block(sequencer_client: &TestClient, num: u64, timeout: Option<Duration>) {
+    let start = SystemTime::now();
+    let timeout = timeout.unwrap_or(Duration::from_secs(30)); // Default 30 seconds timeout
+    loop {
+        debug!("Waiting for soft batch {}", num);
+        let latest_block = sequencer_client
+            .eth_get_block_by_number_with_detail(Some(BlockNumberOrTag::Latest))
+            .await;
+        if latest_block.number >= Some(num.into()) {
+            break;
+        }
+
+        let now = SystemTime::now();
+        if start + timeout <= now {
+            panic!("Timeout");
+        }
+
+        sleep(Duration::from_secs(1)).await;
+    }
+}
+
+pub async fn wait_for_prover_l1_height(
+    prover_client: &TestClient,
+    num: u64,
+    timeout: Option<Duration>,
+) {
+    let start = SystemTime::now();
+    let timeout = timeout.unwrap_or(Duration::from_secs(30)); // Default 30 seconds timeout
+    loop {
+        debug!("Waiting for prover height {}", num);
+        let latest_block = prover_client.prover_get_last_scanned_l1_height().await;
+        if latest_block >= num {
+            break;
+        }
+
+        let now = SystemTime::now();
+        if start + timeout <= now {
+            panic!("Timeout");
+        }
+
+        sleep(Duration::from_secs(1)).await;
+    }
+}
+
+pub async fn wait_for_l1_block(da_service: &MockDaService, num: u64, timeout: Option<Duration>) {
+    let start = SystemTime::now();
+    let timeout = timeout.unwrap_or(Duration::from_secs(30)); // Default 30 seconds timeout
+    loop {
+        debug!("Waiting for L1 block height {}", num);
+        let da_block = da_service.get_height().await;
+        if da_block >= num {
+            break;
+        }
+
+        let now = SystemTime::now();
+        if start + timeout <= now {
+            panic!("Timeout");
+        }
+
+        sleep(Duration::from_secs(1)).await;
+    }
+}
+
+pub async fn wait_for_postgres_commitment(
+    db_test_client: &PostgresConnector,
+    num: usize,
+    timeout: Option<Duration>,
+) {
+    let start = SystemTime::now();
+    let timeout = timeout.unwrap_or(Duration::from_secs(30)); // Default 30 seconds timeout
+    loop {
+        debug!("Waiting for {} L1 commitments to be published", num);
+        let commitments = db_test_client.get_all_commitments().await.unwrap().len();
+        if commitments >= num {
+            break;
+        }
+
+        let now = SystemTime::now();
+        if start + timeout <= now {
+            panic!("Timeout");
+        }
+
+        sleep(Duration::from_secs(1)).await;
+    }
+}
diff --git a/crates/shared-backup-db/src/postgres_connector.rs b/crates/shared-backup-db/src/postgres_connector.rs
index fa183647f..8402705ec 100644
--- a/crates/shared-backup-db/src/postgres_connector.rs
+++ b/crates/shared-backup-db/src/postgres_connector.rs
@@ -104,7 +104,8 @@ impl PostgresConnector {
         drop(pool);
 
         //connect to new db
-        cfg.dbname(db_name.as_str());
+        cfg.dbname(&db_name);
+
         let mgr = Manager::from_config(cfg, NoTls, mgr_config);
         let test_pool = Pool::builder(mgr).max_size(16).build().unwrap();
         let test_client = test_pool.get().await.unwrap();
diff --git a/crates/sovereign-sdk/adapters/mock-da/src/service.rs b/crates/sovereign-sdk/adapters/mock-da/src/service.rs
index eedb700c4..c660937ee 100644
--- a/crates/sovereign-sdk/adapters/mock-da/src/service.rs
+++ b/crates/sovereign-sdk/adapters/mock-da/src/service.rs
@@ -171,6 +171,11 @@ impl MockDaService {
         Ok(())
     }
 
+    /// Returns the latest block number
+    pub async fn get_height(&self) -> u64 {
+        self.blocks.lock().await.len() as u64
+    }
+
     async fn get_last_finalized_height(&self) -> u64 {
         self.blocks
             .lock()