From 3650b631a39d7eb134c588f76cedefd846130907 Mon Sep 17 00:00:00 2001 From: Rakan Al-Huneiti Date: Tue, 28 May 2024 12:15:14 +0300 Subject: [PATCH] Create tempdir for tests (#626) * Use temp_dir to create a temporary directory for test db * Make db_connector receive path * Pass db path to sov-stf-runner * Pass path down * Pass db path for DB * Separate rollup from da storage paths * Make em pass * Fix some more * Pass db name to test * Update prover tests * Remove usage of thread name * Fix rollup config * Create db_directory if it doesn't exist * Add pg connector db name param * Make it compile * Cleanup * Clippy * Use da_db in execute_blocks * Copy fullnode db directory * Always use passed db name * Not multi-threaded yet * Fix clippy * Restore prints * Remove unwrap * Don't use tempdir if not using service * Resolve PR feedback * Quote path * Fix clippy * Add da db to mock configs * Add db-path where it's missing --- Cargo.lock | 1 + .../mock-dockerized/rollup_config.toml | 1 + bin/citrea/configs/mock/rollup_config.toml | 1 + .../configs/mock/sequencer_rollup_config.toml | 1 + bin/citrea/configs/mocknet/rollup_config.toml | 1 + .../mocknet/sequencer_rollup_config.toml | 1 + bin/citrea/src/mock_rollup.rs | 2 +- bin/citrea/tests/e2e/mod.rs | 451 +++++++++++++----- bin/citrea/tests/evm/archival_state.rs | 10 +- bin/citrea/tests/evm/gas_price.rs | 12 +- bin/citrea/tests/evm/mod.rs | 37 +- bin/citrea/tests/evm/tracing.rs | 10 +- bin/citrea/tests/mempool/mod.rs | 41 +- bin/citrea/tests/sequencer_commitments/mod.rs | 62 ++- .../soft_confirmation_rule_enforcer/mod.rs | 14 +- bin/citrea/tests/test_helpers.rs | 41 +- crates/sequencer/src/rpc.rs | 14 - crates/shared-backup-db/src/config.rs | 5 + crates/shared-backup-db/src/lib.rs | 1 - .../src/postgres_connector.rs | 27 +- crates/shared-backup-db/src/tables.rs | 6 - crates/shared-backup-db/src/utils.rs | 19 - .../sovereign-sdk/adapters/mock-da/Cargo.toml | 1 + .../adapters/mock-da/src/db_connector.rs | 71 +-- .../adapters/mock-da/src/service.rs | 48 +- .../adapters/mock-da/src/types/mod.rs | 3 + .../full-node/sov-sequencer/src/lib.rs | 10 +- .../full-node/sov-stf-runner/src/config.rs | 17 +- .../sov-stf-runner/tests/prover_tests.rs | 7 +- .../tests/runner_initialization_tests.rs | 21 +- .../tests/runner_reorg_tests.rs | 14 +- 31 files changed, 627 insertions(+), 323 deletions(-) delete mode 100644 crates/shared-backup-db/src/utils.rs diff --git a/Cargo.lock b/Cargo.lock index 4ffbd2108..14a1b61e9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9012,6 +9012,7 @@ dependencies = [ "serde_json", "sha2 0.10.8", "sov-rollup-interface", + "tempfile", "tokio", "tokio-stream", "tracing", diff --git a/bin/citrea/configs/mock-dockerized/rollup_config.toml b/bin/citrea/configs/mock-dockerized/rollup_config.toml index c91dd7466..e720dfd53 100644 --- a/bin/citrea/configs/mock-dockerized/rollup_config.toml +++ b/bin/citrea/configs/mock-dockerized/rollup_config.toml @@ -5,6 +5,7 @@ prover_da_pub_key = "" [da] sender_address = "0000000000000000000000000000000000000000000000000000000000000000" +db_path = "da-db" [storage] # The path to the rollup's data directory. Paths that do not begin with `/` are interpreted as relative paths. 
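Note on the test helper used throughout this patch: the tests below repeatedly call tempdir_with_children(&["DA", "sequencer", ...]) and then derive per-node paths via storage_dir.path().join(...). The helper itself lives in bin/citrea/tests/test_helpers.rs, whose hunk is not shown in this excerpt, so the following is only a minimal sketch of what such a helper plausibly looks like, built on the tempfile crate that this patch adds to Cargo.lock; the exact body and error handling are assumptions, not the author's implementation.

use std::fs;
use tempfile::TempDir;

/// Create a temporary root directory plus one child directory per given name,
/// e.g. tempdir_with_children(&["DA", "sequencer", "full-node"]).
/// The whole tree is deleted automatically when the returned TempDir is dropped,
/// so each test works against isolated, self-cleaning storage.
pub fn tempdir_with_children(children: &[&str]) -> TempDir {
    let root = TempDir::new().expect("could not create temporary directory");
    for child in children {
        fs::create_dir_all(root.path().join(child)).expect("could not create child directory");
    }
    root
}

Because the temporary tree cleans itself up, the tests no longer need the fs::remove_dir_all housekeeping that the diff removes.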
diff --git a/bin/citrea/configs/mock/rollup_config.toml b/bin/citrea/configs/mock/rollup_config.toml index b94a45493..2c5cbd258 100644 --- a/bin/citrea/configs/mock/rollup_config.toml +++ b/bin/citrea/configs/mock/rollup_config.toml @@ -5,6 +5,7 @@ prover_da_pub_key = "" [da] sender_address = "0000000000000000000000000000000000000000000000000000000000000000" +db_path = "da-db" [storage] # The path to the rollup's data directory. Paths that do not begin with `/` are interpreted as relative paths. diff --git a/bin/citrea/configs/mock/sequencer_rollup_config.toml b/bin/citrea/configs/mock/sequencer_rollup_config.toml index acbb930da..c4f8335ec 100644 --- a/bin/citrea/configs/mock/sequencer_rollup_config.toml +++ b/bin/citrea/configs/mock/sequencer_rollup_config.toml @@ -5,6 +5,7 @@ prover_da_pub_key = "" [da] sender_address = "0000000000000000000000000000000000000000000000000000000000000000" +db_path = "da-db" [storage] # The path to the rollup's data directory. Paths that do not begin with `/` are interpreted as relative paths. diff --git a/bin/citrea/configs/mocknet/rollup_config.toml b/bin/citrea/configs/mocknet/rollup_config.toml index 81091fdb6..1bc04e8f8 100644 --- a/bin/citrea/configs/mocknet/rollup_config.toml +++ b/bin/citrea/configs/mocknet/rollup_config.toml @@ -5,6 +5,7 @@ prover_da_pub_key = "" [da] sender_address = "0000000000000000000000000000000000000000000000000000000000000000" +db_path = "da-db" [storage] # The path to the rollup's data directory. Paths that do not begin with `/` are interpreted as relative paths. diff --git a/bin/citrea/configs/mocknet/sequencer_rollup_config.toml b/bin/citrea/configs/mocknet/sequencer_rollup_config.toml index de95f383e..5dcd5a8e6 100644 --- a/bin/citrea/configs/mocknet/sequencer_rollup_config.toml +++ b/bin/citrea/configs/mocknet/sequencer_rollup_config.toml @@ -5,6 +5,7 @@ prover_da_pub_key = "" [da] sender_address = "0000000000000000000000000000000000000000000000000000000000000000" +db_path = "da-db" [storage] # The path to the rollup's data directory. Paths that do not begin with `/` are interpreted as relative paths. 
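With db_path now part of the [da] section (config hunks above) and MockDaService::new taking the DA database path (see the mock_rollup.rs hunk just below), a typical test wires the DA service to its own temporary directory. A hedged usage sketch, assuming the crate is imported as sov_mock_da, that the tempdir_with_children helper sketched earlier is in scope, and that the test name here is purely illustrative:

use sov_mock_da::{MockAddress, MockDaService}; // import path assumed

#[tokio::test]
async fn da_service_writes_to_per_test_db() {
    // Per-test storage tree, removed when `storage_dir` is dropped.
    let storage_dir = tempdir_with_children(&["DA", "sequencer"]);
    let da_db_dir = storage_dir.path().join("DA");

    // The mock DA service now persists its blocks under the supplied path,
    // so each test gets its own isolated DA database.
    let da_service = MockDaService::new(MockAddress::from([0; 32]), &da_db_dir);
    da_service.publish_test_block().await.unwrap();
}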
diff --git a/bin/citrea/src/mock_rollup.rs b/bin/citrea/src/mock_rollup.rs index 8d5156b14..7ce28fffb 100644 --- a/bin/citrea/src/mock_rollup.rs +++ b/bin/citrea/src/mock_rollup.rs @@ -79,7 +79,7 @@ impl RollupBlueprint for MockDemoRollup { &self, rollup_config: &RollupConfig, ) -> Self::DaService { - MockDaService::new(rollup_config.da.sender_address) + MockDaService::new(rollup_config.da.sender_address, &rollup_config.da.db_path) } async fn create_prover_service( diff --git a/bin/citrea/tests/e2e/mod.rs b/bin/citrea/tests/e2e/mod.rs index 26e034f1c..5f6822bcb 100644 --- a/bin/citrea/tests/e2e/mod.rs +++ b/bin/citrea/tests/e2e/mod.rs @@ -1,5 +1,5 @@ use std::fs; -use std::path::Path; +use std::path::{Path, PathBuf}; use std::str::FromStr; use std::time::Duration; @@ -24,12 +24,17 @@ use tokio::time::sleep; use crate::evm::{init_test_rollup, make_test_client}; use crate::test_client::TestClient; -use crate::test_helpers::{create_default_sequencer_config, start_rollup, NodeMode}; +use crate::test_helpers::{ + create_default_sequencer_config, start_rollup, tempdir_with_children, NodeMode, +}; use crate::{DEFAULT_DEPOSIT_MEMPOOL_FETCH_LIMIT, DEFAULT_MIN_SOFT_CONFIRMATIONS_PER_COMMITMENT}; struct TestConfig { seq_min_soft_confirmations: u64, deposit_mempool_fetch_limit: usize, + sequencer_path: PathBuf, + fullnode_path: PathBuf, + da_path: PathBuf, } impl Default for TestConfig { @@ -37,6 +42,9 @@ impl Default for TestConfig { Self { seq_min_soft_confirmations: DEFAULT_MIN_SOFT_CONFIRMATIONS_PER_COMMITMENT, deposit_mempool_fetch_limit: 10, + sequencer_path: PathBuf::new(), + fullnode_path: PathBuf::new(), + da_path: PathBuf::new(), } } } @@ -52,13 +60,19 @@ async fn initialize_test( ) { let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); + let db_path = config.da_path.clone(); + let sequencer_path = config.sequencer_path.clone(); + let fullnode_path = config.fullnode_path.clone(); + + let db_path1 = db_path.clone(); let seq_task = tokio::spawn(async move { start_rollup( seq_port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::SequencerNode, - None, + sequencer_path, + db_path1, config.seq_min_soft_confirmations, true, None, @@ -74,14 +88,16 @@ async fn initialize_test( let (full_node_port_tx, full_node_port_rx) = tokio::sync::oneshot::channel(); + let db_path2 = db_path.clone(); let full_node_task = tokio::spawn(async move { start_rollup( full_node_port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::FullNode(seq_port), - None, - DEFAULT_MIN_SOFT_CONFIRMATIONS_PER_COMMITMENT, + fullnode_path, + db_path2, + config.seq_min_soft_confirmations, true, None, None, @@ -105,17 +121,30 @@ async fn initialize_test( #[tokio::test] async fn test_soft_batch_save() -> Result<(), anyhow::Error> { - let config = TestConfig::default(); + let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + let fullnode_db_dir = storage_dir.path().join("full-node").to_path_buf(); + let fullnode2_db_dir = storage_dir.path().join("full-node2").to_path_buf(); + + let config = TestConfig { + da_path: da_db_dir.clone(), + sequencer_path: sequencer_db_dir.clone(), + fullnode_path: fullnode_db_dir.clone(), + ..Default::default() + }; let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); + let da_db_dir_cloned = da_db_dir.clone(); let seq_task = tokio::spawn(async move { 
start_rollup( seq_port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::SequencerNode, - None, + sequencer_db_dir, + da_db_dir_cloned, config.seq_min_soft_confirmations, true, None, @@ -131,13 +160,15 @@ async fn test_soft_batch_save() -> Result<(), anyhow::Error> { let (full_node_port_tx, full_node_port_rx) = tokio::sync::oneshot::channel(); + let da_db_dir_cloned = da_db_dir.clone(); let full_node_task = tokio::spawn(async move { start_rollup( full_node_port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::FullNode(seq_port), - None, + fullnode_db_dir, + da_db_dir_cloned, DEFAULT_MIN_SOFT_CONFIRMATIONS_PER_COMMITMENT, true, None, @@ -153,13 +184,15 @@ async fn test_soft_batch_save() -> Result<(), anyhow::Error> { let (full_node_port_tx_2, full_node_port_rx_2) = tokio::sync::oneshot::channel(); + let da_db_dir_cloned = da_db_dir.clone(); let full_node_task_2 = tokio::spawn(async move { start_rollup( full_node_port_tx_2, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::FullNode(full_node_port), - None, + fullnode2_db_dir, + da_db_dir_cloned, DEFAULT_MIN_SOFT_CONFIRMATIONS_PER_COMMITMENT, false, None, @@ -173,7 +206,7 @@ async fn test_soft_batch_save() -> Result<(), anyhow::Error> { let full_node_port_2 = full_node_port_rx_2.await.unwrap(); let full_node_test_client_2 = make_test_client(full_node_port_2).await; - let _ = execute_blocks(&seq_test_client, &full_node_test_client).await; + let _ = execute_blocks(&seq_test_client, &full_node_test_client, &da_db_dir.clone()).await; sleep(Duration::from_secs(10)).await; @@ -203,8 +236,19 @@ async fn test_soft_batch_save() -> Result<(), anyhow::Error> { async fn test_full_node_send_tx() -> Result<(), anyhow::Error> { // citrea::initialize_logging(); + let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + let fullnode_db_dir = storage_dir.path().join("full-node").to_path_buf(); + let (seq_test_client, full_node_test_client, seq_task, full_node_task, addr) = - initialize_test(Default::default()).await; + initialize_test(TestConfig { + da_path: da_db_dir, + sequencer_path: sequencer_db_dir, + fullnode_path: fullnode_db_dir, + ..Default::default() + }) + .await; let tx_hash = full_node_test_client .send_eth(addr, None, None, None, 0u128) @@ -237,15 +281,22 @@ async fn test_full_node_send_tx() -> Result<(), anyhow::Error> { async fn test_delayed_sync_ten_blocks() -> Result<(), anyhow::Error> { // citrea::initialize_logging(); + let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + let fullnode_db_dir = storage_dir.path().join("full-node").to_path_buf(); + let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); + let da_db_dir_cloned = da_db_dir.clone(); let seq_task = tokio::spawn(async { start_rollup( seq_port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::SequencerNode, - None, + sequencer_db_dir, + da_db_dir_cloned, DEFAULT_MIN_SOFT_CONFIRMATIONS_PER_COMMITMENT, true, None, @@ -271,13 +322,15 @@ async fn test_delayed_sync_ten_blocks() -> Result<(), anyhow::Error> { let (full_node_port_tx, full_node_port_rx) = tokio::sync::oneshot::channel(); + let da_db_dir_cloned = 
da_db_dir.clone(); let full_node_task = tokio::spawn(async move { start_rollup( full_node_port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::FullNode(seq_port), - None, + fullnode_db_dir, + da_db_dir_cloned, DEFAULT_MIN_SOFT_CONFIRMATIONS_PER_COMMITMENT, true, None, @@ -313,10 +366,21 @@ async fn test_delayed_sync_ten_blocks() -> Result<(), anyhow::Error> { async fn test_e2e_same_block_sync() -> Result<(), anyhow::Error> { // citrea::initialize_logging(); + let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + let fullnode_db_dir = storage_dir.path().join("full-node").to_path_buf(); + let (seq_test_client, full_node_test_client, seq_task, full_node_task, _) = - initialize_test(Default::default()).await; + initialize_test(TestConfig { + sequencer_path: sequencer_db_dir, + da_path: da_db_dir.clone(), + fullnode_path: fullnode_db_dir, + ..Default::default() + }) + .await; - let _ = execute_blocks(&seq_test_client, &full_node_test_client).await; + let _ = execute_blocks(&seq_test_client, &full_node_test_client, &da_db_dir).await; seq_task.abort(); full_node_task.abort(); @@ -327,20 +391,22 @@ async fn test_e2e_same_block_sync() -> Result<(), anyhow::Error> { #[tokio::test] async fn test_close_and_reopen_full_node() -> Result<(), anyhow::Error> { // citrea::initialize_logging(); - - // Remove temp db directories if they exist - let _ = fs::remove_dir_all(Path::new("demo_data_test_close_and_reopen_full_node_copy")); - let _ = fs::remove_dir_all(Path::new("demo_data_test_close_and_reopen_full_node")); + let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + let fullnode_db_dir = storage_dir.path().join("full-node").to_path_buf(); let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); + let da_db_dir_cloned = da_db_dir.clone(); let seq_task = tokio::spawn(async { start_rollup( seq_port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::SequencerNode, - None, + sequencer_db_dir, + da_db_dir_cloned, DEFAULT_MIN_SOFT_CONFIRMATIONS_PER_COMMITMENT, true, None, @@ -355,6 +421,8 @@ async fn test_close_and_reopen_full_node() -> Result<(), anyhow::Error> { let (full_node_port_tx, full_node_port_rx) = tokio::sync::oneshot::channel(); + let da_db_dir_cloned = da_db_dir.clone(); + let fullnode_db_dir_cloned = fullnode_db_dir.clone(); // starting full node with db path let rollup_task = tokio::spawn(async move { start_rollup( @@ -362,7 +430,8 @@ async fn test_close_and_reopen_full_node() -> Result<(), anyhow::Error> { GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::FullNode(seq_port), - Some("demo_data_test_close_and_reopen_full_node"), + fullnode_db_dir_cloned, + da_db_dir_cloned, DEFAULT_MIN_SOFT_CONFIRMATIONS_PER_COMMITMENT, true, None, @@ -421,7 +490,7 @@ async fn test_close_and_reopen_full_node() -> Result<(), anyhow::Error> { seq_test_client.send_publish_batch_request().await; } - let da_service = MockDaService::new(MockAddress::from([0; 32])); + let da_service = MockDaService::new(MockAddress::from([0; 32]), &da_db_dir); da_service.publish_test_block().await.unwrap(); // start full node again @@ -429,13 +498,12 @@ async fn test_close_and_reopen_full_node() -> 
Result<(), anyhow::Error> { // Copy the db to a new path with the same contents because // the lock is not released on the db directory even though the task is aborted - let _ = copy_dir_recursive( - Path::new("demo_data_test_close_and_reopen_full_node"), - Path::new("demo_data_test_close_and_reopen_full_node_copy"), - ); + let _ = copy_dir_recursive(&fullnode_db_dir, &storage_dir.path().join("fullnode_copy")); sleep(Duration::from_secs(5)).await; + let da_db_dir_cloned = da_db_dir.clone(); + let fullnode_db_dir = storage_dir.path().join("fullnode_copy"); // spin up the full node again with the same data where it left of only with different path to not stuck on lock let rollup_task = tokio::spawn(async move { start_rollup( @@ -443,7 +511,8 @@ async fn test_close_and_reopen_full_node() -> Result<(), anyhow::Error> { GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::FullNode(seq_port), - Some("demo_data_test_close_and_reopen_full_node_copy"), + fullnode_db_dir, + da_db_dir_cloned, DEFAULT_MIN_SOFT_CONFIRMATIONS_PER_COMMITMENT, true, None, @@ -476,9 +545,6 @@ async fn test_close_and_reopen_full_node() -> Result<(), anyhow::Error> { assert_eq!(seq_last_block.state_root, full_node_last_block.state_root); assert_eq!(seq_last_block.hash, full_node_last_block.hash); - fs::remove_dir_all(Path::new("demo_data_test_close_and_reopen_full_node_copy")).unwrap(); - fs::remove_dir_all(Path::new("demo_data_test_close_and_reopen_full_node")).unwrap(); - seq_task.abort(); rollup_task.abort(); @@ -488,16 +554,22 @@ async fn test_close_and_reopen_full_node() -> Result<(), anyhow::Error> { #[tokio::test] async fn test_get_transaction_by_hash() -> Result<(), anyhow::Error> { // citrea::initialize_logging(); + let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + let fullnode_db_dir = storage_dir.path().join("full-node").to_path_buf(); let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); + let da_dir_cloned = da_db_dir.clone(); let seq_task = tokio::spawn(async { start_rollup( seq_port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::SequencerNode, - None, + sequencer_db_dir, + da_dir_cloned, DEFAULT_MIN_SOFT_CONFIRMATIONS_PER_COMMITMENT, true, None, @@ -512,13 +584,15 @@ async fn test_get_transaction_by_hash() -> Result<(), anyhow::Error> { let (full_node_port_tx, full_node_port_rx) = tokio::sync::oneshot::channel(); + let da_dir_cloned = da_db_dir.clone(); let rollup_task = tokio::spawn(async move { start_rollup( full_node_port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::FullNode(seq_port), - None, + fullnode_db_dir, + da_dir_cloned, DEFAULT_MIN_SOFT_CONFIRMATIONS_PER_COMMITMENT, true, None, @@ -650,11 +724,21 @@ async fn test_get_transaction_by_hash() -> Result<(), anyhow::Error> { #[tokio::test] async fn test_soft_confirmations_on_different_blocks() -> Result<(), anyhow::Error> { // citrea::initialize_logging(); + let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + let fullnode_db_dir = storage_dir.path().join("full-node").to_path_buf(); - let da_service = MockDaService::new(MockAddress::default()); + let da_service = MockDaService::new(MockAddress::default(), 
&da_db_dir.clone()); let (seq_test_client, full_node_test_client, seq_task, full_node_task, _) = - initialize_test(Default::default()).await; + initialize_test(TestConfig { + da_path: da_db_dir.clone(), + sequencer_path: sequencer_db_dir.clone(), + fullnode_path: fullnode_db_dir.clone(), + ..Default::default() + }) + .await; // first publish a few blocks fast make it land in the same da block for _ in 1..=6 { @@ -740,20 +824,22 @@ async fn test_soft_confirmations_on_different_blocks() -> Result<(), anyhow::Err #[tokio::test] async fn test_reopen_sequencer() -> Result<(), anyhow::Error> { // open, close without publishing blokcs - // then reopen, publish some blocks without error - // Remove temp db directories if they exist - let _ = fs::remove_dir_all(Path::new("demo_data_test_reopen_sequencer_copy")); - let _ = fs::remove_dir_all(Path::new("demo_data_test_reopen_sequencer")); + let storage_dir = tempdir_with_children(&["DA", "sequencer"]); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); + let sequencer_db_dir_cloned = sequencer_db_dir.clone(); + let da_db_dir_cloned = da_db_dir.clone(); let seq_task = tokio::spawn(async { start_rollup( seq_port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::SequencerNode, - Some("demo_data_test_reopen_sequencer"), + sequencer_db_dir_cloned, + da_db_dir_cloned, DEFAULT_MIN_SOFT_CONFIRMATIONS_PER_COMMITMENT, true, None, @@ -783,22 +869,25 @@ async fn test_reopen_sequencer() -> Result<(), anyhow::Error> { // Copy the db to a new path with the same contents because // the lock is not released on the db directory even though the task is aborted let _ = copy_dir_recursive( - Path::new("demo_data_test_reopen_sequencer"), - Path::new("demo_data_test_reopen_sequencer_copy"), + &sequencer_db_dir, + &storage_dir.path().join("sequencer_copy"), ); - let da_service = MockDaService::new(MockAddress::from([0; 32])); + let da_service = MockDaService::new(MockAddress::from([0; 32]), &da_db_dir); da_service.publish_test_block().await.unwrap(); sleep(Duration::from_secs(1)).await; + let sequencer_db_dir = storage_dir.path().join("sequencer_copy"); + let da_db_dir_cloned = da_db_dir.clone(); let seq_task = tokio::spawn(async { start_rollup( seq_port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::SequencerNode, - Some("demo_data_test_reopen_sequencer_copy"), + sequencer_db_dir, + da_db_dir_cloned, DEFAULT_MIN_SOFT_CONFIRMATIONS_PER_COMMITMENT, true, None, @@ -837,9 +926,6 @@ async fn test_reopen_sequencer() -> Result<(), anyhow::Error> { 2 ); - fs::remove_dir_all(Path::new("demo_data_test_reopen_sequencer_copy")).unwrap(); - fs::remove_dir_all(Path::new("demo_data_test_reopen_sequencer")).unwrap(); - seq_task.abort(); Ok(()) @@ -867,6 +953,7 @@ fn copy_dir_recursive(src: &Path, dst: &Path) -> std::io::Result<()> { async fn execute_blocks( sequencer_client: &TestClient, full_node_client: &TestClient, + da_db_dir: &Path, ) -> Result<(), Box> { let (contract_address, contract) = { let contract = SimpleStorageContract::default(); @@ -911,7 +998,7 @@ async fn execute_blocks( sleep(Duration::from_secs(1)).await; } - let da_service = MockDaService::new(MockAddress::from([0; 32])); + let da_service = MockDaService::new(MockAddress::from([0; 32]), da_db_dir); da_service.publish_test_block().await.unwrap(); { @@ -949,10 +1036,18 @@ async fn 
execute_blocks( async fn test_soft_confirmations_status_one_l1() -> Result<(), anyhow::Error> { // citrea::initialize_logging(); - let da_service = MockDaService::new(MockAddress::default()); + let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + let fullnode_db_dir = storage_dir.path().join("full-node").to_path_buf(); + + let da_service = MockDaService::new(MockAddress::default(), &da_db_dir); let (seq_test_client, full_node_test_client, seq_task, full_node_task, _) = initialize_test(TestConfig { + da_path: da_db_dir.clone(), + sequencer_path: sequencer_db_dir.clone(), + fullnode_path: fullnode_db_dir.clone(), seq_min_soft_confirmations: 3, deposit_mempool_fetch_limit: 10, }) @@ -994,10 +1089,18 @@ async fn test_soft_confirmations_status_one_l1() -> Result<(), anyhow::Error> { async fn test_soft_confirmations_status_two_l1() -> Result<(), anyhow::Error> { // citrea::initialize_logging(); - let da_service = MockDaService::new(MockAddress::default()); + let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + let fullnode_db_dir = storage_dir.path().join("full-node").to_path_buf(); + + let da_service = MockDaService::new(MockAddress::default(), &da_db_dir.clone()); let (seq_test_client, full_node_test_client, seq_task, full_node_task, _) = initialize_test(TestConfig { + da_path: da_db_dir.clone(), + sequencer_path: sequencer_db_dir.clone(), + fullnode_path: fullnode_db_dir.clone(), seq_min_soft_confirmations: 3, deposit_mempool_fetch_limit: 10, }) @@ -1076,17 +1179,24 @@ async fn test_soft_confirmations_status_two_l1() -> Result<(), anyhow::Error> { async fn test_prover_sync_with_commitments() -> Result<(), anyhow::Error> { // citrea::initialize_logging(); - let da_service = MockDaService::new(MockAddress::default()); + let storage_dir = tempdir_with_children(&["DA", "sequencer", "prover"]); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + let prover_db_dir = storage_dir.path().join("prover").to_path_buf(); + + let da_service = MockDaService::new(MockAddress::default(), &da_db_dir); let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); + let da_db_dir_cloned = da_db_dir.clone(); let seq_task = tokio::spawn(async move { start_rollup( seq_port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::SequencerNode, - None, + sequencer_db_dir, + da_db_dir_cloned, 4, true, None, @@ -1102,6 +1212,7 @@ async fn test_prover_sync_with_commitments() -> Result<(), anyhow::Error> { let (prover_node_port_tx, prover_node_port_rx) = tokio::sync::oneshot::channel(); + let da_db_dir_cloned = da_db_dir.clone(); let prover_node_task = tokio::spawn(async move { start_rollup( prover_node_port_tx, @@ -1112,7 +1223,8 @@ async fn test_prover_sync_with_commitments() -> Result<(), anyhow::Error> { proof_sampling_number: 0, }), NodeMode::Prover(seq_port), - None, + prover_db_dir, + da_db_dir_cloned, 4, true, None, @@ -1215,21 +1327,24 @@ async fn test_prover_sync_with_commitments() -> Result<(), anyhow::Error> { async fn test_reopen_prover() -> Result<(), anyhow::Error> { // citrea::initialize_logging(); - let _ = 
fs::remove_dir_all(Path::new("demo_data_test_reopen_prover_copy2")); - let _ = fs::remove_dir_all(Path::new("demo_data_test_reopen_prover_copy")); - let _ = fs::remove_dir_all(Path::new("demo_data_test_reopen_prover")); + let storage_dir = tempdir_with_children(&["DA", "sequencer", "prover"]); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + let prover_db_dir = storage_dir.path().join("prover").to_path_buf(); - let da_service = MockDaService::new(MockAddress::default()); + let da_service = MockDaService::new(MockAddress::default(), &da_db_dir.clone()); let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); + let da_db_dir_cloned = da_db_dir.clone(); let seq_task = tokio::spawn(async move { start_rollup( seq_port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), Some(ProverConfig::default()), NodeMode::SequencerNode, - None, + sequencer_db_dir, + da_db_dir_cloned, 4, true, None, @@ -1245,13 +1360,16 @@ async fn test_reopen_prover() -> Result<(), anyhow::Error> { let (prover_node_port_tx, prover_node_port_rx) = tokio::sync::oneshot::channel(); + let da_db_dir_cloned = da_db_dir.clone(); + let prover_db_dir_cloned = prover_db_dir.clone(); let prover_node_task = tokio::spawn(async move { start_rollup( prover_node_port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), Some(ProverConfig::default()), NodeMode::Prover(seq_port), - Some("demo_data_test_reopen_prover"), + prover_db_dir_cloned, + da_db_dir_cloned, 4, true, None, @@ -1298,21 +1416,22 @@ async fn test_reopen_prover() -> Result<(), anyhow::Error> { assert_eq!(prover_node_test_client.eth_block_number().await, 4); prover_node_task.abort(); - let _ = copy_dir_recursive( - Path::new("demo_data_test_reopen_prover"), - Path::new("demo_data_test_reopen_prover_copy"), - ); + + let _ = copy_dir_recursive(&prover_db_dir, &storage_dir.path().join("prover_copy")); // Reopen prover with the new path let (prover_node_port_tx, prover_node_port_rx) = tokio::sync::oneshot::channel(); + let prover_copy_db_dir = storage_dir.path().join("prover_copy"); + let da_db_dir_cloned = da_db_dir.clone(); let prover_node_task = tokio::spawn(async move { start_rollup( prover_node_port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), Some(ProverConfig::default()), NodeMode::Prover(seq_port), - Some("demo_data_test_reopen_prover_copy"), + prover_copy_db_dir, + da_db_dir_cloned, 4, true, None, @@ -1342,21 +1461,21 @@ async fn test_reopen_prover() -> Result<(), anyhow::Error> { seq_test_client.send_publish_batch_request().await; seq_test_client.send_publish_batch_request().await; - let _ = copy_dir_recursive( - Path::new("demo_data_test_reopen_prover_copy"), - Path::new("demo_data_test_reopen_prover_copy2"), - ); + let _ = copy_dir_recursive(&prover_db_dir, &storage_dir.path().join("prover_copy2")); // Reopen prover with the new path let (prover_node_port_tx, prover_node_port_rx) = tokio::sync::oneshot::channel(); + let prover_copy2_dir_cloned = storage_dir.path().join("prover_copy2"); + let da_db_dir_cloned = da_db_dir.clone(); let prover_node_task = tokio::spawn(async move { start_rollup( prover_node_port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), Some(ProverConfig::default()), NodeMode::Prover(seq_port), - Some("demo_data_test_reopen_prover_copy2"), + prover_copy2_dir_cloned, + da_db_dir_cloned, 4, true, None, @@ -1397,9 +1516,6 @@ async fn test_reopen_prover() -> Result<(), 
anyhow::Error> { seq_task.abort(); prover_node_task.abort(); - let _ = fs::remove_dir_all(Path::new("demo_data_test_reopen_prover_copy2")); - let _ = fs::remove_dir_all(Path::new("demo_data_test_reopen_prover_copy")); - let _ = fs::remove_dir_all(Path::new("demo_data_test_reopen_prover")); Ok(()) } @@ -1412,7 +1528,12 @@ async fn test_system_transactons() -> Result<(), anyhow::Error> { let system_signer_address = Address::from_str("0xdeaddeaddeaddeaddeaddeaddeaddeaddeaddead").unwrap(); - let da_service = MockDaService::new(MockAddress::default()); + let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + let fullnode_db_dir = storage_dir.path().join("full-node").to_path_buf(); + + let da_service = MockDaService::new(MockAddress::default(), &da_db_dir.clone()); // start rollup on da block 3 for _ in 0..3 { @@ -1420,7 +1541,13 @@ async fn test_system_transactons() -> Result<(), anyhow::Error> { } let (seq_test_client, full_node_test_client, seq_task, full_node_task, _) = - initialize_test(Default::default()).await; + initialize_test(TestConfig { + da_path: da_db_dir, + sequencer_path: sequencer_db_dir, + fullnode_path: fullnode_db_dir, + ..Default::default() + }) + .await; // publish some blocks with system transactions for _ in 0..10 { @@ -1536,7 +1663,12 @@ async fn test_system_transactons() -> Result<(), anyhow::Error> { #[tokio::test] async fn test_system_tx_effect_on_block_gas_limit() -> Result<(), anyhow::Error> { // citrea::initialize_logging(); - let da_service = MockDaService::new(MockAddress::default()); + + let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + + let da_service = MockDaService::new(MockAddress::default(), &da_db_dir.clone()); // start rollup on da block 3 for _ in 0..3 { @@ -1545,13 +1677,15 @@ async fn test_system_tx_effect_on_block_gas_limit() -> Result<(), anyhow::Error> let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); + let da_db_dir_cloned = da_db_dir.clone(); let seq_task = tokio::spawn(async move { start_rollup( seq_port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests-low-block-gas-limit"), None, NodeMode::SequencerNode, - None, + sequencer_db_dir, + da_db_dir_cloned, 4, true, None, @@ -1691,31 +1825,38 @@ fn find_subarray(haystack: &[u8], needle: &[u8]) -> Option { async fn sequencer_crash_and_replace_full_node() -> Result<(), anyhow::Error> { // citrea::initialize_logging(); - // open, close without publishing blokcs - // then reopen, publish some blocks without error - // Remove temp db directories if they exist - let _ = fs::remove_dir_all(Path::new("demo_data_sequencer_full_node")); - let _ = fs::remove_dir_all(Path::new("demo_data_sequencer_full_node_copy")); + let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + let fullnode_db_dir = storage_dir.path().join("full-node").to_path_buf(); - let db_test_client = PostgresConnector::new_test_client().await.unwrap(); + let psql_db_name = "sequencer_crash_and_replace_full_node".to_owned(); + + let db_test_client = PostgresConnector::new_test_client(psql_db_name.clone()) + .await + .unwrap(); 
let mut sequencer_config = create_default_sequencer_config(4, Some(true), 10); - sequencer_config.db_config = Some(SharedBackupDbConfig::default()); + sequencer_config.db_config = Some(SharedBackupDbConfig::default().set_db_name(psql_db_name)); - let da_service = MockDaService::with_finality(MockAddress::from([0; 32]), 2); + let da_service = MockDaService::with_finality(MockAddress::from([0; 32]), 2, &da_db_dir); da_service.publish_test_block().await.unwrap(); let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); let config1 = sequencer_config.clone(); + + let da_db_dir_cloned = da_db_dir.clone(); + let sequencer_db_dir_cloned = sequencer_db_dir.clone(); let seq_task = tokio::spawn(async move { start_rollup( seq_port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::SequencerNode, - None, + sequencer_db_dir_cloned, + da_db_dir_cloned, 4, true, None, @@ -1732,13 +1873,17 @@ async fn sequencer_crash_and_replace_full_node() -> Result<(), anyhow::Error> { let (full_node_port_tx, full_node_port_rx) = tokio::sync::oneshot::channel(); let config1 = sequencer_config.clone(); + + let fullnode_db_dir_cloned = fullnode_db_dir.clone(); + let da_db_dir_cloned = da_db_dir.clone(); let full_node_task = tokio::spawn(async move { start_rollup( full_node_port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::FullNode(seq_port), - Some("demo_data_sequencer_full_node"), + fullnode_db_dir_cloned, + da_db_dir_cloned, 4, true, None, @@ -1787,13 +1932,12 @@ async fn sequencer_crash_and_replace_full_node() -> Result<(), anyhow::Error> { // Copy the db to a new path with the same contents because // the lock is not released on the db directory even though the task is aborted - let _ = copy_dir_recursive( - Path::new("demo_data_sequencer_full_node"), - Path::new("demo_data_sequencer_full_node_copy"), - ); + let _ = copy_dir_recursive(&fullnode_db_dir, &storage_dir.path().join("full_node_copy")); + let sequencer_db_dir = storage_dir.path().join("full_node_copy"); sleep(Duration::from_secs(1)).await; let config1 = sequencer_config.clone(); + // Start the full node as sequencer let seq_task = tokio::spawn(async move { start_rollup( @@ -1801,7 +1945,8 @@ async fn sequencer_crash_and_replace_full_node() -> Result<(), anyhow::Error> { GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::SequencerNode, - Some("demo_data_sequencer_full_node_copy"), + sequencer_db_dir, + da_db_dir, 4, true, None, @@ -1836,10 +1981,6 @@ async fn sequencer_crash_and_replace_full_node() -> Result<(), anyhow::Error> { assert_eq!(commitments[1].l1_start_height, 2); assert_eq!(commitments[1].l1_end_height, 3); - let _ = fs::remove_dir_all(Path::new("demo_data_test_reopen_sequencer")); - let _ = fs::remove_dir_all(Path::new("demo_data_sequencer_full_node")); - let _ = fs::remove_dir_all(Path::new("demo_data_sequencer_full_node_copy")); - seq_task.abort(); Ok(()) @@ -1849,8 +1990,19 @@ async fn sequencer_crash_and_replace_full_node() -> Result<(), anyhow::Error> { async fn transaction_failing_on_l1_is_removed_from_mempool() -> Result<(), anyhow::Error> { // citrea::initialize_logging(); + let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + let fullnode_db_dir = storage_dir.path().join("full-node").to_path_buf(); + let (seq_test_client, full_node_test_client, seq_task, 
full_node_task, _) = - initialize_test(Default::default()).await; + initialize_test(TestConfig { + da_path: da_db_dir.clone(), + sequencer_path: sequencer_db_dir.clone(), + fullnode_path: fullnode_db_dir.clone(), + ..Default::default() + }) + .await; let random_wallet = LocalWallet::new(&mut thread_rng()).with_chain_id(seq_test_client.chain_id); @@ -1940,28 +2092,41 @@ async fn sequencer_crash_restore_mempool() -> Result<(), anyhow::Error> { // citrea::initialize_logging(); let addr = Address::from_str("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266").unwrap(); - let _ = fs::remove_dir_all(Path::new("demo_data_sequencer_restore_mempool")); - let _ = fs::remove_dir_all(Path::new("demo_data_sequencer_restore_mempool_copy")); + let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); - let db_test_client = PostgresConnector::new_test_client().await.unwrap(); + let db_test_client = + PostgresConnector::new_test_client("sequencer_crash_restore_mempool".to_owned()) + .await + .unwrap(); let mut sequencer_config = create_default_sequencer_config(4, Some(true), 10); + sequencer_config.mempool_conf = SequencerMempoolConfig { + max_account_slots: 100, + ..Default::default() + }; + sequencer_config.db_config = Some( + SharedBackupDbConfig::default().set_db_name("sequencer_crash_restore_mempool".to_owned()), + ); - sequencer_config.db_config = Some(SharedBackupDbConfig::default()); - - let da_service = MockDaService::with_finality(MockAddress::from([0; 32]), 2); + let da_service = + MockDaService::with_finality(MockAddress::from([0; 32]), 2, &da_db_dir.clone()); da_service.publish_test_block().await.unwrap(); let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); let config1 = sequencer_config.clone(); + let da_db_dir_cloned = da_db_dir.clone(); + let sequencer_db_dir_cloned = sequencer_db_dir.clone(); let seq_task = tokio::spawn(async move { start_rollup( seq_port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::SequencerNode, - Some("demo_data_sequencer_restore_mempool"), + sequencer_db_dir_cloned, + da_db_dir_cloned, 4, true, None, @@ -2011,20 +2176,23 @@ async fn sequencer_crash_restore_mempool() -> Result<(), anyhow::Error> { seq_task.abort(); let _ = copy_dir_recursive( - Path::new("demo_data_sequencer_restore_mempool"), - Path::new("demo_data_sequencer_restore_mempool_copy"), + &sequencer_db_dir, + &storage_dir.path().join("sequencer_copy"), ); let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); let config1 = sequencer_config.clone(); + let da_db_dir_cloned = da_db_dir.clone(); + let sequencer_db_dir = storage_dir.path().join("sequencer_copy").to_path_buf(); let seq_task = tokio::spawn(async move { start_rollup( seq_port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::SequencerNode, - Some("demo_data_sequencer_restore_mempool_copy"), + sequencer_db_dir, + da_db_dir_cloned, 4, true, None, @@ -2071,8 +2239,6 @@ async fn sequencer_crash_restore_mempool() -> Result<(), anyhow::Error> { assert_eq!(txs.len(), 0); seq_task.abort(); - let _ = fs::remove_dir_all(Path::new("demo_data_sequencer_restore_mempool")); - let _ = fs::remove_dir_all(Path::new("demo_data_sequencer_restore_mempool_copy")); Ok(()) } @@ -2081,17 +2247,26 @@ async fn sequencer_crash_restore_mempool() -> Result<(), anyhow::Error> { async fn test_db_get_proof() { // 
citrea::initialize_logging(); - let db_test_client = PostgresConnector::new_test_client().await.unwrap(); + let storage_dir = tempdir_with_children(&["DA", "sequencer", "prover"]); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + let prover_db_dir = storage_dir.path().join("prover").to_path_buf(); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); + + let db_test_client = PostgresConnector::new_test_client("test_db_get_proof".to_owned()) + .await + .unwrap(); let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); + let da_db_dir_cloned = da_db_dir.clone(); let seq_task = tokio::spawn(async { start_rollup( seq_port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::SequencerNode, - None, + sequencer_db_dir, + da_db_dir_cloned, 4, true, None, @@ -2104,10 +2279,11 @@ async fn test_db_get_proof() { let seq_port = seq_port_rx.await.unwrap(); let test_client = make_test_client(seq_port).await; - let da_service = MockDaService::new(MockAddress::from([0; 32])); + let da_service = MockDaService::new(MockAddress::from([0; 32]), &da_db_dir); let (prover_node_port_tx, prover_node_port_rx) = tokio::sync::oneshot::channel(); + let da_db_dir_cloned = da_db_dir.clone(); let prover_node_task = tokio::spawn(async move { start_rollup( prover_node_port_tx, @@ -2115,10 +2291,13 @@ async fn test_db_get_proof() { Some(ProverConfig { proving_mode: sov_stf_runner::ProverGuestRunConfig::Execute, proof_sampling_number: 0, - db_config: Some(SharedBackupDbConfig::default()), + db_config: Some( + SharedBackupDbConfig::default().set_db_name("test_db_get_proof".to_owned()), + ), }), NodeMode::Prover(seq_port), - None, + prover_db_dir, + da_db_dir_cloned, 4, true, None, @@ -2198,15 +2377,23 @@ async fn test_db_get_proof() { async fn full_node_verify_proof_and_store() { // citrea::initialize_logging(); + let storage_dir = tempdir_with_children(&["DA", "sequencer", "prover", "full-node"]); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + let prover_db_dir = storage_dir.path().join("prover").to_path_buf(); + let fullnode_db_dir = storage_dir.path().join("full-node").to_path_buf(); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); + let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); + let da_db_dir_cloned = da_db_dir.clone(); let seq_task = tokio::spawn(async { start_rollup( seq_port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::SequencerNode, - None, + sequencer_db_dir, + da_db_dir_cloned, 4, true, None, @@ -2219,10 +2406,12 @@ async fn full_node_verify_proof_and_store() { let seq_port = seq_port_rx.await.unwrap(); let test_client = make_test_client(seq_port).await; - let da_service = MockDaService::new(MockAddress::from([0; 32])); + + let da_service = MockDaService::new(MockAddress::from([0; 32]), &da_db_dir); let (prover_node_port_tx, prover_node_port_rx) = tokio::sync::oneshot::channel(); + let da_db_dir_cloned = da_db_dir.clone(); let prover_node_task = tokio::spawn(async move { start_rollup( prover_node_port_tx, @@ -2233,7 +2422,8 @@ async fn full_node_verify_proof_and_store() { db_config: None, }), NodeMode::Prover(seq_port), - None, + prover_db_dir, + da_db_dir_cloned, 4, true, None, @@ -2250,13 +2440,15 @@ async fn full_node_verify_proof_and_store() { let (full_node_port_tx, full_node_port_rx) = tokio::sync::oneshot::channel(); + let da_db_dir_cloned = da_db_dir.clone(); let full_node_task = tokio::spawn(async move { start_rollup( 
full_node_port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::FullNode(seq_port), - None, + fullnode_db_dir, + da_db_dir_cloned, 4, true, None, @@ -2370,17 +2562,26 @@ async fn full_node_verify_proof_and_store() { async fn test_all_flow() { // citrea::initialize_logging(); - let db_test_client = PostgresConnector::new_test_client().await.unwrap(); + let storage_dir = tempdir_with_children(&["DA", "sequencer", "prover", "full-node"]); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + let prover_db_dir = storage_dir.path().join("prover").to_path_buf(); + let fullnode_db_dir = storage_dir.path().join("full-node").to_path_buf(); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); + let db_test_client = PostgresConnector::new_test_client("test_all_flow".to_owned()) + .await + .unwrap(); let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); + let da_db_dir_cloned = da_db_dir.clone(); let seq_task = tokio::spawn(async { start_rollup( seq_port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::SequencerNode, - None, + sequencer_db_dir, + da_db_dir_cloned, 4, true, None, @@ -2393,10 +2594,11 @@ async fn test_all_flow() { let seq_port = seq_port_rx.await.unwrap(); let test_client = make_test_client(seq_port).await; - let da_service = MockDaService::new(MockAddress::from([0; 32])); + let da_service = MockDaService::new(MockAddress::from([0; 32]), &da_db_dir); let (prover_node_port_tx, prover_node_port_rx) = tokio::sync::oneshot::channel(); + let da_db_dir_cloned = da_db_dir.clone(); let prover_node_task = tokio::spawn(async move { start_rollup( prover_node_port_tx, @@ -2404,10 +2606,13 @@ async fn test_all_flow() { Some(ProverConfig { proving_mode: sov_stf_runner::ProverGuestRunConfig::Execute, proof_sampling_number: 0, - db_config: Some(SharedBackupDbConfig::default()), + db_config: Some( + SharedBackupDbConfig::default().set_db_name("test_all_flow".to_owned()), + ), }), NodeMode::Prover(seq_port), - None, + prover_db_dir, + da_db_dir_cloned, 4, true, None, @@ -2424,13 +2629,15 @@ async fn test_all_flow() { let (full_node_port_tx, full_node_port_rx) = tokio::sync::oneshot::channel(); + let da_db_dir_cloned = da_db_dir.clone(); let full_node_task = tokio::spawn(async move { start_rollup( full_node_port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::FullNode(seq_port), - None, + fullnode_db_dir, + da_db_dir_cloned, 4, true, None, diff --git a/bin/citrea/tests/evm/archival_state.rs b/bin/citrea/tests/evm/archival_state.rs index ad1e5b941..c926ba740 100644 --- a/bin/citrea/tests/evm/archival_state.rs +++ b/bin/citrea/tests/evm/archival_state.rs @@ -8,22 +8,28 @@ use reth_primitives::BlockNumberOrTag; use crate::evm::init_test_rollup; use crate::test_client::TestClient; -use crate::test_helpers::{start_rollup, NodeMode}; +use crate::test_helpers::{start_rollup, tempdir_with_children, NodeMode}; use crate::{DEFAULT_DEPOSIT_MEMPOOL_FETCH_LIMIT, DEFAULT_MIN_SOFT_CONFIRMATIONS_PER_COMMITMENT}; #[tokio::test] async fn test_archival_state() -> Result<(), anyhow::Error> { // citrea::initialize_logging(); + let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); + let da_db_dir_cloned = da_db_dir.clone(); let seq_task = 
tokio::spawn(async { start_rollup( seq_port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::SequencerNode, - None, + sequencer_db_dir, + da_db_dir_cloned, DEFAULT_MIN_SOFT_CONFIRMATIONS_PER_COMMITMENT, true, None, diff --git a/bin/citrea/tests/evm/gas_price.rs b/bin/citrea/tests/evm/gas_price.rs index df0abdab6..7e63cc988 100644 --- a/bin/citrea/tests/evm/gas_price.rs +++ b/bin/citrea/tests/evm/gas_price.rs @@ -10,23 +10,29 @@ use reth_primitives::BlockNumberOrTag; use crate::evm::init_test_rollup; use crate::test_client::TestClient; -use crate::test_helpers::{start_rollup, NodeMode}; +use crate::test_helpers::{start_rollup, tempdir_with_children, NodeMode}; use crate::{DEFAULT_DEPOSIT_MEMPOOL_FETCH_LIMIT, DEFAULT_MIN_SOFT_CONFIRMATIONS_PER_COMMITMENT}; #[tokio::test] async fn test_gas_price_increase() -> Result<(), anyhow::Error> { // citrea::initialize_logging(); + let storage_dir = tempdir_with_children(&["DA", "sequencer"]); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + let (port_tx, port_rx) = tokio::sync::oneshot::channel(); - let rollup_task = tokio::spawn(async { + let da_db_dir_cloned = da_db_dir.clone(); + let rollup_task = tokio::spawn(async move { // Don't provide a prover since the EVM is not currently provable start_rollup( port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::SequencerNode, - None, + sequencer_db_dir, + da_db_dir_cloned, DEFAULT_MIN_SOFT_CONFIRMATIONS_PER_COMMITMENT, true, None, diff --git a/bin/citrea/tests/evm/mod.rs b/bin/citrea/tests/evm/mod.rs index b90a703a4..428cb66f5 100644 --- a/bin/citrea/tests/evm/mod.rs +++ b/bin/citrea/tests/evm/mod.rs @@ -14,7 +14,7 @@ use sov_rollup_interface::CITREA_VERSION; // use sov_demo_rollup::initialize_logging; use crate::test_client::TestClient; -use crate::test_helpers::{start_rollup, NodeMode}; +use crate::test_helpers::{start_rollup, tempdir_with_children, NodeMode}; use crate::{DEFAULT_DEPOSIT_MEMPOOL_FETCH_LIMIT, DEFAULT_MIN_SOFT_CONFIRMATIONS_PER_COMMITMENT}; mod archival_state; @@ -24,6 +24,12 @@ mod tracing; #[tokio::test] async fn web3_rpc_tests() -> Result<(), anyhow::Error> { // citrea::initialize_logging(); + + let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + let da_db_dir_cloned = da_db_dir.clone(); + let (port_tx, port_rx) = tokio::sync::oneshot::channel(); let rollup_task = tokio::spawn(async { start_rollup( @@ -31,7 +37,8 @@ async fn web3_rpc_tests() -> Result<(), anyhow::Error> { GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::SequencerNode, - None, + sequencer_db_dir, + da_db_dir_cloned, DEFAULT_MIN_SOFT_CONFIRMATIONS_PER_COMMITMENT, true, None, @@ -73,16 +80,21 @@ async fn web3_rpc_tests() -> Result<(), anyhow::Error> { async fn evm_tx_tests() -> Result<(), anyhow::Error> { // citrea::initialize_logging(); + let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + let da_db_dir_cloned = da_db_dir.clone(); + let (port_tx, port_rx) = tokio::sync::oneshot::channel(); let rollup_task = tokio::spawn(async { - // Don't provide a prover since the EVM is not currently provable 
start_rollup( port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::SequencerNode, - None, + sequencer_db_dir, + da_db_dir_cloned, DEFAULT_MIN_SOFT_CONFIRMATIONS_PER_COMMITMENT, true, None, @@ -109,16 +121,21 @@ async fn send_tx_test_to_eth(rpc_address: SocketAddr) -> Result<(), Box Result<(), anyhow::Error> { use crate::test_helpers::start_rollup; + let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + let da_db_dir_cloned = da_db_dir.clone(); + let (port_tx, port_rx) = tokio::sync::oneshot::channel(); let rollup_task = tokio::spawn(async { - // Don't provide a prover since the EVM is not currently provable start_rollup( port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::SequencerNode, - None, + sequencer_db_dir, + da_db_dir_cloned, DEFAULT_MIN_SOFT_CONFIRMATIONS_PER_COMMITMENT, true, None, @@ -144,13 +161,19 @@ async fn test_eth_get_logs() -> Result<(), anyhow::Error> { async fn test_genesis_contract_call() -> Result<(), Box> { let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); + let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + let da_db_dir_cloned = da_db_dir.clone(); + let seq_task = tokio::spawn(async move { start_rollup( seq_port_tx, GenesisPaths::from_dir("../../hive/genesis"), None, NodeMode::SequencerNode, - None, + sequencer_db_dir, + da_db_dir_cloned, 123456, true, None, diff --git a/bin/citrea/tests/evm/tracing.rs b/bin/citrea/tests/evm/tracing.rs index 72226e768..b54485e1a 100644 --- a/bin/citrea/tests/evm/tracing.rs +++ b/bin/citrea/tests/evm/tracing.rs @@ -13,12 +13,17 @@ use reth_rpc_types::trace::geth::{ use serde_json::{self, json}; use crate::evm::make_test_client; -use crate::test_helpers::{start_rollup, NodeMode}; +use crate::test_helpers::{start_rollup, tempdir_with_children, NodeMode}; use crate::{DEFAULT_DEPOSIT_MEMPOOL_FETCH_LIMIT, DEFAULT_MIN_SOFT_CONFIRMATIONS_PER_COMMITMENT}; #[tokio::test] async fn tracing_tests() -> Result<(), Box> { + let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + let (port_tx, port_rx) = tokio::sync::oneshot::channel(); + let da_db_dir_cloned = da_db_dir.clone(); let rollup_task = tokio::spawn(async { // Don't provide a prover since the EVM is not currently provable start_rollup( @@ -26,7 +31,8 @@ async fn tracing_tests() -> Result<(), Box> { GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::SequencerNode, - None, + sequencer_db_dir, + da_db_dir_cloned, DEFAULT_MIN_SOFT_CONFIRMATIONS_PER_COMMITMENT, true, None, diff --git a/bin/citrea/tests/mempool/mod.rs b/bin/citrea/tests/mempool/mod.rs index 3178fb242..e35d20387 100644 --- a/bin/citrea/tests/mempool/mod.rs +++ b/bin/citrea/tests/mempool/mod.rs @@ -1,3 +1,4 @@ +use std::path::PathBuf; use std::str::FromStr; use citrea_stf::genesis_config::GenesisPaths; @@ -8,10 +9,13 @@ use tokio::task::JoinHandle; use crate::evm::make_test_client; use crate::test_client::{TestClient, MAX_FEE_PER_GAS}; -use crate::test_helpers::{start_rollup, NodeMode}; +use 
crate::test_helpers::{start_rollup, tempdir_with_children, NodeMode}; use crate::{DEFAULT_DEPOSIT_MEMPOOL_FETCH_LIMIT, DEFAULT_MIN_SOFT_CONFIRMATIONS_PER_COMMITMENT}; -async fn initialize_test() -> (JoinHandle<()>, Box) { +async fn initialize_test( + sequencer_path: PathBuf, + db_path: PathBuf, +) -> (JoinHandle<()>, Box) { let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); let seq_task = tokio::spawn(async { @@ -20,7 +24,8 @@ async fn initialize_test() -> (JoinHandle<()>, Box) { GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::SequencerNode, - None, + sequencer_path, + db_path, DEFAULT_MIN_SOFT_CONFIRMATIONS_PER_COMMITMENT, true, None, @@ -42,7 +47,10 @@ async fn initialize_test() -> (JoinHandle<()>, Box) { async fn test_same_nonce_tx_should_panic() { // citrea::initialize_logging(); - let (seq_task, test_client) = initialize_test().await; + let db_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = db_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = db_dir.path().join("sequencer").to_path_buf(); + let (seq_task, test_client) = initialize_test(sequencer_db_dir, da_db_dir).await; let addr = Address::from_str("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266").unwrap(); @@ -69,7 +77,10 @@ async fn test_same_nonce_tx_should_panic() { async fn test_nonce_too_low() { // citrea::initialize_logging(); - let (seq_task, test_client) = initialize_test().await; + let db_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = db_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = db_dir.path().join("sequencer").to_path_buf(); + let (seq_task, test_client) = initialize_test(sequencer_db_dir, da_db_dir).await; let addr = Address::from_str("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266").unwrap(); @@ -96,7 +107,10 @@ async fn test_nonce_too_low() { async fn test_nonce_too_high() { // citrea::initialize_logging(); - let (seq_task, test_client) = initialize_test().await; + let db_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = db_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = db_dir.path().join("sequencer").to_path_buf(); + let (seq_task, test_client) = initialize_test(sequencer_db_dir, da_db_dir).await; let addr = Address::from_str("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266").unwrap(); @@ -128,7 +142,10 @@ async fn test_nonce_too_high() { #[tokio::test] async fn test_order_by_fee() { - let (seq_task, test_client) = initialize_test().await; + let db_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = db_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = db_dir.path().join("sequencer").to_path_buf(); + let (seq_task, test_client) = initialize_test(sequencer_db_dir, da_db_dir).await; let chain_id: u64 = 5655; let key = "0xdcf2cbdd171a21c480aa7f53d77f31bb102282b3ff099c78e3118b37348c72f7" @@ -229,7 +246,10 @@ async fn test_order_by_fee() { /// Publish block, tx should not be in block but should still be in the mempool. 
#[tokio::test] async fn test_tx_with_low_base_fee() { - let (seq_task, test_client) = initialize_test().await; + let db_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = db_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = db_dir.path().join("sequencer").to_path_buf(); + let (seq_task, test_client) = initialize_test(sequencer_db_dir, da_db_dir).await; let chain_id: u64 = 5655; let key = "0xdcf2cbdd171a21c480aa7f53d77f31bb102282b3ff099c78e3118b37348c72f7" @@ -272,7 +292,10 @@ async fn test_tx_with_low_base_fee() { #[tokio::test] async fn test_same_nonce_tx_replacement() { - let (seq_task, test_client) = initialize_test().await; + let db_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = db_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = db_dir.path().join("sequencer").to_path_buf(); + let (seq_task, test_client) = initialize_test(sequencer_db_dir, da_db_dir).await; let addr = Address::from_str("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266").unwrap(); diff --git a/bin/citrea/tests/sequencer_commitments/mod.rs b/bin/citrea/tests/sequencer_commitments/mod.rs index cb0164117..24faf5c53 100644 --- a/bin/citrea/tests/sequencer_commitments/mod.rs +++ b/bin/citrea/tests/sequencer_commitments/mod.rs @@ -14,22 +14,30 @@ use tokio::time::sleep; use crate::evm::make_test_client; use crate::test_client::TestClient; -use crate::test_helpers::{create_default_sequencer_config, start_rollup, NodeMode}; +use crate::test_helpers::{ + create_default_sequencer_config, start_rollup, tempdir_with_children, NodeMode, +}; use crate::DEFAULT_DEPOSIT_MEMPOOL_FETCH_LIMIT; #[tokio::test] async fn sequencer_sends_commitments_to_da_layer() { // citrea::initialize_logging(); + let db_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = db_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = db_dir.path().join("sequencer").to_path_buf(); + let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); - let seq_task = tokio::spawn(async { + let da_db_dir_cloned = da_db_dir.clone(); + let seq_task = tokio::spawn(async move { start_rollup( seq_port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::SequencerNode, - None, + sequencer_db_dir, + da_db_dir_cloned, 4, true, None, @@ -42,7 +50,8 @@ async fn sequencer_sends_commitments_to_da_layer() { let seq_port = seq_port_rx.await.unwrap(); let test_client = make_test_client(seq_port).await; - let da_service = MockDaService::new(MockAddress::from([0; 32])); + + let da_service = MockDaService::new(MockAddress::from([0; 32]), &da_db_dir); // publish 3 soft confirmations, no commitment should be sent for _ in 0..3 { @@ -193,21 +202,28 @@ async fn check_sequencer_commitment( async fn check_commitment_in_offchain_db() { // citrea::initialize_logging(); + let db_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = db_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = db_dir.path().join("sequencer").to_path_buf(); + let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); let mut sequencer_config = create_default_sequencer_config(4, Some(true), 10); - sequencer_config.db_config = Some(SharedBackupDbConfig::default()); + let db_name = "check_commitment_in_offchain_db".to_owned(); + sequencer_config.db_config = Some(SharedBackupDbConfig::default().set_db_name(db_name.clone())); // drops db if exists from previous test runs, recreates the db - let db_test_client = 
PostgresConnector::new_test_client().await.unwrap(); + let db_test_client = PostgresConnector::new_test_client(db_name).await.unwrap(); - let seq_task = tokio::spawn(async { + let da_db_dir_cloned = da_db_dir.clone(); + let seq_task = tokio::spawn(async move { start_rollup( seq_port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::SequencerNode, - None, + sequencer_db_dir, + da_db_dir_cloned, 4, true, None, @@ -220,7 +236,7 @@ async fn check_commitment_in_offchain_db() { let seq_port = seq_port_rx.await.unwrap(); let test_client = make_test_client(seq_port).await; - let da_service = MockDaService::new(MockAddress::from([0; 32])); + let da_service = MockDaService::new(MockAddress::from([0; 32]), &da_db_dir); da_service.publish_test_block().await.unwrap(); @@ -253,15 +269,22 @@ async fn check_commitment_in_offchain_db() { async fn test_ledger_get_commitments_on_slot() { // citrea::initialize_logging(); + let db_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = db_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = db_dir.path().join("sequencer").to_path_buf(); + let fullnode_db_dir = db_dir.path().join("full-node").to_path_buf(); + let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); + let da_db_dir_cloned = da_db_dir.clone(); let seq_task = tokio::spawn(async { start_rollup( seq_port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::SequencerNode, - None, + sequencer_db_dir, + da_db_dir_cloned, 4, true, None, @@ -274,7 +297,7 @@ async fn test_ledger_get_commitments_on_slot() { let seq_port = seq_port_rx.await.unwrap(); let test_client = make_test_client(seq_port).await; - let da_service = MockDaService::new(MockAddress::from([0; 32])); + let da_service = MockDaService::new(MockAddress::from([0; 32]), &da_db_dir); let (full_node_port_tx, full_node_port_rx) = tokio::sync::oneshot::channel(); @@ -284,7 +307,8 @@ async fn test_ledger_get_commitments_on_slot() { GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::FullNode(seq_port), - None, + fullnode_db_dir, + da_db_dir, 4, true, None, @@ -349,15 +373,22 @@ async fn test_ledger_get_commitments_on_slot() { async fn test_ledger_get_commitments_on_slot_prover() { // citrea::initialize_logging(); + let db_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = db_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = db_dir.path().join("sequencer").to_path_buf(); + let fullnode_db_dir = db_dir.path().join("full-node").to_path_buf(); + let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); + let da_db_dir_cloned = da_db_dir.clone(); let seq_task = tokio::spawn(async { start_rollup( seq_port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests"), None, NodeMode::SequencerNode, - None, + sequencer_db_dir, + da_db_dir_cloned, 4, true, None, @@ -370,7 +401,7 @@ async fn test_ledger_get_commitments_on_slot_prover() { let seq_port = seq_port_rx.await.unwrap(); let test_client = make_test_client(seq_port).await; - let da_service = MockDaService::new(MockAddress::from([0; 32])); + let da_service = MockDaService::new(MockAddress::from([0; 32]), &da_db_dir); let (prover_node_port_tx, prover_node_port_rx) = tokio::sync::oneshot::channel(); @@ -384,7 +415,8 @@ async fn test_ledger_get_commitments_on_slot_prover() { db_config: None, }), NodeMode::Prover(seq_port), - None, + fullnode_db_dir, + da_db_dir, 4, true, None, diff --git 
a/bin/citrea/tests/soft_confirmation_rule_enforcer/mod.rs b/bin/citrea/tests/soft_confirmation_rule_enforcer/mod.rs index e6ed6c475..edcd03d89 100644 --- a/bin/citrea/tests/soft_confirmation_rule_enforcer/mod.rs +++ b/bin/citrea/tests/soft_confirmation_rule_enforcer/mod.rs @@ -3,7 +3,7 @@ use sov_mock_da::{MockAddress, MockDaService}; use crate::evm::make_test_client; // use citrea::initialize_logging; -use crate::test_helpers::{start_rollup, NodeMode}; +use crate::test_helpers::{start_rollup, tempdir_with_children, NodeMode}; use crate::{DEFAULT_DEPOSIT_MEMPOOL_FETCH_LIMIT, DEFAULT_MIN_SOFT_CONFIRMATIONS_PER_COMMITMENT}; /// Transaction with equal nonce to last tx should not be accepted by mempool. @@ -11,15 +11,21 @@ use crate::{DEFAULT_DEPOSIT_MEMPOOL_FETCH_LIMIT, DEFAULT_MIN_SOFT_CONFIRMATIONS_ async fn too_many_l2_block_per_l1_block() { // citrea::initialize_logging(); + let storage_dir = tempdir_with_children(&["DA", "sequencer"]); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); - tokio::spawn(async { + let da_db_dir_cloned = da_db_dir.clone(); + tokio::spawn(async move { start_rollup( seq_port_tx, GenesisPaths::from_dir("../test-data/genesis/integration-tests-low-limiting-number"), None, NodeMode::SequencerNode, - None, + sequencer_db_dir, + da_db_dir_cloned, DEFAULT_MIN_SOFT_CONFIRMATIONS_PER_COMMITMENT, true, None, @@ -33,7 +39,7 @@ async fn too_many_l2_block_per_l1_block() { let test_client = make_test_client(seq_port).await; let limiting_number = test_client.get_limiting_number().await; - let da_service = MockDaService::new(MockAddress::from([0; 32])); + let da_service = MockDaService::new(MockAddress::from([0; 32]), &da_db_dir); // limiting number should be 10 // we use a low limiting number because mockda creates blocks every 5 seconds diff --git a/bin/citrea/tests/test_helpers.rs b/bin/citrea/tests/test_helpers.rs index 21009c8e0..a5ca9e844 100644 --- a/bin/citrea/tests/test_helpers.rs +++ b/bin/citrea/tests/test_helpers.rs @@ -1,5 +1,5 @@ use std::net::SocketAddr; -use std::path::Path; +use std::path::{Path, PathBuf}; use citrea::MockDemoRollup; use citrea_sequencer::SequencerConfig; @@ -12,6 +12,7 @@ use sov_modules_rollup_blueprint::RollupBlueprint; use sov_stf_runner::{ ProverConfig, RollupConfig, RollupPublicKeys, RpcConfig, RunnerConfig, StorageConfig, }; +use tempfile::TempDir; use tokio::sync::oneshot; use tracing::warn; @@ -29,7 +30,8 @@ pub async fn start_rollup( rt_genesis_paths: GenesisPaths, rollup_prover_config: Option, node_mode: NodeMode, - db_path: Option<&str>, + rollup_db_path: PathBuf, + da_db_path: PathBuf, min_soft_confirmations_per_commitment: u64, include_tx_body: bool, rollup_config: Option>, @@ -37,17 +39,10 @@ pub async fn start_rollup( test_mode: Option, deposit_mempool_fetch_limit: usize, ) { - let mut path = db_path.map(Path::new); - let mut temp_dir: Option = None; - if db_path.is_none() { - temp_dir = Some(tempfile::tempdir().unwrap()); - - path = Some(temp_dir.as_ref().unwrap().path()); - } - // create rollup config default creator function and use them here for the configs - let rollup_config = rollup_config - .unwrap_or_else(|| create_default_rollup_config(include_tx_body, path, node_mode)); + let rollup_config = rollup_config.unwrap_or_else(|| { + create_default_rollup_config(include_tx_body, &rollup_db_path, &da_db_path, node_mode) + }); let mock_demo_rollup = MockDemoRollup {}; @@ 
-101,16 +96,12 @@ pub async fn start_rollup( .unwrap(); } } - - if db_path.is_none() { - // Close the tempdir explicitly to ensure that rustc doesn't see that it's unused and drop it unexpectedly - temp_dir.unwrap().close().unwrap(); - } } pub fn create_default_rollup_config( include_tx_body: bool, - path: Option<&Path>, + rollup_path: &Path, + da_path: &Path, node_mode: NodeMode, ) -> RollupConfig { RollupConfig { @@ -122,9 +113,8 @@ pub fn create_default_rollup_config( sequencer_da_pub_key: vec![0; 32], prover_da_pub_key: vec![0; 32], }, - storage: StorageConfig { - path: path.unwrap().to_path_buf(), + path: rollup_path.to_path_buf(), }, rpc: RpcConfig { bind_host: "127.0.0.1".into(), @@ -141,6 +131,7 @@ pub fn create_default_rollup_config( }, da: MockDaConfig { sender_address: MockAddress::from([0; 32]), + db_path: da_path.to_path_buf(), }, } } @@ -160,3 +151,13 @@ pub fn create_default_sequencer_config( db_config: None, } } + +pub fn tempdir_with_children(children: &[&str]) -> TempDir { + let db_dir = tempfile::tempdir().expect("Could not create temporary directory for test"); + for child in children { + let p = db_dir.path().join(child); + std::fs::create_dir(p).unwrap(); + } + + db_dir +} diff --git a/crates/sequencer/src/rpc.rs b/crates/sequencer/src/rpc.rs index 581deac58..af3c662db 100644 --- a/crates/sequencer/src/rpc.rs +++ b/crates/sequencer/src/rpc.rs @@ -11,7 +11,6 @@ use reth_rpc::eth::error::EthApiError; use reth_rpc_types_compat::transaction::from_recovered; use reth_transaction_pool::EthPooledTransaction; use shared_backup_db::PostgresConnector; -use sov_mock_da::{MockAddress, MockDaService}; use sov_modules_api::WorkingSet; use tokio::sync::Mutex; use tracing::{error, info}; @@ -79,19 +78,6 @@ pub(crate) fn create_rpc_module( })?; Ok::<(), ErrorObjectOwned>(()) })?; - - rpc.register_async_method("da_publishBlock", |_, _ctx| async move { - info!("Sequencer: da_publishBlock"); - let da = MockDaService::new(MockAddress::from([0; 32])); - da.publish_test_block().await.map_err(|e| { - ErrorObjectOwned::owned( - INTERNAL_ERROR_CODE, - INTERNAL_ERROR_MSG, - Some(format!("Could not publish mock-da block: {e}")), - ) - })?; - Ok::<(), ErrorObjectOwned>(()) - })?; } rpc.register_async_method("eth_getTransactionByHash", |parameters, ctx| async move { diff --git a/crates/shared-backup-db/src/config.rs b/crates/shared-backup-db/src/config.rs index 4fc490219..c313d2068 100644 --- a/crates/shared-backup-db/src/config.rs +++ b/crates/shared-backup-db/src/config.rs @@ -55,6 +55,11 @@ impl SharedBackupDbConfig { self.max_pool_size } + pub fn set_db_name(mut self, db_name: String) -> Self { + self.db_name = db_name; + self + } + pub fn parse_to_connection_string(&self) -> String { format!( "host={} port={} user={} password={} dbname={}", diff --git a/crates/shared-backup-db/src/lib.rs b/crates/shared-backup-db/src/lib.rs index 91b7a9aa3..01d8ed7b9 100644 --- a/crates/shared-backup-db/src/lib.rs +++ b/crates/shared-backup-db/src/lib.rs @@ -1,7 +1,6 @@ pub mod config; pub mod postgres_connector; pub mod tables; -mod utils; pub use config::SharedBackupDbConfig; pub use postgres_connector::PostgresConnector; diff --git a/crates/shared-backup-db/src/postgres_connector.rs b/crates/shared-backup-db/src/postgres_connector.rs index d1a006be7..fa183647f 100644 --- a/crates/shared-backup-db/src/postgres_connector.rs +++ b/crates/shared-backup-db/src/postgres_connector.rs @@ -4,7 +4,7 @@ use deadpool_postgres::tokio_postgres::config::Config as PgConfig; use deadpool_postgres::tokio_postgres::{NoTls, 
Row}; use deadpool_postgres::{Manager, ManagerConfig, Object, Pool, PoolError, RecyclingMethod}; use sov_rollup_interface::rpc::StateTransitionRpcResponse; -use tracing::instrument; +use tracing::{debug, instrument}; use crate::config::SharedBackupDbConfig; use crate::tables::{ @@ -12,7 +12,6 @@ use crate::tables::{ INDEX_L1_END_HASH, INDEX_L1_END_HEIGHT, INDEX_L2_END_HEIGHT, MEMPOOL_TXS_TABLE_CREATE_QUERY, PROOF_TABLE_CREATE_QUERY, SEQUENCER_COMMITMENT_TABLE_CREATE_QUERY, }; -use crate::utils::get_db_extension; #[derive(Clone)] pub struct PostgresConnector { @@ -34,16 +33,17 @@ impl PostgresConnector { .unwrap(); let mut client = pool.get().await?; + debug!("Connecting PG client to DB: {}", pg_config.db_name()); + // Create new db if running thread is not main or tokio-runtime-worker, meaning when running for tests if cfg!(feature = "test-utils") { // create new db - let db_name = format!("citrea{}", get_db_extension()); let _ = client - .batch_execute(&format!("CREATE DATABASE {};", db_name.clone())) + .batch_execute(&format!("CREATE DATABASE {};", pg_config.db_name())) .await; //connect to new db - cfg.dbname(&db_name); + cfg.dbname(pg_config.db_name()); let mgr = Manager::from_config(cfg, NoTls, mgr_config); pool = Pool::builder(mgr) .max_size(pg_config.max_pool_size().unwrap_or(16)) @@ -81,7 +81,7 @@ impl PostgresConnector { } #[cfg(feature = "test-utils")] - pub async fn new_test_client() -> Result { + pub async fn new_test_client(db_name: String) -> Result { let mut cfg: PgConfig = SharedBackupDbConfig::default().into(); let mgr_config = ManagerConfig { @@ -91,7 +91,6 @@ impl PostgresConnector { let pool = Pool::builder(mgr).max_size(16).build().unwrap(); let client = pool.get().await.unwrap(); - let db_name = format!("citrea{}", get_db_extension()); client .batch_execute(&format!("DROP DATABASE IF EXISTS {};", db_name.clone())) .await @@ -103,8 +102,8 @@ impl PostgresConnector { .unwrap(); drop(pool); - //connect to new db + //connect to new db cfg.dbname(db_name.as_str()); let mgr = Manager::from_config(cfg, NoTls, mgr_config); let test_pool = Pool::builder(mgr).max_size(16).build().unwrap(); @@ -314,7 +313,9 @@ mod tests { #[tokio::test] async fn test_insert_sequencer_commitment() { - let client = PostgresConnector::new_test_client().await.unwrap(); + let client = PostgresConnector::new_test_client("insert_sequencer_commitments".to_owned()) + .await + .unwrap(); client.create_table(Tables::SequencerCommitment).await; let inserted = client @@ -348,7 +349,9 @@ mod tests { #[tokio::test] async fn test_insert_rlp_tx() { - let client = PostgresConnector::new_test_client().await.unwrap(); + let client = PostgresConnector::new_test_client("insert_rlp_tx".to_owned()) + .await + .unwrap(); client.create_table(Tables::MempoolTxs).await; client @@ -396,7 +399,9 @@ mod tests { #[tokio::test] async fn test_insert_proof_data() { - let client = PostgresConnector::new_test_client().await.unwrap(); + let client = PostgresConnector::new_test_client("test_insert_proof_data".to_string()) + .await + .unwrap(); client.create_table(Tables::Proof).await; let inserted = client diff --git a/crates/shared-backup-db/src/tables.rs b/crates/shared-backup-db/src/tables.rs index b4dd46728..9916699f7 100644 --- a/crates/shared-backup-db/src/tables.rs +++ b/crates/shared-backup-db/src/tables.rs @@ -3,8 +3,6 @@ use std::fmt; use postgres_types::Json; use sov_rollup_interface::rpc::StateTransitionRpcResponse; -use crate::utils::get_db_extension; - pub enum Tables { /// string version is sequencer_commitment 
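[Illustrative sketch, not part of the patch: how a test is expected to combine the reworked `PostgresConnector::new_test_client(db_name)` with `SharedBackupDbConfig::set_db_name` now that the per-thread `citrea_<thread>` naming from utils.rs is gone. It assumes the `test-utils` feature of shared-backup-db and a local Postgres reachable with the crate's default connection settings; the database name is arbitrary, and using one unique name per test is what keeps parallel runs isolated.]

use shared_backup_db::{PostgresConnector, SharedBackupDbConfig};

// One database per test: the sequencer under test and the assertions in the
// test body both point at `db_name`. `new_test_client` drops any leftover
// database from a previous run before recreating it.
async fn offchain_db_for_test(db_name: &str) -> (SharedBackupDbConfig, PostgresConnector) {
    let db_name = db_name.to_owned();
    let sequencer_db_config = SharedBackupDbConfig::default().set_db_name(db_name.clone());
    let db_test_client = PostgresConnector::new_test_client(db_name).await.unwrap();
    (sequencer_db_config, db_test_client)
}

[check_commitment_in_offchain_db above follows exactly this shape, using the test's own name as the database name.]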
#[allow(dead_code)] @@ -70,10 +68,6 @@ pub struct DbSequencerCommitment { pub status: CommitmentStatus, } -pub fn create_database() -> String { - format!("CREATE DATABASE citrea{};", get_db_extension()) -} - pub const SEQUENCER_COMMITMENT_TABLE_CREATE_QUERY: &str = " CREATE TABLE IF NOT EXISTS sequencer_commitments ( id SERIAL PRIMARY KEY, diff --git a/crates/shared-backup-db/src/utils.rs b/crates/shared-backup-db/src/utils.rs deleted file mode 100644 index ec66e4545..000000000 --- a/crates/shared-backup-db/src/utils.rs +++ /dev/null @@ -1,19 +0,0 @@ -pub(crate) fn get_db_extension() -> String { - let thread = std::thread::current(); - let mut thread_name = format!("_{}", thread.name().unwrap_or("unnamed")); - if thread_name == "_tokio-runtime-worker" || thread_name == "_main" { - thread_name = "".to_string(); - } - "_".to_owned() - + thread_name - .split(':') - .collect::>() - .last() - .unwrap_or(&"unnamed") -} - -#[test] -fn test_get_db_extension() { - let a = get_db_extension(); - assert_eq!(a, "_test_get_db_extension"); -} diff --git a/crates/sovereign-sdk/adapters/mock-da/Cargo.toml b/crates/sovereign-sdk/adapters/mock-da/Cargo.toml index 6b5c4abdc..a00367cee 100644 --- a/crates/sovereign-sdk/adapters/mock-da/Cargo.toml +++ b/crates/sovereign-sdk/adapters/mock-da/Cargo.toml @@ -31,6 +31,7 @@ sov-rollup-interface = { path = "../../rollup-interface" } [dev-dependencies] futures = { workspace = true } +tempfile = { workspace = true } [features] default = ["native"] diff --git a/crates/sovereign-sdk/adapters/mock-da/src/db_connector.rs b/crates/sovereign-sdk/adapters/mock-da/src/db_connector.rs index 169f277f0..60847671e 100644 --- a/crates/sovereign-sdk/adapters/mock-da/src/db_connector.rs +++ b/crates/sovereign-sdk/adapters/mock-da/src/db_connector.rs @@ -1,37 +1,25 @@ -use std::collections::HashSet; -use std::path::{Path, PathBuf}; -use std::sync::Mutex; +use std::path::Path; -use lazy_static::lazy_static; use rusqlite::{params, Connection}; use tracing::debug; use crate::{MockBlock, MockBlockHeader, MockHash, MockValidityCond}; -lazy_static! 
{ - static ref USED_THREAD: Mutex> = Mutex::new(HashSet::new()); -} - pub(crate) struct DbConnector { // thread-safe sqlite connection conn: Connection, } impl DbConnector { - pub fn new() -> Self { - let thread = std::thread::current(); - let mut thread_name = thread.name().unwrap_or("unnamed"); - if thread_name == "tokio-runtime-worker" { - thread_name = "main" - } - let dir = workspace_dir() - .join("test-da-dbs") - .join(thread_name.to_string() + ".db"); - let db_name = dir.to_str().unwrap().to_string(); + pub fn new(db_path: &Path) -> Self { + debug!("Using test db: {:?}", db_path); - debug!("Using test db: {}", db_name); + if !db_path.exists() { + let _ = std::fs::create_dir(db_path); + } - let conn = Connection::open(db_name.clone()).expect("DbConnector: failed to open db"); + let conn = + Connection::open(db_path.join("mock_da.db")).expect("DbConnector: failed to open db"); conn.execute( "CREATE TABLE IF NOT EXISTS blocks ( @@ -47,17 +35,6 @@ impl DbConnector { ) .expect("DbConnector: failed to create table"); - // first time db is opened in a thread, wipe data inside it unless it's the main thread - // keep main thread's data since main thread runs only when running demo or mocknet - // we would like to keep da data in that case - let mut set = USED_THREAD.lock().unwrap(); - if !set.contains(&thread_name.to_string()) && thread_name != "main" { - debug!("deleting db"); - conn.execute("DELETE FROM blocks", ()) - .expect("DbConnector: failed to delete all rows"); - set.insert(thread_name.to_string()); - } - Self { conn } } @@ -164,18 +141,6 @@ impl DbConnector { } } -fn workspace_dir() -> PathBuf { - let output = std::process::Command::new(env!("CARGO")) - .arg("locate-project") - .arg("--workspace") - .arg("--message-format=plain") - .output() - .unwrap() - .stdout; - let cargo_path = Path::new(std::str::from_utf8(&output).unwrap().trim()); - cargo_path.parent().unwrap().to_path_buf() -} - #[cfg(test)] mod tests { use crate::db_connector::DbConnector; @@ -194,7 +159,8 @@ mod tests { #[test] fn test_write_and_read() { - let db = DbConnector::new(); + let db_path = tempfile::tempdir().unwrap(); + let db = DbConnector::new(db_path.path()); let block = get_test_block(1); @@ -207,7 +173,8 @@ mod tests { #[test] fn test_read_by_hash() { - let db = DbConnector::new(); + let db_path = tempfile::tempdir().unwrap(); + let db = DbConnector::new(db_path.path()); let block = get_test_block(1); @@ -222,7 +189,8 @@ mod tests { #[test] fn test_len() { - let db = DbConnector::new(); + let db_path = tempfile::tempdir().unwrap(); + let db = DbConnector::new(db_path.path()); let block = get_test_block(1); @@ -233,7 +201,8 @@ mod tests { #[test] fn test_last() { - let db = DbConnector::new(); + let db_path = tempfile::tempdir().unwrap(); + let db = DbConnector::new(db_path.path()); let block1 = get_test_block(1); let block2 = get_test_block(2); @@ -247,7 +216,8 @@ mod tests { #[test] fn test_prune_above() { - let db = DbConnector::new(); + let db_path = tempfile::tempdir().unwrap(); + let db = DbConnector::new(db_path.path()); let block1 = get_test_block(1); let block2 = get_test_block(2); @@ -266,7 +236,8 @@ mod tests { #[test] fn test_same_thread_behaviour() { - let db = DbConnector::new(); + let db_path = tempfile::tempdir().unwrap(); + let db = DbConnector::new(db_path.path()); let block = get_test_block(1); @@ -276,7 +247,7 @@ mod tests { assert_eq!(block, block_from_db); - let db2 = DbConnector::new(); + let db2 = DbConnector::new(db_path.path()); // data wasn't wiped let block_from_db2 = 
db2.get(0).unwrap(); diff --git a/crates/sovereign-sdk/adapters/mock-da/src/service.rs b/crates/sovereign-sdk/adapters/mock-da/src/service.rs index 8f538bc13..eedb700c4 100644 --- a/crates/sovereign-sdk/adapters/mock-da/src/service.rs +++ b/crates/sovereign-sdk/adapters/mock-da/src/service.rs @@ -1,3 +1,4 @@ +use std::path::Path; use std::pin::Pin; use std::sync::Mutex; use std::task::{Context, Poll}; @@ -77,12 +78,16 @@ pub struct MockDaService { impl MockDaService { /// Creates a new [`MockDaService`] with instant finality. - pub fn new(sequencer_da_address: MockAddress) -> Self { - Self::with_finality(sequencer_da_address, 0) + pub fn new(sequencer_da_address: MockAddress, db_path: &Path) -> Self { + Self::with_finality(sequencer_da_address, 0, db_path) } /// Create a new [`MockDaService`] with given finality. - pub fn with_finality(sequencer_da_address: MockAddress, blocks_to_finality: u32) -> Self { + pub fn with_finality( + sequencer_da_address: MockAddress, + blocks_to_finality: u32, + db_path: &Path, + ) -> Self { let (tx, rx1) = broadcast::channel(16); // Spawn a task, so channel is never closed tokio::spawn(async move { @@ -93,7 +98,7 @@ impl MockDaService { }); Self { sequencer_da_address, - blocks: Arc::new(AsyncMutex::new(DbConnector::new())), + blocks: Arc::new(AsyncMutex::new(DbConnector::new(db_path))), blocks_to_finality, finalized_header_sender: tx, wait_attempts: 100_0000, @@ -458,7 +463,8 @@ mod tests { #[tokio::test] async fn test_empty() { - let mut da = MockDaService::new(MockAddress::new([1; 32])); + let db_path = tempfile::tempdir().unwrap(); + let mut da = MockDaService::new(MockAddress::new([1; 32]), db_path.path()); da.wait_attempts = 10; let last_finalized_header = da.get_last_finalized_block_header().await.unwrap(); @@ -516,7 +522,12 @@ mod tests { } async fn test_push_and_read(finalization: u64, num_blocks: usize) { - let mut da = MockDaService::with_finality(MockAddress::new([1; 32]), finalization as u32); + let db_path = tempfile::tempdir().unwrap(); + let mut da = MockDaService::with_finality( + MockAddress::new([1; 32]), + finalization as u32, + db_path.path(), + ); da.blocks.lock().await.delete_all_rows(); da.wait_attempts = 2; let number_of_finalized_blocks = num_blocks - finalization as usize; @@ -552,7 +563,12 @@ mod tests { } async fn test_push_many_then_read(finalization: u64, num_blocks: usize) { - let mut da = MockDaService::with_finality(MockAddress::new([1; 32]), finalization as u32); + let db_path = tempfile::tempdir().unwrap(); + let mut da = MockDaService::with_finality( + MockAddress::new([1; 32]), + finalization as u32, + db_path.path(), + ); da.blocks.lock().await.delete_all_rows(); da.wait_attempts = 2; @@ -638,7 +654,8 @@ mod tests { #[tokio::test] async fn read_multiple_times() { - let mut da = MockDaService::with_finality(MockAddress::new([1; 32]), 4); + let db_path = tempfile::tempdir().unwrap(); + let mut da = MockDaService::with_finality(MockAddress::new([1; 32]), 4, db_path.path()); da.wait_attempts = 2; // 1 -> 2 -> 3 @@ -671,7 +688,8 @@ mod tests { #[tokio::test] async fn test_zk_submission() -> Result<(), anyhow::Error> { - let da = MockDaService::new(MockAddress::new([1; 32])); + let db_path = tempfile::tempdir().unwrap(); + let da = MockDaService::new(MockAddress::new([1; 32]), db_path.path()); let aggregated_proof_data = vec![1, 2, 3]; let height = da.send_aggregated_zk_proof(&aggregated_proof_data).await?; let proofs = da.get_aggregated_proofs_at(height).await?; @@ -686,7 +704,8 @@ mod tests { #[tokio::test] async fn 
test_reorg_control_success() { - let da = MockDaService::with_finality(MockAddress::new([1; 32]), 4); + let db_path = tempfile::tempdir().unwrap(); + let da = MockDaService::with_finality(MockAddress::new([1; 32]), 4, db_path.path()); // 1 -> 2 -> 3.1 -> 4.1 // \ -> 3.2 -> 4.2 @@ -721,7 +740,8 @@ mod tests { #[tokio::test] async fn test_attempt_reorg_after_finalized() { - let da = MockDaService::with_finality(MockAddress::new([1; 32]), 2); + let db_path = tempfile::tempdir().unwrap(); + let da = MockDaService::with_finality(MockAddress::new([1; 32]), 2, db_path.path()); // 1 -> 2 -> 3 -> 4 @@ -773,7 +793,8 @@ mod tests { #[tokio::test] async fn test_planned_reorg() { - let mut da = MockDaService::with_finality(MockAddress::new([1; 32]), 4); + let db_path = tempfile::tempdir().unwrap(); + let mut da = MockDaService::with_finality(MockAddress::new([1; 32]), 4, db_path.path()); da.wait_attempts = 2; // Planned for will replace blocks at height 3 and 4 @@ -809,7 +830,8 @@ mod tests { #[tokio::test] async fn test_planned_reorg_shorter() { - let mut da = MockDaService::with_finality(MockAddress::new([1; 32]), 4); + let db_path = tempfile::tempdir().unwrap(); + let mut da = MockDaService::with_finality(MockAddress::new([1; 32]), 4, db_path.path()); da.wait_attempts = 2; // Planned for will replace blocks at height 3 and 4 let planned_fork = diff --git a/crates/sovereign-sdk/adapters/mock-da/src/types/mod.rs b/crates/sovereign-sdk/adapters/mock-da/src/types/mod.rs index 322dc1548..795414cba 100644 --- a/crates/sovereign-sdk/adapters/mock-da/src/types/mod.rs +++ b/crates/sovereign-sdk/adapters/mock-da/src/types/mod.rs @@ -2,6 +2,7 @@ mod address; use std::fmt::{Debug, Formatter}; use std::hash::Hasher; +use std::path::PathBuf; pub use address::{MockAddress, MOCK_SEQUENCER_DA_ADDRESS}; use borsh::{BorshDeserialize, BorshSerialize}; @@ -145,6 +146,8 @@ impl BlockHeaderTrait for MockBlockHeader { pub struct MockDaConfig { /// The address to use to "submit" blobs on the mock da layer pub sender_address: MockAddress, + /// The path in which DA db is stored + pub db_path: PathBuf, } #[derive(Clone, Default)] diff --git a/crates/sovereign-sdk/full-node/sov-sequencer/src/lib.rs b/crates/sovereign-sdk/full-node/sov-sequencer/src/lib.rs index 9fcbe7214..a75ee4ea9 100644 --- a/crates/sovereign-sdk/full-node/sov-sequencer/src/lib.rs +++ b/crates/sovereign-sdk/full-node/sov-sequencer/src/lib.rs @@ -136,7 +136,6 @@ pub enum SubmitTransactionResponse { #[cfg(test)] mod tests { - use sov_mock_da::{MockAddress, MockDaService}; use sov_rollup_interface::da::BlobReaderTrait; @@ -176,8 +175,9 @@ mod tests { #[tokio::test] async fn test_submit_on_empty_mempool() { + let temp = tempfile::tempdir().unwrap(); let batch_builder = MockBatchBuilder { mempool: vec![] }; - let da_service = MockDaService::new(MockAddress::default()); + let da_service = MockDaService::new(MockAddress::default(), temp.path()); let rpc = get_sequencer_rpc(batch_builder, da_service.clone()); let arg: &[u8] = &[]; @@ -194,12 +194,13 @@ mod tests { #[tokio::test] async fn test_submit_happy_path() { + let temp = tempfile::tempdir().unwrap(); let tx1 = vec![1, 2, 3]; let tx2 = vec![3, 4, 5]; let batch_builder = MockBatchBuilder { mempool: vec![tx1.clone(), tx2.clone()], }; - let da_service = MockDaService::new(MockAddress::default()); + let da_service = MockDaService::new(MockAddress::default(), temp.path()); let rpc = get_sequencer_rpc(batch_builder, da_service.clone()); let arg: &[u8] = &[]; @@ -216,8 +217,9 @@ mod tests { #[tokio::test] async fn 
test_accept_tx() { + let temp = tempfile::tempdir().unwrap(); let batch_builder = MockBatchBuilder { mempool: vec![] }; - let da_service = MockDaService::new(MockAddress::default()); + let da_service = MockDaService::new(MockAddress::default(), temp.path()); let rpc = get_sequencer_rpc(batch_builder, da_service.clone()); diff --git a/crates/sovereign-sdk/full-node/sov-stf-runner/src/config.rs b/crates/sovereign-sdk/full-node/sov-stf-runner/src/config.rs index 87c73099f..021b8076a 100644 --- a/crates/sovereign-sdk/full-node/sov-stf-runner/src/config.rs +++ b/crates/sovereign-sdk/full-node/sov-stf-runner/src/config.rs @@ -40,7 +40,7 @@ const fn default_max_connections() -> u32 { /// Simple storage configuration #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct StorageConfig { - /// Path that can be utilized by concrete implementation + /// Path that can be utilized by concrete rollup implementation pub path: PathBuf, } @@ -113,7 +113,6 @@ pub fn from_toml_path, R: DeserializeOwned>(path: P) -> anyhow::R #[cfg(test)] mod tests { use std::io::Write; - use std::path::PathBuf; use tempfile::NamedTempFile; @@ -127,7 +126,8 @@ mod tests { #[test] fn test_correct_rollup_config() { - let config = r#" + let config = + r#" [public_keys] sequencer_public_key = "0000000000000000000000000000000000000000000000000000000000000000" sequencer_da_pub_key = "7777777777777777777777777777777777777777777777777777777777777777" @@ -140,19 +140,21 @@ mod tests { [da] sender_address = "0000000000000000000000000000000000000000000000000000000000000000" + db_path = "/tmp/da" [storage] - path = "/tmp" + path = "/tmp/rollup" [runner] include_tx_body = true sequencer_client_url = "http://0.0.0.0:12346" - "#; + "#.to_owned(); - let config_file = create_config_from(config); + let config_file = create_config_from(&config); let config: RollupConfig = from_toml_path(config_file.path()).unwrap(); + let expected = RollupConfig { runner: Some(RunnerConfig { sequencer_client_url: "http://0.0.0.0:12346".to_owned(), @@ -161,9 +163,10 @@ mod tests { }), da: sov_mock_da::MockDaConfig { sender_address: [0; 32].into(), + db_path: "/tmp/da".into(), }, storage: StorageConfig { - path: PathBuf::from("/tmp"), + path: "/tmp/rollup".into(), }, rpc: RpcConfig { bind_host: "127.0.0.1".to_string(), diff --git a/crates/sovereign-sdk/full-node/sov-stf-runner/tests/prover_tests.rs b/crates/sovereign-sdk/full-node/sov-stf-runner/tests/prover_tests.rs index 90b90291a..fee86c8a9 100644 --- a/crates/sovereign-sdk/full-node/sov-stf-runner/tests/prover_tests.rs +++ b/crates/sovereign-sdk/full-node/sov-stf-runner/tests/prover_tests.rs @@ -15,7 +15,9 @@ use sov_stf_runner::{ #[tokio::test] async fn test_successful_prover_execution() -> Result<(), ProverServiceError> { - let da_service = MockDaService::new(MockAddress::from([0; 32])); + let temp = tempfile::tempdir().unwrap(); + + let da_service = MockDaService::new(MockAddress::from([0; 32]), temp.path()); let TestProver { prover_service, vm, .. 
@@ -47,7 +49,8 @@ async fn test_successful_prover_execution() -> Result<(), ProverServiceError> { #[tokio::test] async fn test_prover_status_busy() -> Result<(), anyhow::Error> { - let da_service = MockDaService::new(MockAddress::from([0; 32])); + let temp = tempfile::tempdir().unwrap(); + let da_service = MockDaService::new(MockAddress::from([0; 32]), temp.path()); let TestProver { prover_service, vm, diff --git a/crates/sovereign-sdk/full-node/sov-stf-runner/tests/runner_initialization_tests.rs b/crates/sovereign-sdk/full-node/sov-stf-runner/tests/runner_initialization_tests.rs index 85287445a..fbe0ba8b1 100644 --- a/crates/sovereign-sdk/full-node/sov-stf-runner/tests/runner_initialization_tests.rs +++ b/crates/sovereign-sdk/full-node/sov-stf-runner/tests/runner_initialization_tests.rs @@ -46,7 +46,7 @@ type MockProverService = ParallelProverService< HashStf, >; fn initialize_runner( - path: &std::path::Path, + storage_path: &std::path::Path, init_variant: MockInitVariant, ) -> StateTransitionRunner< HashStf, @@ -56,10 +56,20 @@ fn initialize_runner( MockProverService, sov_modules_api::default_context::DefaultContext, > { + let da_storage_path = storage_path.join("da").to_path_buf(); + let rollup_storage_path = storage_path.join("rollup").to_path_buf(); + + if !std::path::Path::new(&da_storage_path).exists() { + let _ = std::fs::create_dir(da_storage_path.clone()); + } + if !std::path::Path::new(&rollup_storage_path).exists() { + let _ = std::fs::create_dir(rollup_storage_path.clone()); + } + let address = MockAddress::new([11u8; 32]); let rollup_config = RollupConfig:: { storage: StorageConfig { - path: path.to_path_buf(), + path: rollup_storage_path.clone(), }, rpc: RpcConfig { bind_host: "127.0.0.1".to_string(), @@ -73,6 +83,7 @@ fn initialize_runner( }), da: MockDaConfig { sender_address: address, + db_path: da_storage_path.clone(), }, public_keys: RollupPublicKeys { sequencer_public_key: vec![], @@ -81,14 +92,14 @@ fn initialize_runner( }, }; - let da_service = MockDaService::new(address); + let da_service = MockDaService::new(address, &da_storage_path); - let ledger_db = LedgerDB::with_path(path).unwrap(); + let ledger_db = LedgerDB::with_path(rollup_storage_path.clone()).unwrap(); let stf = HashStf::::new(); let storage_config = sov_state::config::Config { - path: path.to_path_buf(), + path: rollup_storage_path.to_path_buf(), }; let storage_manager = ProverStorageManager::new(storage_config).unwrap(); diff --git a/crates/sovereign-sdk/full-node/sov-stf-runner/tests/runner_reorg_tests.rs b/crates/sovereign-sdk/full-node/sov-stf-runner/tests/runner_reorg_tests.rs index 85fda52d9..8d79df047 100644 --- a/crates/sovereign-sdk/full-node/sov-stf-runner/tests/runner_reorg_tests.rs +++ b/crates/sovereign-sdk/full-node/sov-stf-runner/tests/runner_reorg_tests.rs @@ -48,7 +48,7 @@ async fn test_simple_reorg_case() { vec![15, 15, 15, 15], ]; - let mut da_service = MockDaService::with_finality(sequencer_address, 4); + let mut da_service = MockDaService::with_finality(sequencer_address, 4, tmpdir.path()); da_service.set_wait_attempts(2); let _genesis_header = da_service.get_last_finalized_block_header().await.unwrap(); @@ -87,7 +87,7 @@ async fn test_instant_finality_data_stored() { let sequencer_address = MockAddress::new([11u8; 32]); let genesis_params = vec![1, 2, 3, 4, 5]; - let mut da_service = MockDaService::new(sequencer_address); + let mut da_service = MockDaService::new(sequencer_address, tmpdir.path()); da_service.set_wait_attempts(2); let _genesis_header = 
da_service.get_last_finalized_block_header().await.unwrap(); @@ -113,13 +113,14 @@ async fn test_instant_finality_data_stored() { } async fn runner_execution( - path: &std::path::Path, + storage_path: &std::path::Path, init_variant: MockInitVariant, da_service: MockDaService, ) -> ([u8; 32], [u8; 32]) { + let rollup_storage_path = storage_path.join("rollup").to_path_buf(); let rollup_config = RollupConfig:: { storage: StorageConfig { - path: path.to_path_buf(), + path: rollup_storage_path.clone(), }, rpc: RpcConfig { bind_host: "127.0.0.1".to_string(), @@ -133,6 +134,7 @@ async fn runner_execution( }), da: MockDaConfig { sender_address: da_service.get_sequencer_address(), + db_path: storage_path.join("da").to_path_buf(), }, public_keys: RollupPublicKeys { sequencer_public_key: vec![0u8; 32], @@ -141,12 +143,12 @@ async fn runner_execution( }, }; - let ledger_db = LedgerDB::with_path(path).unwrap(); + let ledger_db = LedgerDB::with_path(rollup_storage_path.clone()).unwrap(); let stf = HashStf::::new(); let storage_config = sov_state::config::Config { - path: rollup_config.storage.path.clone(), + path: rollup_storage_path, }; let mut storage_manager = ProverStorageManager::new(storage_config).unwrap();
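[Illustrative sketch, not part of the patch: taken together, the mock-da changes above replace the old thread-name-derived sqlite database with an explicit directory supplied by the caller. The sketch assumes only the APIs visible in this patch (`MockDaService::new`, `MockDaService::with_finality`, `publish_test_block`) and the `tempfile` dev-dependency added to mock-da's Cargo.toml; the test name is made up.]

use sov_mock_da::{MockAddress, MockDaService};

#[tokio::test]
async fn mock_da_backed_by_tempdir() {
    // The caller owns the directory; DbConnector creates `mock_da.db` inside it.
    // Keep the TempDir alive for the whole test so the sqlite file is not
    // deleted out from under the service.
    let da_db_dir = tempfile::tempdir().unwrap();

    // Instant finality, as used by most tests above.
    let da_service = MockDaService::new(MockAddress::from([0; 32]), da_db_dir.path());
    da_service.publish_test_block().await.unwrap();

    // Delayed finality takes the same path plus the number of blocks to finalize after.
    let _da_with_finality =
        MockDaService::with_finality(MockAddress::from([0; 32]), 2, da_db_dir.path());
}

[Two services handed the same path share the same on-disk block history, which is how the sequencer, full-node and prover sides of the integration tests above observe each other's DA blocks through a single `da_db_dir`.]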