From 7c3cda2072a44da79d4cc3ae9cc0cfc219be6e57 Mon Sep 17 00:00:00 2001 From: Stan Bondi Date: Wed, 24 Jul 2024 12:06:11 +0400 Subject: [PATCH] refactor!: remove mempool execute, add faucet component, remove create free test coins (#1082) Description --- - remove duplicate and incorrect state bootstrapping code in engine - remove CreateFreeTestCoins instruction - remove mempool transaction validation - add validation to consensus-level transaction executor - remove deferred decision (decision is now made until after execution and locking) - fixes for local-only-rule concurrent execution - fix(engine): fix incorrectly marking components as changed when no state change occurred - adds XtrFaucet builtin template - create funded XtrFaucet component in testnet bootstrap - update walletd to call XtrFaucet for free test coins - fix(consensus): only accept missing transactions if they were requested - fix(transaction-submitter): ignore race condition when retrieving tx results Motivation and Context --- CreateFreeTestCoins was a quick way to get test funds. However because it is the only valid transaction that required no inputs, a number of edge cases had to be handled. This PR replaces this with a faucet component that is bootstrapped with funds. This component is only created for non-mainnet networks. The mempool execution has a number of limitations e.g. cannot handle versionless inputs. Next steps: - fee claiming does not currently work. This is because we need a bunch of information from the L1/epoch manager to authorize a claim. We may need to rethink this e.g. at the end of epoch, we create/update an UnclaimedVnFee substate which can later be claimed/swept by validators. The "minting" of the validator fee substate may require co-operation from multiple shards. - multishard transactions only previously worked for versioned inputs. After this PR they will not work at all because they require a substate pledging protocol. This will be implemented in a subsequent PR. - there is a small chance that a substate may change shards as the version changes. This chance needs to be completely removed. The easiest way would be to simply ignore the last 8 (u32) bytes of a substate address when mapping to a shard, however that would leave many tiny "holes" in the shard space where state cannot exist. How Has This Been Tested? --- Existing tests. Manually: 5000 transaction stress test on single shard network What process can a PR reviewer use to test or verify this change? --- Run a stress test (remember to regenerate stress test transaction bin file with new XtrFaucet transaction) Observe that local-only transactions that share the XtrFaucet vault are executed in the same block Breaking Changes --- - [ ] None - [x] Requires data directory to be deleted - [x] Other - CreateFreeTestCoins is no longer valid, so all clients that submit these will have to change to use the XtrFaucet --- Cargo.lock | 4 +- .../implementation/manager.rs | 29 +- .../src/template_manager/interface/types.rs | 11 +- .../src/handlers/accounts.rs | 54 +- .../src/handlers/nfts.rs | 9 +- .../src/handlers/transaction.rs | 8 +- .../AssetVault/Components/ActionMenu.tsx | 2 +- .../src/routes/Onboarding/Onboarding.tsx | 2 +- .../src/routes/Wallet/Components/Accounts.tsx | 2 +- .../tari_indexer/src/dry_run/processor.rs | 16 +- .../tari_indexer/src/json_rpc/handlers.rs | 2 +- applications/tari_swarm_daemon/Cargo.toml | 1 + .../src/webserver/templates.rs | 5 + .../webui/src/routes/Main.tsx | 1021 +++++++++-------- applications/tari_validator_node/Cargo.toml | 2 + .../tari_validator_node/src/bootstrap.rs | 281 ++--- .../consensus/block_transaction_executor.rs | 168 +-- .../src/consensus/handle.rs | 17 +- .../src/consensus/metrics.rs | 1 - .../tari_validator_node/src/consensus/mod.rs | 113 +- .../tari_validator_node/src/consensus/spec.rs | 7 +- .../src/dry_run_transaction_processor.rs | 12 +- .../src/json_rpc/handlers.rs | 4 +- applications/tari_validator_node/src/lib.rs | 3 + .../src/p2p/services/mempool/error.rs | 58 +- .../src/p2p/services/mempool/executor.rs | 135 --- .../src/p2p/services/mempool/initializer.rs | 30 +- .../src/p2p/services/mempool/metrics.rs | 43 +- .../src/p2p/services/mempool/mod.rs | 3 - .../src/p2p/services/mempool/service.rs | 406 +------ .../validators/after/has_involved_shards.rs | 37 - .../services/mempool/validators/after/mod.rs | 7 - .../after/outputs_dont_exist_locally.rs | 51 - .../services/mempool/validators/and_then.rs | 33 - .../before/claim_fee_instructions.rs | 60 - .../mempool/validators/before/epoch_range.rs | 53 - .../mempool/validators/before/has_inputs.rs | 48 - .../p2p/services/mempool/validators/mod.rs | 45 - .../claim_fee_instructions.rs | 54 + .../src/transaction_validators/epoch_range.rs | 48 + .../src/transaction_validators/error.rs | 47 + .../before => transaction_validators}/fee.rs | 13 +- .../src/transaction_validators/has_inputs.rs | 37 + .../before => transaction_validators}/mod.rs | 9 +- .../signature.rs | 13 +- .../template_exists.rs | 13 +- .../transaction_validators/with_context.rs | 24 + .../tari_validator_node/src/validator.rs | 110 ++ bindings/src/types/Decision.ts | 2 +- bindings/src/types/ForeignProposal.ts | 2 +- bindings/src/types/Instruction.ts | 5 +- bindings/src/types/TransactionPoolRecord.ts | 9 +- .../common_types/src/substate_address.rs | 17 +- dan_layer/consensus/Cargo.toml | 1 - .../src/hotstuff/block_change_set.rs | 2 +- dan_layer/consensus/src/hotstuff/error.rs | 5 - dan_layer/consensus/src/hotstuff/mod.rs | 2 +- .../src/hotstuff/on_message_validate.rs | 265 ++--- .../consensus/src/hotstuff/on_propose.rs | 65 +- .../on_ready_to_vote_on_local_block.rs | 140 +-- .../hotstuff/on_receive_foreign_proposal.rs | 2 +- .../src/hotstuff/on_receive_local_proposal.rs | 56 +- .../hotstuff/on_receive_new_transaction.rs | 126 ++ ...on_receive_request_missing_transactions.rs | 7 +- .../on_receive_requested_transactions.rs | 43 - dan_layer/consensus/src/hotstuff/proposer.rs | 49 +- .../hotstuff/substate_store/pending_store.rs | 72 +- dan_layer/consensus/src/hotstuff/worker.rs | 221 ++-- dan_layer/consensus/src/messages/message.rs | 20 +- .../messages/request_missing_transaction.rs | 3 +- .../src/messages/requested_transaction.rs | 3 +- .../consensus/src/traits/substate_store.rs | 7 +- .../src/traits/transaction_executor.rs | 27 +- dan_layer/consensus_tests/src/consensus.rs | 136 ++- .../consensus_tests/src/substate_store.rs | 4 +- .../src/support/executions_store.rs | 4 +- .../consensus_tests/src/support/harness.rs | 85 +- .../consensus_tests/src/support/network.rs | 64 +- .../src/support/transaction.rs | 43 +- .../src/support/transaction_executor.rs | 29 +- .../src/support/validator/builder.rs | 36 +- .../src/support/validator/instance.rs | 12 +- dan_layer/engine/src/lib.rs | 5 - dan_layer/engine/src/runtime/error.rs | 6 +- dan_layer/engine/src/runtime/impl.rs | 29 +- dan_layer/engine/src/runtime/mod.rs | 9 +- dan_layer/engine/src/runtime/state_store.rs | 35 + dan_layer/engine/src/runtime/working_state.rs | 41 +- .../engine/src/{ => state_store}/bootstrap.rs | 28 +- dan_layer/engine/src/state_store/memory.rs | 2 +- dan_layer/engine/src/state_store/mod.rs | 2 + dan_layer/engine/src/transaction/processor.rs | 16 +- dan_layer/engine/tests/access_rules.rs | 2 +- dan_layer/engine/tests/confidential.rs | 17 +- dan_layer/engine/tests/shenanigans.rs | 4 +- dan_layer/engine/tests/test.rs | 47 +- dan_layer/engine_types/Cargo.toml | 4 +- dan_layer/engine_types/src/instruction.rs | 16 +- dan_layer/p2p/proto/consensus.proto | 23 +- dan_layer/p2p/proto/transaction.proto | 4 - dan_layer/p2p/src/conversions/consensus.rs | 42 +- dan_layer/p2p/src/conversions/transaction.rs | 23 +- .../up.sql | 10 +- dan_layer/state_store_sqlite/src/reader.rs | 38 +- dan_layer/state_store_sqlite/src/schema.rs | 4 +- .../src/sql_models/block_diff.rs | 56 +- .../src/sql_models/transaction.rs | 2 +- .../src/sql_models/transaction_pool.rs | 49 +- dan_layer/state_store_sqlite/src/writer.rs | 163 +-- dan_layer/state_store_sqlite/tests/tests.rs | 24 +- .../storage/src/consensus_models/block.rs | 4 +- .../src/consensus_models/block_diff.rs | 10 + .../storage/src/consensus_models/command.rs | 66 +- .../consensus_models/executed_transaction.rs | 10 +- .../storage/src/consensus_models/substate.rs | 7 +- .../src/consensus_models/transaction.rs | 51 +- .../consensus_models/transaction_decision.rs | 9 - .../consensus_models/transaction_execution.rs | 2 +- .../src/consensus_models/transaction_pool.rs | 120 +- dan_layer/storage/src/state_store/mod.rs | 14 +- dan_layer/template_builtin/build.rs | 2 +- dan_layer/template_builtin/src/lib.rs | 33 +- .../template_builtin/templates/.gitignore | 3 +- .../templates/faucet/Cargo.toml | 20 + .../templates/faucet/src/lib.rs | 34 + .../template_lib/src/auth/access_rules.rs | 10 +- dan_layer/template_lib/src/constants.rs | 12 +- .../src/models/confidential_proof.rs | 13 + dan_layer/template_test_tooling/src/lib.rs | 1 + .../src/template_test.rs | 18 +- dan_layer/transaction/src/substate.rs | 11 +- dan_layer/wallet/sdk/src/apis/substate.rs | 9 +- dan_layer/wallet/sdk/src/apis/transaction.rs | 15 +- integration_tests/src/wallet_daemon_cli.rs | 2 +- ...fees.feature => claim_fees.feature.ignore} | 3 + .../tests/features/transfer.feature | 48 +- networking/rpc_framework/src/client/mod.rs | 10 +- utilities/tariswap_test_bench/src/accounts.rs | 44 +- utilities/tariswap_test_bench/src/cli.rs | 3 + utilities/tariswap_test_bench/src/faucet.rs | 2 +- utilities/tariswap_test_bench/src/runner.rs | 7 +- .../tariswap_test_bench/src/templates.rs | 29 +- .../templates/faucet/Cargo.toml | 20 + .../templates/faucet/src/lib.rs | 55 + .../src/transaction_builders/free_coins.rs | 19 +- utilities/transaction_submitter/Cargo.toml | 1 + utilities/transaction_submitter/src/main.rs | 13 +- 147 files changed, 3002 insertions(+), 3144 deletions(-) delete mode 100644 applications/tari_validator_node/src/p2p/services/mempool/executor.rs delete mode 100644 applications/tari_validator_node/src/p2p/services/mempool/validators/after/has_involved_shards.rs delete mode 100644 applications/tari_validator_node/src/p2p/services/mempool/validators/after/mod.rs delete mode 100644 applications/tari_validator_node/src/p2p/services/mempool/validators/after/outputs_dont_exist_locally.rs delete mode 100644 applications/tari_validator_node/src/p2p/services/mempool/validators/and_then.rs delete mode 100644 applications/tari_validator_node/src/p2p/services/mempool/validators/before/claim_fee_instructions.rs delete mode 100644 applications/tari_validator_node/src/p2p/services/mempool/validators/before/epoch_range.rs delete mode 100644 applications/tari_validator_node/src/p2p/services/mempool/validators/before/has_inputs.rs delete mode 100644 applications/tari_validator_node/src/p2p/services/mempool/validators/mod.rs create mode 100644 applications/tari_validator_node/src/transaction_validators/claim_fee_instructions.rs create mode 100644 applications/tari_validator_node/src/transaction_validators/epoch_range.rs create mode 100644 applications/tari_validator_node/src/transaction_validators/error.rs rename applications/tari_validator_node/src/{p2p/services/mempool/validators/before => transaction_validators}/fee.rs (55%) create mode 100644 applications/tari_validator_node/src/transaction_validators/has_inputs.rs rename applications/tari_validator_node/src/{p2p/services/mempool/validators/before => transaction_validators}/mod.rs (72%) rename applications/tari_validator_node/src/{p2p/services/mempool/validators/before => transaction_validators}/signature.rs (64%) rename applications/tari_validator_node/src/{p2p/services/mempool/validators/before => transaction_validators}/template_exists.rs (78%) create mode 100644 applications/tari_validator_node/src/transaction_validators/with_context.rs create mode 100644 applications/tari_validator_node/src/validator.rs create mode 100644 dan_layer/consensus/src/hotstuff/on_receive_new_transaction.rs delete mode 100644 dan_layer/consensus/src/hotstuff/on_receive_requested_transactions.rs rename dan_layer/engine/src/{ => state_store}/bootstrap.rs (62%) create mode 100644 dan_layer/template_builtin/templates/faucet/Cargo.toml create mode 100644 dan_layer/template_builtin/templates/faucet/src/lib.rs rename integration_tests/tests/features/{claim_fees.feature => claim_fees.feature.ignore} (97%) create mode 100644 utilities/tariswap_test_bench/templates/faucet/Cargo.toml create mode 100644 utilities/tariswap_test_bench/templates/faucet/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 9c8fd8675..9fd3c88f1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8761,7 +8761,6 @@ dependencies = [ "tari_dan_storage", "tari_engine_types", "tari_epoch_manager", - "tari_mmr", "tari_shutdown", "tari_state_tree", "tari_transaction", @@ -9736,6 +9735,7 @@ dependencies = [ "tari_common_types", "tari_core", "tari_crypto", + "tari_dan_engine", "tari_engine_types", "tari_shutdown", "tari_validator_node_client", @@ -9919,6 +9919,7 @@ dependencies = [ "tari_rpc_state_sync", "tari_shutdown", "tari_state_store_sqlite", + "tari_template_builtin", "tari_template_lib", "tari_transaction", "tari_validator_node_client", @@ -10632,6 +10633,7 @@ version = "0.7.0" dependencies = [ "anyhow", "clap 4.5.4", + "tari_dan_common_types", "tari_transaction", "tari_validator_node_client", "tokio", diff --git a/applications/tari_dan_app_utilities/src/template_manager/implementation/manager.rs b/applications/tari_dan_app_utilities/src/template_manager/implementation/manager.rs index 0727b62c7..82c6feee5 100644 --- a/applications/tari_dan_app_utilities/src/template_manager/implementation/manager.rs +++ b/applications/tari_dan_app_utilities/src/template_manager/implementation/manager.rs @@ -40,7 +40,12 @@ use tari_dan_engine::{ use tari_dan_storage::global::{DbTemplate, DbTemplateType, DbTemplateUpdate, GlobalDb, TemplateStatus}; use tari_dan_storage_sqlite::global::SqliteGlobalDbAdapter; use tari_engine_types::calculate_template_binary_hash; -use tari_template_builtin::{get_template_builtin, ACCOUNT_NFT_TEMPLATE_ADDRESS, ACCOUNT_TEMPLATE_ADDRESS}; +use tari_template_builtin::{ + get_template_builtin, + ACCOUNT_NFT_TEMPLATE_ADDRESS, + ACCOUNT_TEMPLATE_ADDRESS, + FAUCET_TEMPLATE_ADDRESS, +}; use tari_template_lib::models::TemplateAddress; use super::TemplateConfig; @@ -90,28 +95,28 @@ impl TemplateManager { fn load_builtin_templates() -> HashMap { // for now, we only load the "account" template - let mut builtin_templates = HashMap::new(); + let mut builtin_templates = HashMap::with_capacity(3); // get the builtin WASM code of the account template let compiled_code = get_template_builtin(&ACCOUNT_TEMPLATE_ADDRESS); - let template = Self::load_builtin_template("account", ACCOUNT_TEMPLATE_ADDRESS, compiled_code.to_vec()); + let template = Self::convert_code_to_template("Account", ACCOUNT_TEMPLATE_ADDRESS, compiled_code.to_vec()); builtin_templates.insert(ACCOUNT_TEMPLATE_ADDRESS, template); // get the builtin WASM code of the account nft template let compiled_code = get_template_builtin(&ACCOUNT_NFT_TEMPLATE_ADDRESS); - let template = Self::load_builtin_template("account_nft", ACCOUNT_NFT_TEMPLATE_ADDRESS, compiled_code.to_vec()); + let template = + Self::convert_code_to_template("AccountNft", ACCOUNT_NFT_TEMPLATE_ADDRESS, compiled_code.to_vec()); builtin_templates.insert(ACCOUNT_NFT_TEMPLATE_ADDRESS, template); + // get the builtin WASM code of the account nft template + let compiled_code = get_template_builtin(&FAUCET_TEMPLATE_ADDRESS); + let template = Self::convert_code_to_template("XtrFaucet", FAUCET_TEMPLATE_ADDRESS, compiled_code.to_vec()); + builtin_templates.insert(FAUCET_TEMPLATE_ADDRESS, template); + builtin_templates } - fn load_builtin_template(name: &str, address: TemplateAddress, compiled_code: Vec) -> Template { - let compiled_code_len = compiled_code.len(); - info!( - target: LOG_TARGET, - "Loading builtin {} template: {} bytes", name, compiled_code_len - ); - + fn convert_code_to_template(name: &str, address: TemplateAddress, compiled_code: Vec) -> Template { // build the template object of the account template let binary_sha = calculate_template_binary_hash(&compiled_code); Template { @@ -119,7 +124,7 @@ impl TemplateManager { name: name.to_string(), address, url: "".to_string(), - binary_sha: binary_sha.to_vec(), + binary_sha, height: 0, }, executable: TemplateExecutable::CompiledWasm(compiled_code), diff --git a/applications/tari_dan_app_utilities/src/template_manager/interface/types.rs b/applications/tari_dan_app_utilities/src/template_manager/interface/types.rs index aadf16bde..14120b900 100644 --- a/applications/tari_dan_app_utilities/src/template_manager/interface/types.rs +++ b/applications/tari_dan_app_utilities/src/template_manager/interface/types.rs @@ -20,6 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +use tari_common_types::types::FixedHash; use tari_dan_storage::global::{DbTemplate, DbTemplateType}; use tari_template_lib::models::TemplateAddress; use tari_validator_node_client::types::TemplateAbi; @@ -34,7 +35,7 @@ pub struct TemplateMetadata { // this must be in the form of "https://example.com/my_template.wasm" pub url: String, /// SHA hash of binary - pub binary_sha: Vec, + pub binary_sha: FixedHash, /// Block height in which the template was published pub height: u64, } @@ -45,7 +46,9 @@ impl From for TemplateMetadata { name: reg.template_name, address: reg.template_address, url: reg.registration.binary_url.into_string(), - binary_sha: reg.registration.binary_sha.into_vec(), + binary_sha: FixedHash::try_from(reg.registration.binary_sha.into_vec()) + // TODO: impl Fallible conversion + .expect("binary_sha must be 32 bytes long"), height: reg.mined_height, } } @@ -58,7 +61,7 @@ impl From for TemplateMetadata { name: record.template_name, address: (*record.template_address).into(), url: record.url, - binary_sha: vec![], + binary_sha: FixedHash::zero(), height: record.height, } } @@ -87,7 +90,7 @@ impl From for Template { address: (*record.template_address).into(), url: record.url, // TODO: add field to db - binary_sha: vec![], + binary_sha: FixedHash::zero(), height: record.height, }, executable: match record.template_type { diff --git a/applications/tari_dan_wallet_daemon/src/handlers/accounts.rs b/applications/tari_dan_wallet_daemon/src/handlers/accounts.rs index 89172be39..973603156 100644 --- a/applications/tari_dan_wallet_daemon/src/handlers/accounts.rs +++ b/applications/tari_dan_wallet_daemon/src/handlers/accounts.rs @@ -17,7 +17,7 @@ use tari_dan_common_types::optional::Optional; use tari_dan_wallet_crypto::ConfidentialProofStatement; use tari_dan_wallet_sdk::{ apis::{confidential_transfer::TransferParams, jwt::JrpcPermission, key_manager, substate::ValidatorScanResult}, - models::{NewAccountInfo, VersionedSubstateId}, + models::NewAccountInfo, storage::WalletStore, DanWalletSdk, }; @@ -32,6 +32,7 @@ use tari_key_manager::key_manager::DerivedKey; use tari_template_builtin::ACCOUNT_TEMPLATE_ADDRESS; use tari_template_lib::{ args, + constants::{XTR_FAUCET_COMPONENT_ADDRESS, XTR_FAUCET_VAULT_ADDRESS}, models::{Amount, UnclaimedConfidentialOutputAddress}, prelude::CONFIDENTIAL_TARI_RESOURCE_ADDRESS, }; @@ -124,11 +125,7 @@ pub async fn handle_create( let transaction = Transaction::builder() .fee_transaction_pay_from_component(default_account.address.as_component_address().unwrap(), max_fee) .create_account(owner_pk.clone()) - .with_inputs( - inputs - .iter() - .map(|addr| SubstateRequirement::new(addr.substate_id.clone(), Some(addr.version))), - ) + .with_inputs(inputs) .sign(&signing_key.key) .build(); @@ -541,12 +538,8 @@ pub async fn handle_claim_burn( // Add all versioned account child addresses as inputs // add the commitment substate id as input to the claim burn transaction - let commitment_substate_address = VersionedSubstateId { - substate_id: SubstateId::UnclaimedConfidentialOutput(UnclaimedConfidentialOutputAddress::try_from( - commitment.as_slice(), - )?), - version: 0, - }; + let commitment_substate_address = + SubstateRequirement::unversioned(UnclaimedConfidentialOutputAddress::try_from(commitment.as_slice())?); inputs.push(commitment_substate_address.clone()); info!( @@ -561,7 +554,7 @@ pub async fn handle_claim_burn( .substate_api() .scan_for_substate( &commitment_substate_address.substate_id, - Some(commitment_substate_address.version), + commitment_substate_address.version, ) .await?; let output = output.into_unclaimed_confidential_output().unwrap(); @@ -648,7 +641,7 @@ async fn finish_claiming( account_address: SubstateId, new_account_name: Option, sdk: &DanWalletSdk, - mut inputs: Vec, + mut inputs: Vec, account_public_key: &RistrettoPublicKey, max_fee: Amount, account_secret_key: DerivedKey, @@ -670,7 +663,7 @@ async fn finish_claiming( if new_account_name.is_none() { // Add all versioned account child addresses as inputs unless the account is new let child_addresses = sdk.substate_api().load_dependent_substates(&[&account_address])?; - inputs.extend(child_addresses); + inputs.extend(child_addresses.into_iter().map(Into::into)); instructions.push(Instruction::CallMethod { component_address: account_component_address, method: "deposit".to_string(), @@ -687,9 +680,6 @@ async fn finish_claiming( method: "pay_fee".to_string(), args: args![max_fee], }); - let inputs = inputs - .into_iter() - .map(|s| SubstateRequirement::new(s.substate_id.clone(), Some(s.version))); let transaction = Transaction::builder() .with_fee_instructions(instructions) .with_inputs(inputs) @@ -727,7 +717,6 @@ async fn finish_claiming( } /// Mints free test coins into an account. If an account name is provided which does not exist, that account is created -#[allow(clippy::too_many_lines)] pub async fn handle_create_free_test_coins( context: &HandlerContext, token: Option, @@ -748,24 +737,21 @@ pub async fn handle_create_free_test_coins( return Err(invalid_params("fee", Some("cannot be negative"))); } - let mut inputs = vec![]; + let mut inputs = vec![ + SubstateRequirement::unversioned(XTR_FAUCET_COMPONENT_ADDRESS), + SubstateRequirement::unversioned(XTR_FAUCET_VAULT_ADDRESS), + ]; let accounts_api = sdk.accounts_api(); let (account_address, account_secret_key, new_account_name) = get_or_create_account(&account, &accounts_api, key_id, sdk, &mut inputs)?; let account_public_key = PublicKey::from_secret_key(&account_secret_key.key); - let output = sdk - .confidential_crypto_api() - .generate_output_for_dest(&account_public_key, amount)?; - - let instructions = vec![ - // TODO: We create double what is expected, amount confidential and amount revealed. Should let the caller - // specify these values separately. - Instruction::CreateFreeTestCoins { - revealed_amount: amount, - output: Some(output), - }, - ]; + + let instructions = vec![Instruction::CallMethod { + component_address: XTR_FAUCET_COMPONENT_ADDRESS, + method: "take".to_string(), + args: args![amount], + }]; // ------------------------------ let (tx_id, finalized) = finish_claiming( @@ -799,7 +785,7 @@ fn get_or_create_account( accounts_api: &tari_dan_wallet_sdk::apis::accounts::AccountsApi<'_, T>, key_id: Option, sdk: &DanWalletSdk, - inputs: &mut Vec, + inputs: &mut Vec, ) -> Result<(SubstateId, DerivedKey, Option), anyhow::Error> { let maybe_account = match account { Some(ref addr_or_name) => get_account(addr_or_name, accounts_api).optional()?, @@ -819,7 +805,7 @@ fn get_or_create_account( .key_manager_api() .derive_key(key_manager::TRANSACTION_BRANCH, key_index)?; let account_substate = sdk.substate_api().get_substate(&account.address)?; - inputs.push(account_substate.address); + inputs.push(account_substate.address.into()); (account.address, account_secret_key, None) }, diff --git a/applications/tari_dan_wallet_daemon/src/handlers/nfts.rs b/applications/tari_dan_wallet_daemon/src/handlers/nfts.rs index 6f3336854..c768a3a94 100644 --- a/applications/tari_dan_wallet_daemon/src/handlers/nfts.rs +++ b/applications/tari_dan_wallet_daemon/src/handlers/nfts.rs @@ -180,15 +180,11 @@ async fn mint_account_nft( let sdk = context.wallet_sdk(); sdk.jwt_api().check_auth(token, &[JrpcPermission::Admin])?; - let inputs = sdk + let mut inputs = sdk .substate_api() .locate_dependent_substates(&[account.address.clone()]) .await?; - let mut inputs = inputs - .iter() - .map(|v| SubstateRequirement::new(v.substate_id.clone(), Some(v.version))) - .collect::>(); inputs.extend([SubstateRequirement::new(SubstateId::Component(component_address), None)]); let instructions = vec![ @@ -252,9 +248,6 @@ async fn create_account_nft( .substate_api() .locate_dependent_substates(&[account.address.clone()]) .await?; - let inputs = inputs - .iter() - .map(|addr| SubstateRequirement::new(addr.substate_id.clone(), Some(addr.version))); let transaction = Transaction::builder() .fee_transaction_pay_from_component(account.address.as_component_address().unwrap(), fee) diff --git a/applications/tari_dan_wallet_daemon/src/handlers/transaction.rs b/applications/tari_dan_wallet_daemon/src/handlers/transaction.rs index ca079d91b..684a97813 100644 --- a/applications/tari_dan_wallet_daemon/src/handlers/transaction.rs +++ b/applications/tari_dan_wallet_daemon/src/handlers/transaction.rs @@ -121,13 +121,7 @@ pub async fn handle_submit( .unwrap_or(&req.fee_instructions), )?); let substates = substates.into_iter().collect::>(); - let loaded_dependent_substates = sdk - .substate_api() - .locate_dependent_substates(&substates) - .await? - .into_iter() - .map(Into::into) - .collect(); + let loaded_dependent_substates = sdk.substate_api().locate_dependent_substates(&substates).await?; [req.inputs, loaded_dependent_substates].concat() }; diff --git a/applications/tari_dan_wallet_web_ui/src/routes/AssetVault/Components/ActionMenu.tsx b/applications/tari_dan_wallet_web_ui/src/routes/AssetVault/Components/ActionMenu.tsx index be5d3762d..c89bedf60 100644 --- a/applications/tari_dan_wallet_web_ui/src/routes/AssetVault/Components/ActionMenu.tsx +++ b/applications/tari_dan_wallet_web_ui/src/routes/AssetVault/Components/ActionMenu.tsx @@ -37,7 +37,7 @@ function ActionMenu() { const onClaimFreeCoins = () => { mutate({ accountName: accountName, - amount: 100000, + amount: 200000, fee: 1000, }); }; diff --git a/applications/tari_dan_wallet_web_ui/src/routes/Onboarding/Onboarding.tsx b/applications/tari_dan_wallet_web_ui/src/routes/Onboarding/Onboarding.tsx index 89e14821e..379c9b930 100644 --- a/applications/tari_dan_wallet_web_ui/src/routes/Onboarding/Onboarding.tsx +++ b/applications/tari_dan_wallet_web_ui/src/routes/Onboarding/Onboarding.tsx @@ -45,7 +45,7 @@ function Onboarding() { mutate( { accountName: accountFormState.accountName, - amount: 100000, + amount: 200000, fee: 1000, }, { diff --git a/applications/tari_dan_wallet_web_ui/src/routes/Wallet/Components/Accounts.tsx b/applications/tari_dan_wallet_web_ui/src/routes/Wallet/Components/Accounts.tsx index 754ecbcea..9a07f2166 100644 --- a/applications/tari_dan_wallet_web_ui/src/routes/Wallet/Components/Accounts.tsx +++ b/applications/tari_dan_wallet_web_ui/src/routes/Wallet/Components/Accounts.tsx @@ -161,7 +161,7 @@ function Accounts() { const onClaimFreeCoins = async () => { await mutateCreateFeeTestCoins({ accountName: "TestAccount", - amount: 100000, + amount: 200000, fee: 1000, }); }; diff --git a/applications/tari_indexer/src/dry_run/processor.rs b/applications/tari_indexer/src/dry_run/processor.rs index de32a4026..8faeea7ae 100644 --- a/applications/tari_indexer/src/dry_run/processor.rs +++ b/applications/tari_indexer/src/dry_run/processor.rs @@ -29,11 +29,7 @@ use tari_dan_app_utilities::{ transaction_executor::{TariDanTransactionProcessor, TransactionExecutor as _}, }; use tari_dan_common_types::{Epoch, PeerAddress, SubstateAddress}; -use tari_dan_engine::{ - bootstrap_state, - fees::FeeTable, - state_store::{memory::MemoryStateStore, AtomicDb, StateWriter}, -}; +use tari_dan_engine::{fees::FeeTable, state_store::new_memory_store}; use tari_engine_types::{ commit_result::ExecuteResult, instruction::Instruction, @@ -115,7 +111,7 @@ where TSubstateCache: SubstateCache + 'static let virtual_substates = self.get_virtual_substates(&transaction, epoch).await?; - let state_store = new_state_store(); + let state_store = new_memory_store(); state_store.set_many(found_substates)?; // execute the payload in the WASM engine and return the result @@ -271,11 +267,3 @@ where TSubstateCache: SubstateCache + 'static Ok(virtual_substates) } } - -fn new_state_store() -> MemoryStateStore { - let state_store = MemoryStateStore::new(); - let mut tx = state_store.write_access().unwrap(); - bootstrap_state(&mut tx).unwrap(); - tx.commit().unwrap(); - state_store -} diff --git a/applications/tari_indexer/src/json_rpc/handlers.rs b/applications/tari_indexer/src/json_rpc/handlers.rs index 2e5074f7f..4921e94e0 100644 --- a/applications/tari_indexer/src/json_rpc/handlers.rs +++ b/applications/tari_indexer/src/json_rpc/handlers.rs @@ -617,7 +617,7 @@ impl JsonRpcHandlers { name: t.name, address: t.address, url: t.url, - binary_sha: to_hex(&t.binary_sha), + binary_sha: to_hex(t.binary_sha.as_slice()), height: t.height, }) .collect(), diff --git a/applications/tari_swarm_daemon/Cargo.toml b/applications/tari_swarm_daemon/Cargo.toml index 4f9697207..7712b35ea 100644 --- a/applications/tari_swarm_daemon/Cargo.toml +++ b/applications/tari_swarm_daemon/Cargo.toml @@ -18,6 +18,7 @@ tari_engine_types = { workspace = true } minotari_node_grpc_client = { workspace = true } minotari_wallet_grpc_client = { workspace = true } tari_validator_node_client = { workspace = true } +tari_dan_engine = { workspace = true } anyhow = { workspace = true } async-trait = { workspace = true } diff --git a/applications/tari_swarm_daemon/src/webserver/templates.rs b/applications/tari_swarm_daemon/src/webserver/templates.rs index 81e20289f..0d0329160 100644 --- a/applications/tari_swarm_daemon/src/webserver/templates.rs +++ b/applications/tari_swarm_daemon/src/webserver/templates.rs @@ -11,6 +11,7 @@ use axum::{ }; use log::{error, info}; use tari_crypto::tari_utilities::hex; +use tari_dan_engine::wasm::WasmModule; use tari_engine_types::calculate_template_binary_hash; use tokio::{fs, io::AsyncWriteExt}; use url::Url; @@ -32,6 +33,10 @@ pub async fn upload( let hash = calculate_template_binary_hash(&bytes); let dest_file = format!("{}-{}.wasm", slug(&name), hex::to_hex(hash.as_ref())); let dest_path = context.config().base_dir.join("templates").join(&dest_file); + + // Load the struct name from the wasm. + let loaded = WasmModule::load_template_from_code(&bytes).map_err(|e| UploadError::Other(e.into()))?; + let name = loaded.template_def().template_name().to_string(); let mut file = fs::File::create(dest_path).await?; file.write_all(&bytes).await?; info!("🌐 Upload template {} bytes", bytes.len()); diff --git a/applications/tari_swarm_daemon/webui/src/routes/Main.tsx b/applications/tari_swarm_daemon/webui/src/routes/Main.tsx index 415736ae8..c759a6cd0 100644 --- a/applications/tari_swarm_daemon/webui/src/routes/Main.tsx +++ b/applications/tari_swarm_daemon/webui/src/routes/Main.tsx @@ -1,565 +1,566 @@ // Copyright 2024 The Tari Project // SPDX-License-Identifier: BSD-3-Clause -import {ChangeEvent, useEffect, useState} from "react"; -import {jsonRpc} from "../utils/json_rpc"; -import {ExecutedTransaction} from "../Types.ts"; +import { ChangeEvent, useEffect, useState } from "react"; +import { jsonRpc } from "../utils/json_rpc"; +import { ExecutedTransaction } from "../Types.ts"; enum Executable { - BaseNode = 1, - Wallet = 2, - Miner = 3, - ValidatorNode = 4, - Indexer = 5, - DanWallet = 6, - Templates = 7, + BaseNode = 1, + Wallet = 2, + Miner = 3, + ValidatorNode = 4, + Indexer = 5, + DanWallet = 6, + Templates = 7, } async function jsonRpc2(address: string, method: string, params: any = null) { - let id = 0; - id += 1; - const response = await fetch(address, { - method: "POST", - body: JSON.stringify({ - method: method, - jsonrpc: "2.0", - id: id, - params: params, - }), - headers: { - "Content-Type": "application/json", - }, - }); - const json = await response.json(); - if (json.error) { - throw json.error; - } - return json.result; + let id = 0; + id += 1; + const response = await fetch(address, { + method: "POST", + body: JSON.stringify({ + method: method, + jsonrpc: "2.0", + id: id, + params: params, + }), + headers: { + "Content-Type": "application/json", + }, + }); + const json = await response.json(); + if (json.error) { + throw json.error; + } + return json.result; } -function ExtraInfoVN({name, url, setRow, addTxToPool, autoRefresh, state, horizontal}: { - name: string, - url: string, - setRow: any, - addTxToPool: any, - autoRefresh: boolean, - state: any, - horizontal: boolean +function ExtraInfoVN({ name, url, setRow, addTxToPool, autoRefresh, state, horizontal }: { + name: string, + url: string, + setRow: any, + addTxToPool: any, + autoRefresh: boolean, + state: any, + horizontal: boolean }) { - const [bucket, setBucket] = useState(null); - const [epoch, setEpoch] = useState(null); - const [height, setHeight] = useState(null); - const [pool, setPool] = useState([]); - const [copied, setCopied] = useState(false); - const [missingTxStates, setMissingTxStates] = useState({}); // {tx_id: [vn1, vn2, ...]} - const [publicKey, setPublicKey] = useState(null); - const [peerId, setPeerId] = useState(null); - const [tick, setTick] = useState(0); - useEffect(() => { - if (autoRefresh) { - const timer = setInterval(() => { - setTick(tick + 1); - }, 1000); - return () => clearInterval(timer); - } - }, [tick, autoRefresh]); - useEffect(() => { - jsonRpc2(url, "get_epoch_manager_stats").then((resp) => { - setRow(resp.committee_shard.shard + 1); - setBucket(resp.committee_shard.shard); - setHeight(resp.current_block_height); - setEpoch(resp.current_epoch); - }).catch((resp) => { - console.error("err", resp); - }); - jsonRpc2(url, "get_tx_pool").then((resp) => { - setPool(resp.tx_pool); - addTxToPool(resp.tx_pool.map((tx: any) => tx.transaction.id).sort()); - }); - jsonRpc2(url, "get_identity").then((resp) => { - setPublicKey(resp.public_key); - setPeerId(resp.peer_id); - }); - let missing_tx = new Set(); - for (const k in state) { - if (k != name && state[k].length > 0) { - missing_tx = new Set([...missing_tx, ...state[k]]); - } - } - const my_txs = new Set(state[name]); - missing_tx = new Set([...missing_tx].filter((tx) => !my_txs.has(tx))); - const promises = Array.from(missing_tx).map((tx) => jsonRpc2(url, "get_transaction", [tx]) - .then((resp) => resp.transaction as ExecutedTransaction) - .catch((resp) => { - throw {resp, tx}; - })); - Promise.allSettled(promises).then((results) => { - const newState: Map = new Map(); - for (const result of results) { - if (result.status == "fulfilled") { - const tx = result.value; - newState.set(tx.transaction.id, { - known: true, - abort_details: tx.abort_details, - final_decision: tx.final_decision, - }); - } else { - newState.set(result.reason.tx, {known: false}); - } - } - if (JSON.stringify(newState) != JSON.stringify(missingTxStates)) { - setMissingTxStates(newState); - } - }); - // for (let tx of missing_tx) { - // jsonRpc2(url, "get_transaction", [tx]).then((resp) => { - // setMissingTxStates((state) => ({ ...state, [tx]: { known: true, abort_details: resp.transaction.abort_details, final_decision: resp.transaction.final_decision } })); - // // console.log(resp); - // }).catch((resp) => { setMissingTxStates((state) => ({ ...state, [tx]: { know: false } })); }); - // } - }, [tick, state]); - const shorten = (str: string) => { - if (str.length > 20) { - return str.slice(0, 3) + "..." + str.slice(-3); - } - return str; - }; - useEffect(() => { - if (copied) { - setTimeout(() => setCopied(false), 1000); - } - }, [copied]); - const copyToClipboard = (str: string) => { - setCopied(true); - navigator.clipboard.writeText(str); - }; - const showMissingTx = (missingTxStates: { [key: string]: any }) => { - if (Object.keys(missingTxStates).length == 0) { - return null; - } - return ( - <> -
-

Transaction from others TXs pools

-
- Tx Id - Known - Abort details - Final decision - {Object.keys(missingTxStates).map((tx) => { - const {known, abort_details, final_decision} = missingTxStates[tx]; - return ( - <> -
copyToClipboard(tx)}>{copied && "Copied" || shorten(tx)}
-
{known && "Yes" || "No"}
-
{abort_details || unknown}
-
{final_decision || unknown}
- - ); - })} -
- ); - }; - const showPool = (pool: Array) => { - if (pool.length == 0) { - return null; + const [bucket, setBucket] = useState(null); + const [epoch, setEpoch] = useState(null); + const [height, setHeight] = useState(null); + const [pool, setPool] = useState([]); + const [copied, setCopied] = useState(false); + const [missingTxStates, setMissingTxStates] = useState({}); // {tx_id: [vn1, vn2, ...]} + const [publicKey, setPublicKey] = useState(null); + const [peerId, setPeerId] = useState(null); + const [tick, setTick] = useState(0); + useEffect(() => { + if (autoRefresh) { + const timer = setInterval(() => { + setTick(tick + 1); + }, 1000); + return () => clearInterval(timer); + } + }, [tick, autoRefresh]); + useEffect(() => { + jsonRpc2(url, "get_epoch_manager_stats").then((resp) => { + setRow(resp.committee_shard.shard + 1); + setBucket(resp.committee_shard.shard); + setHeight(resp.current_block_height); + setEpoch(resp.current_epoch); + }).catch((resp) => { + console.error("err", resp); + }); + jsonRpc2(url, "get_tx_pool").then((resp) => { + setPool(resp.tx_pool); + addTxToPool(resp.tx_pool.filter((tx: any) => Boolean(tx?.transaction)).map((tx: any) => tx.transaction.id).sort()); + }); + jsonRpc2(url, "get_identity").then((resp) => { + setPublicKey(resp.public_key); + setPeerId(resp.peer_id); + }); + let missing_tx = new Set(); + for (const k in state) { + if (k != name && state[k].length > 0) { + missing_tx = new Set([...missing_tx, ...state[k]]); + } + } + const my_txs = new Set(state[name]); + missing_tx = new Set([...missing_tx].filter((tx) => !my_txs.has(tx))); + const promises = Array.from(missing_tx).map((tx) => jsonRpc2(url, "get_transaction", [tx]) + .then((resp) => resp.transaction as ExecutedTransaction) + .catch((resp) => { + throw { resp, tx }; + })); + Promise.allSettled(promises).then((results) => { + const newState: Map = new Map(); + for (const result of results) { + if (result.status == "fulfilled") { + const tx = result.value; + newState.set(tx.transaction.id, { + known: true, + abort_details: tx.abort_details, + final_decision: tx.final_decision, + }); + } else { + newState.set(result.reason.tx, { known: false }); } - return (<> -
-

Pool transaction

- - - - - - - - {pool.map(({atom}, i) => ( - - - - - - ))} -
Tx IdReadyDecisionStage
copyToClipboard(atom.id)}>{copied && "Copied" || shorten(atom.id)}{atom.is_ready && "Yes" || "No"}{atom.decision || "_"}{atom.stage}
- - ); - }; + } + if (JSON.stringify(newState) != JSON.stringify(missingTxStates)) { + setMissingTxStates(newState); + } + }); + // for (let tx of missing_tx) { + // jsonRpc2(url, "get_transaction", [tx]).then((resp) => { + // setMissingTxStates((state) => ({ ...state, [tx]: { known: true, abort_details: resp.transaction.abort_details, final_decision: resp.transaction.final_decision } })); + // // console.log(resp); + // }).catch((resp) => { setMissingTxStates((state) => ({ ...state, [tx]: { know: false } })); }); + // } + }, [tick, state]); + const shorten = (str: string) => { + if (str.length > 20) { + return str.slice(0, 3) + "..." + str.slice(-3); + } + return str; + }; + useEffect(() => { + if (copied) { + setTimeout(() => setCopied(false), 1000); + } + }, [copied]); + const copyToClipboard = (str: string) => { + setCopied(true); + navigator.clipboard.writeText(str); + }; + const showMissingTx = (missingTxStates: { [key: string]: any }) => { + if (Object.keys(missingTxStates).length == 0) { + return null; + } return ( -
-
-
-
Bucket
-
Height
-
Epoch
-
Public key
-
Peer id
-
{bucket}
-
{height}
-
{epoch}
-
{publicKey}
-
{peerId}
-
- {showPool(pool)} - {showMissingTx(missingTxStates)} + <> +
+

Transaction from others TXs pools

+
+ Tx Id + Known + Abort details + Final decision + {Object.keys(missingTxStates).map((tx) => { + const { known, abort_details, final_decision } = missingTxStates[tx]; + return ( + <> +
copyToClipboard(tx)}>{copied && "Copied" || shorten(tx)}
+
{known && "Yes" || "No"}
+
{abort_details || unknown}
+
{final_decision || unknown}
+ + ); + })}
+ ); + }; + const showPool = (pool: Array) => { + if (pool.length == 0) { + return null; + } + return (<> +
+

Pool transaction

+ + + + + + + + {pool.map((atom, i) => ( + + + + + + ))} +
Tx IdReadyDecisionStage
copyToClipboard(atom.transaction_id)}>{copied && "Copied" || shorten(atom.transaction_id)}{atom.is_ready && "Yes" || "No"}{atom.decision || "_"}{atom.stage}
+ ); + }; + return ( +
+
+
+
Bucket
+
Height
+
Epoch
+
Public key
+
Peer id
+
{bucket}
+
{height}
+
{epoch}
+
{publicKey}
+
{peerId}
+
+ {showPool(pool)} + {showMissingTx(missingTxStates)} +
+ ); } function ShowInfo(params: any) { - const { - children, - executable, - name, - node, - logs, - stdoutLogs, - showLogs, - autoRefresh, - updateState, - state, - horizontal, - onReload, - } = params; - const [row, setRow] = useState(1); - // const [unprocessedTx, setUnprocessedTx] = useState([]); - const nameInfo = name && ( -
-

-            Name
-            {name}
-        
- ); - const jrpcInfo = node?.jrpc && ( - - ); - const grpcInfo = node?.grpc && ( + const { + children, + executable, + name, + node, + logs, + stdoutLogs, + showLogs, + autoRefresh, + updateState, + state, + horizontal, + onReload, + } = params; + const [row, setRow] = useState(1); + // const [unprocessedTx, setUnprocessedTx] = useState([]); + const nameInfo = name && ( +
+

+      Name
+      {name}
+    
+ ); + const jrpcInfo = node?.jrpc && ( + + ); + const grpcInfo = node?.grpc && ( +
+ GRPC + {node.grpc} +
+ ); + const httpInfo = node?.web && ( +
+ HTTP + {node.web} +
+ ); + const logInfo = logs && ( + <> +
+ Logs
- GRPC - {node.grpc} + {logs?.map((e: any) => ( + + ))}
- ); - const httpInfo = node?.web && ( +
+
- HTTP - {node.web} -
- ); - const logInfo = logs && ( - <> -
- Logs -
- {logs?.map((e: any) => ( - - ))} -
+ {stdoutLogs?.map((e: any) => ( +
+ stdout
-
-
- {stdoutLogs?.map((e: any) => ( -
- stdout -
- ))} -
-
- - ); - const addTxToPool = (tx: any) => { - updateState({name: name, state: tx}); - }; + ))} +
+
+ + ); + const addTxToPool = (tx: any) => { + updateState({ name: name, state: tx }); + }; - const handleOnStart = () => { - jsonRpc("start", name).then(onReload); - }; + const handleOnStart = () => { + jsonRpc("start", name).then(onReload); + }; - const handleOnStop = () => { - jsonRpc("stop", name).then(onReload); - }; + const handleOnStop = () => { + jsonRpc("stop", name).then(onReload); + }; - const handleDeleteData = () => { - jsonRpc("delete_data", {name}).then(onReload); - }; + const handleDeleteData = () => { + jsonRpc("delete_data", { name }).then(onReload); + }; - return ( -
- {nameInfo} - {httpInfo} - {jrpcInfo} - {grpcInfo} - {showLogs && logInfo} - {executable === Executable.ValidatorNode && node?.jrpc && - { - if (new_row != row) setRow(new_row); - }} addTxToPool={addTxToPool} autoRefresh={autoRefresh} state={state} horizontal={horizontal}/>} - {executable !== Executable.Templates && - handleOnStart()} - onStop={() => handleOnStop()} - onDeleteData={() => handleDeleteData()} - />} - {children} -
- ); + return ( +
+ {nameInfo} + {httpInfo} + {jrpcInfo} + {grpcInfo} + {showLogs && logInfo} + {executable === Executable.ValidatorNode && node?.jrpc && + { + if (new_row != row) setRow(new_row); + }} addTxToPool={addTxToPool} autoRefresh={autoRefresh} state={state} horizontal={horizontal} />} + {executable !== Executable.Templates && + handleOnStart()} + onStop={() => handleOnStop()} + onDeleteData={() => handleDeleteData()} + />} + {children} +
+ ); } interface NodeControlsProps { - isRunning: boolean, - onStart: () => void; - onStop: () => void; - onDeleteData: () => void; + isRunning: boolean, + onStart: () => void; + onStop: () => void; + onDeleteData: () => void; } -function NodeControls({isRunning, onStart, onStop, onDeleteData}: NodeControlsProps) { - return <> - - - - ; +function NodeControls({ isRunning, onStart, onStop, onDeleteData }: NodeControlsProps) { + return <> + + + + ; } function ShowInfos(params: any) { - const {nodes, logs, stdoutLogs, name, showLogs, autoRefresh, horizontal, onReload} = params; - const [state, setState] = useState<{ [key: string]: any }>({}); - let executable: Executable; - switch (name) { - case "vn": - executable = Executable.ValidatorNode; - break; - case "dan": - executable = Executable.DanWallet; - break; - case "indexer": - executable = Executable.Indexer; - break; - default: - console.log(`Unknown name ${name}`); - break; + const { nodes, logs, stdoutLogs, name, showLogs, autoRefresh, horizontal, onReload } = params; + const [state, setState] = useState<{ [key: string]: any }>({}); + let executable: Executable; + switch (name) { + case "vn": + executable = Executable.ValidatorNode; + break; + case "dan": + executable = Executable.DanWallet; + break; + case "indexer": + executable = Executable.Indexer; + break; + default: + console.log(`Unknown name ${name}`); + break; + } + const updateState = (partial_state: { name: string, state: any }) => { + if (JSON.stringify(state[partial_state.name]) != JSON.stringify(partial_state.state)) { + setState((state) => ({ ...state, [partial_state.name]: partial_state.state })); } - const updateState = (partial_state: { name: string, state: any }) => { - if (JSON.stringify(state[partial_state.name]) != JSON.stringify(partial_state.state)) { - setState((state) => ({...state, [partial_state.name]: partial_state.state})); - } - }; - return ( -
- {Object.keys(nodes).map((index) => - )} -
- ); + }; + return ( +
+ {Object.keys(nodes).map((index) => + )} +
+ ); } export default function Main() { - const [vns, setVns] = useState({}); - const [danWallet, setDanWallets] = useState({}); - const [indexers, setIndexers] = useState({}); - const [node, setNode] = useState<{ grpc: any }>(); - const [wallet, _setWallet] = useState(); - const [logs, setLogs] = useState({}); - const [stdoutLogs, setStdoutLogs] = useState({}); - const [connectorSample, setConnectorSample] = useState(null); - const [selectedFile, setSelectedFile] = useState(null); - const [showLogs, setShowLogs] = useState(false); - const [autoRefresh, setAutoRefresh] = useState(true); - const [horizontal, setHorizontal] = useState(false); - const [instances, setInstances] = useState([]); + const [vns, setVns] = useState({}); + const [danWallet, setDanWallets] = useState({}); + const [indexers, setIndexers] = useState({}); + const [node, setNode] = useState<{ grpc: any }>(); + const [wallet, _setWallet] = useState(); + const [logs, setLogs] = useState({}); + const [stdoutLogs, setStdoutLogs] = useState({}); + const [connectorSample, setConnectorSample] = useState(null); + const [selectedFile, setSelectedFile] = useState(null); + const [showLogs, setShowLogs] = useState(false); + const [autoRefresh, setAutoRefresh] = useState(true); + const [horizontal, setHorizontal] = useState(false); + const [instances, setInstances] = useState([]); - const getInfo = () => { - jsonRpc("vns") + const getInfo = () => { + jsonRpc("vns") + .then((resp) => { + setVns(resp.nodes); + Object.keys(resp.nodes).map((index) => { + jsonRpc("get_logs", `vn ${index}`) .then((resp) => { - setVns(resp.nodes); - Object.keys(resp.nodes).map((index) => { - jsonRpc("get_logs", `vn ${index}`) - .then((resp) => { - setLogs((state: any) => ({...state, [`vn ${index}`]: resp})); - }) - .catch((error) => console.log(error)); - jsonRpc("get_stdout", `vn ${index}`) - .then((resp) => { - setStdoutLogs((state: any) => ({...state, [`vn ${index}`]: resp})); - }) - .catch((error) => console.log(error)); - }); + setLogs((state: any) => ({ ...state, [`vn ${index}`]: resp })); }) - .catch((error) => { - console.log(error); - }); - jsonRpc("dan_wallets") + .catch((error) => console.log(error)); + jsonRpc("get_stdout", `vn ${index}`) .then((resp) => { - setDanWallets(resp.nodes); - Object.keys(resp.nodes).map((index) => { - jsonRpc("get_logs", `dan ${index}`) - .then((resp) => { - setLogs((state: any) => ({...state, [`dan ${index}`]: resp})); - }) - .catch((error) => console.log(error)); - jsonRpc("get_stdout", `dan ${index}`) - .then((resp) => { - setStdoutLogs((state: any) => ({...state, [`dan ${index}`]: resp})); - }) - .catch((error) => console.log(error)); - }); + setStdoutLogs((state: any) => ({ ...state, [`vn ${index}`]: resp })); }) - .catch((error) => { - console.log(error); - }); - jsonRpc("indexers") + .catch((error) => console.log(error)); + }); + }) + .catch((error) => { + console.log(error); + }); + jsonRpc("dan_wallets") + .then((resp) => { + setDanWallets(resp.nodes); + Object.keys(resp.nodes).map((index) => { + jsonRpc("get_logs", `dan ${index}`) .then((resp) => { - setIndexers(resp.nodes); - Object.keys(resp.nodes).map((index) => { - jsonRpc("get_logs", `indexer ${index}`) - .then((resp) => { - setLogs((state: any) => ({...state, [`indexer ${index}`]: resp})); - }) - .catch((error) => console.log(error)); - jsonRpc("get_stdout", `indexer ${index}`) - .then((resp) => { - setStdoutLogs((state: any) => ({...state, [`indexer ${index}`]: resp})); - }) - .catch((error) => console.log(error)); - }); + setLogs((state: any) => ({ ...state, [`dan ${index}`]: resp })); }) - .catch((error) => { - console.log(error); - }); - jsonRpc("http_connector") + .catch((error) => console.log(error)); + jsonRpc("get_stdout", `dan ${index}`) .then((resp) => { - setConnectorSample(resp); + setStdoutLogs((state: any) => ({ ...state, [`dan ${index}`]: resp })); }) - .catch((error) => { - console.log(error); - }); - jsonRpc("get_logs", "node").then((resp) => { - setLogs((state: any) => ({...state, node: resp})); - }); - jsonRpc("get_logs", "wallet").then((resp) => { - setLogs((state: any) => ({...state, wallet: resp})); - }); - jsonRpc("get_logs", "miner").then((resp) => { - setLogs((state: any) => ({...state, miner: resp})); + .catch((error) => console.log(error)); }); - jsonRpc("get_stdout", "node").then((resp) => { - setStdoutLogs((state: any) => ({...state, node: resp})); - }); - jsonRpc("get_stdout", "wallet").then((resp) => { - setStdoutLogs((state: any) => ({...state, wallet: resp})); - }); - jsonRpc("get_stdout", "miner").then((resp) => { - setStdoutLogs((state: any) => ({...state, miner: resp})); + }) + .catch((error) => { + console.log(error); + }); + jsonRpc("indexers") + .then((resp) => { + setIndexers(resp.nodes); + Object.keys(resp.nodes).map((index) => { + jsonRpc("get_logs", `indexer ${index}`) + .then((resp) => { + setLogs((state: any) => ({ ...state, [`indexer ${index}`]: resp })); + }) + .catch((error) => console.log(error)); + jsonRpc("get_stdout", `indexer ${index}`) + .then((resp) => { + setStdoutLogs((state: any) => ({ ...state, [`indexer ${index}`]: resp })); + }) + .catch((error) => console.log(error)); }); - jsonRpc("grpc_node").then((resp) => setNode({grpc: resp})); - jsonRpc("list_instances", {by_type: null}).then(({instances}) => setInstances(instances)); - }; + }) + .catch((error) => { + console.log(error); + }); + jsonRpc("http_connector") + .then((resp) => { + setConnectorSample(resp); + }) + .catch((error) => { + console.log(error); + }); + jsonRpc("get_logs", "node").then((resp) => { + setLogs((state: any) => ({ ...state, node: resp })); + }); + jsonRpc("get_logs", "wallet").then((resp) => { + setLogs((state: any) => ({ ...state, wallet: resp })); + }); + jsonRpc("get_logs", "miner").then((resp) => { + setLogs((state: any) => ({ ...state, miner: resp })); + }); + jsonRpc("get_stdout", "node").then((resp) => { + setStdoutLogs((state: any) => ({ ...state, node: resp })); + }); + jsonRpc("get_stdout", "wallet").then((resp) => { + setStdoutLogs((state: any) => ({ ...state, wallet: resp })); + }); + jsonRpc("get_stdout", "miner").then((resp) => { + setStdoutLogs((state: any) => ({ ...state, miner: resp })); + }); + jsonRpc("grpc_node").then((resp) => setNode({ grpc: resp })); + jsonRpc("list_instances", { by_type: null }).then(({ instances }) => setInstances(instances)); + }; - useEffect(getInfo, []); + useEffect(getInfo, []); - const handleFileChange = (event: ChangeEvent) => { - const file = event.target.files?.item(0); - if (file) { - setSelectedFile(file); - } - }; + const handleFileChange = (event: ChangeEvent) => { + const file = event.target.files?.item(0); + if (file) { + setSelectedFile(file); + } + }; - const handleFileUpload = () => { - if (!selectedFile) { - return; - } - const address = import.meta.env.VITE_DAEMON_JRPC_ADDRESS || ""; //Current host - const formData = new FormData(); - formData.append("file", selectedFile); - fetch(`${address}/upload_template`, {method: "POST", body: formData}).then((resp) => { - console.log("resp", resp); - }); - }; - return ( -
- - - -
Base layer
-
- - - - - -
-
-
Validator Nodes
- -
-
-
Dan Wallets
- -
-
-
Indexers
- -
-
Templates
-
- - - - -
- {connectorSample && ( - - )} -
All Instances
-
- - - - - - - - - - {instances.filter((i: any) => i.is_running).map((instance: any, i: number) => - - - - )} - -
NamePortsBase Path
#{instance.id} {instance.name} ({instance.instance_type}){JSON.stringify(instance.ports)}{instance.base_path}
-
+ const handleFileUpload = () => { + if (!selectedFile) { + return; + } + const address = import.meta.env.VITE_DAEMON_JRPC_ADDRESS || ""; //Current host + const formData = new FormData(); + formData.append("file", selectedFile); + fetch(`${address}/upload_template`, { method: "POST", body: formData }).then((resp) => { + console.log("resp", resp); + }); + }; + return ( +
+ + + +
Base layer
+
+ + + + + +
+
+
Validator Nodes
+ +
+
+
Dan Wallets
+ +
+
+
Indexers
+ +
+
Templates
+
+ + + + +
+ {connectorSample && ( + - ); + )} +
All Instances
+
+ + + + + + + + + + {instances.filter((i: any) => i.is_running).map((instance: any, i: number) => + + + + )} + +
NamePortsBase Path
#{instance.id} {instance.name} ({instance.instance_type}){JSON.stringify(instance.ports)}{instance.base_path}
+
+
+ ); } diff --git a/applications/tari_validator_node/Cargo.toml b/applications/tari_validator_node/Cargo.toml index f30828688..b310030fa 100644 --- a/applications/tari_validator_node/Cargo.toml +++ b/applications/tari_validator_node/Cargo.toml @@ -41,6 +41,8 @@ tari_consensus = { workspace = true } tari_state_store_sqlite = { workspace = true } tari_networking = { workspace = true } tari_rpc_framework = { workspace = true } +tari_template_builtin = { workspace = true } + sqlite_message_logger = { workspace = true } libp2p = { workspace = true } diff --git a/applications/tari_validator_node/src/bootstrap.rs b/applications/tari_validator_node/src/bootstrap.rs index 9a65a1c87..3cdbb556b 100644 --- a/applications/tari_validator_node/src/bootstrap.rs +++ b/applications/tari_validator_node/src/bootstrap.rs @@ -30,6 +30,7 @@ use minotari_app_utilities::identity_management; use serde::Serialize; use sqlite_message_logger::SqliteMessageLogger; use tari_base_node_client::grpc::GrpcBaseNodeClient; +use tari_bor::cbor; use tari_common::{ configuration::Network, exit_codes::{ExitCode, ExitError}, @@ -48,11 +49,11 @@ use tari_dan_app_utilities::{ template_manager::{implementation::TemplateManager, interface::TemplateManagerHandle}, transaction_executor::TariDanTransactionProcessor, }; -use tari_dan_common_types::{shard::Shard, Epoch, NodeAddressable, NodeHeight, PeerAddress, SubstateAddress}; +use tari_dan_common_types::{shard::Shard, Epoch, NodeAddressable, NodeHeight, PeerAddress}; use tari_dan_engine::fees::FeeTable; use tari_dan_p2p::TariMessagingSpec; use tari_dan_storage::{ - consensus_models::{Block, BlockId, ExecutedTransaction, SubstateRecord}, + consensus_models::{Block, BlockId, SubstateRecord}, global::GlobalDb, StateStore, StateStoreReadTransaction, @@ -60,7 +61,13 @@ use tari_dan_storage::{ StorageError, }; use tari_dan_storage_sqlite::global::SqliteGlobalDbAdapter; -use tari_engine_types::{resource::Resource, substate::SubstateId}; +use tari_engine_types::{ + component::{ComponentBody, ComponentHeader}, + resource::Resource, + resource_container::ResourceContainer, + substate::{SubstateId, SubstateValue}, + vault::Vault, +}; use tari_epoch_manager::base_layer::{EpochManagerConfig, EpochManagerHandle}; use tari_indexer_lib::substate_scanner::SubstateScanner; use tari_networking::{MessagingMode, NetworkingHandle, RelayCircuitLimits, RelayReservationLimits, SwarmConfig}; @@ -69,12 +76,17 @@ use tari_shutdown::ShutdownSignal; use tari_state_store_sqlite::SqliteStateStore; use tari_template_lib::{ auth::ResourceAccessRules, - constants::{CONFIDENTIAL_TARI_RESOURCE_ADDRESS, PUBLIC_IDENTITY_RESOURCE_ADDRESS}, - models::Metadata, - prelude::{OwnerRule, ResourceType}, + constants::{ + CONFIDENTIAL_TARI_RESOURCE_ADDRESS, + PUBLIC_IDENTITY_RESOURCE_ADDRESS, + XTR_FAUCET_COMPONENT_ADDRESS, + XTR_FAUCET_VAULT_ADDRESS, + }, + models::{Amount, EntityId, Metadata}, + prelude::{ComponentAccessRules, OwnerRule, ResourceType}, resource::TOKEN_SYMBOL, }; -use tari_transaction::Transaction; +use tari_transaction::{Transaction, VersionedSubstateId}; use tari_validator_node_rpc::client::TariValidatorNodeRpcClientFactory; use tokio::{sync::mpsc, task::JoinHandle}; @@ -86,24 +98,13 @@ use crate::{ p2p::{ create_tari_validator_node_rpc_service, services::{ - mempool::{ - self, - ClaimFeeTransactionValidator, - EpochRangeValidator, - FeeTransactionValidator, - HasInputs, - HasInvolvedShards, - MempoolError, - MempoolHandle, - OutputsDontExistLocally, - TemplateExistsValidator, - TransactionSignatureValidator, - Validator, - }, + mempool::{self, MempoolHandle}, messaging::{ConsensusInboundMessaging, ConsensusOutboundMessaging, Gossip}, }, }, substate_resolver::TariSubstateResolver, + transaction_validators::{FeeTransactionValidator, HasInputs, TemplateExistsValidator, TransactionValidationError}, + validator::Validator, validator_registration_file::ValidatorRegistrationFile, virtual_substate::VirtualSubstateManager, ApplicationConfig, @@ -227,13 +228,8 @@ pub async fn spawn_services( per_log_cost: 1, } }; - let payload_processor = TariDanTransactionProcessor::new(config.network, template_manager.clone(), fee_table); - - let validator_node_client_factory = TariValidatorNodeRpcClientFactory::new(networking.clone()); - - // Consensus - let (tx_executed_transaction, rx_executed_transaction) = mpsc::channel(10); + // Messaging let local_address = PeerAddress::from(keypair.public_key().clone()); let (loopback_sender, loopback_receiver) = mpsc::unbounded_channel(); let inbound_messaging = ConsensusInboundMessaging::new( @@ -245,19 +241,26 @@ pub async fn spawn_services( let outbound_messaging = ConsensusOutboundMessaging::new(loopback_sender, networking.clone(), message_logger.clone()); - let transaction_executor = TariDanBlockTransactionExecutor::new(epoch_manager.clone(), payload_processor.clone()); + // Consensus + let payload_processor = TariDanTransactionProcessor::new(config.network, template_manager.clone(), fee_table); + let transaction_executor = TariDanBlockTransactionExecutor::new( + payload_processor.clone(), + consensus::create_transaction_validator(&config.validator_node, template_manager.clone()), + ); #[cfg(feature = "metrics")] let metrics = PrometheusConsensusMetrics::new(state_store.clone(), metrics_registry); #[cfg(not(feature = "metrics"))] let metrics = NoopHooks; - let (consensus_join_handle, consensus_handle, rx_consensus_to_mempool) = consensus::spawn( + let validator_node_client_factory = TariValidatorNodeRpcClientFactory::new(networking.clone()); + let signing_service = consensus::TariSignatureService::new(keypair.clone()); + let (consensus_join_handle, consensus_handle) = consensus::spawn( config.network, state_store.clone(), - keypair.clone(), + local_address, + signing_service, epoch_manager.clone(), - rx_executed_transaction, inbound_messaging, outbound_messaging.clone(), validator_node_client_factory.clone(), @@ -269,41 +272,13 @@ pub async fn spawn_services( .await; handles.push(consensus_join_handle); - // substate cache - let substate_cache_dir = config.common.base_path.join("substate_cache"); - let substate_cache = SubstateFileCache::new(substate_cache_dir) - .map_err(|e| ExitError::new(ExitCode::ConfigError, format!("Substate cache error: {}", e)))?; - - // Mempool - let virtual_substate_manager = VirtualSubstateManager::new(state_store.clone(), epoch_manager.clone()); - let scanner = SubstateScanner::new( - epoch_manager.clone(), - validator_node_client_factory.clone(), - substate_cache, - ); - let substate_resolver = TariSubstateResolver::new( - state_store.clone(), - scanner, - epoch_manager.clone(), - virtual_substate_manager.clone(), - ); - let gossip = Gossip::new(networking.clone(), rx_gossip_messages); let (mempool, join_handle) = mempool::spawn( gossip, - tx_executed_transaction, epoch_manager.clone(), - payload_processor.clone(), - substate_resolver.clone(), - create_mempool_before_execute_validator( - &config.validator_node, - template_manager.clone(), - epoch_manager.clone(), - ), - create_mempool_after_execute_validator(state_store.clone()), + create_mempool_transaction_validator(&config.validator_node, template_manager.clone()), state_store.clone(), - rx_consensus_to_mempool, consensus_handle.clone(), #[cfg(feature = "metrics")] metrics_registry, @@ -328,6 +303,25 @@ pub async fn spawn_services( ); handles.push(join_handle); + // substate cache + let substate_cache_dir = config.common.base_path.join("substate_cache"); + let substate_cache = SubstateFileCache::new(substate_cache_dir) + .map_err(|e| ExitError::new(ExitCode::ConfigError, format!("Substate cache error: {}", e)))?; + + // Dry-run services (TODO: should we implement dry-run on validator nodes, or just keep it in the indexer?) + let virtual_substate_manager = VirtualSubstateManager::new(state_store.clone(), epoch_manager.clone()); + let scanner = SubstateScanner::new( + epoch_manager.clone(), + validator_node_client_factory.clone(), + substate_cache, + ); + let substate_resolver = TariSubstateResolver::new( + state_store.clone(), + scanner, + epoch_manager.clone(), + virtual_substate_manager.clone(), + ); + spawn_p2p_rpc( config, &mut networking, @@ -447,88 +441,111 @@ async fn spawn_p2p_rpc( Ok(()) } -// TODO: Figure out the best way to have the engine shard store mirror these bootstrapped states. fn bootstrap_state(tx: &mut TTx, network: Network) -> Result<(), StorageError> where TTx: StateStoreWriteTransaction + Deref, TTx::Target: StateStoreReadTransaction, TTx::Addr: NodeAddressable + Serialize, { - let genesis_block = Block::genesis(network, Epoch(0), Shard::zero()); - let substate_id = SubstateId::Resource(PUBLIC_IDENTITY_RESOURCE_ADDRESS); - let substate_address = SubstateAddress::from_substate_id(&substate_id, 0); - let mut metadata: Metadata = Default::default(); - metadata.insert(TOKEN_SYMBOL, "ID".to_string()); - if !SubstateRecord::exists(&**tx, &substate_address)? { - // Create the resource for public identity - SubstateRecord { - substate_id, - version: 0, - substate_value: Resource::new( - ResourceType::NonFungible, - None, - OwnerRule::None, - ResourceAccessRules::new(), - metadata, - None, - None, - ) - .into(), - state_hash: Default::default(), - created_by_transaction: Default::default(), - created_justify: *genesis_block.justify().id(), - created_block: BlockId::zero(), - created_height: NodeHeight(0), - created_by_shard: Shard::zero(), - created_at_epoch: Epoch(0), - destroyed: None, - } - .create(tx)?; + // Assume that if the public identity resource exists, then the rest of the state has been bootstrapped + if SubstateRecord::exists( + &**tx, + &VersionedSubstateId::new(PUBLIC_IDENTITY_RESOURCE_ADDRESS.into(), 0), + )? { + return Ok(()); } - let substate_id = SubstateId::Resource(CONFIDENTIAL_TARI_RESOURCE_ADDRESS); - let substate_address = SubstateAddress::from_substate_id(&substate_id, 0); - let mut metadata = Metadata::new(); - metadata.insert(TOKEN_SYMBOL, "tXTR".to_string()); - if !SubstateRecord::exists(&**tx, &substate_address)? { - SubstateRecord { - substate_id, - version: 0, - substate_value: Resource::new( - ResourceType::Confidential, - None, - OwnerRule::None, - ResourceAccessRules::new(), - metadata, - None, - None, - ) - .into(), - state_hash: Default::default(), - created_by_transaction: Default::default(), - created_justify: *genesis_block.justify().id(), - created_block: BlockId::zero(), - created_height: NodeHeight(0), - created_at_epoch: Epoch(0), - created_by_shard: Shard::zero(), - destroyed: None, - } - .create(tx)?; + let value = Resource::new( + ResourceType::NonFungible, + None, + OwnerRule::None, + ResourceAccessRules::new(), + Metadata::from([(TOKEN_SYMBOL, "ID".to_string())]), + None, + None, + ); + create_substate(tx, network, PUBLIC_IDENTITY_RESOURCE_ADDRESS, value)?; + + let mut xtr_resource = Resource::new( + ResourceType::Confidential, + None, + OwnerRule::None, + ResourceAccessRules::new(), + Metadata::from([(TOKEN_SYMBOL, "XTR".to_string())]), + None, + None, + ); + + // Create faucet component + if !matches!(network, Network::MainNet) { + let value = ComponentHeader { + template_address: tari_template_builtin::FAUCET_TEMPLATE_ADDRESS, + module_name: "XtrFaucet".to_string(), + owner_key: None, + owner_rule: OwnerRule::None, + access_rules: ComponentAccessRules::allow_all(), + entity_id: EntityId::default(), + body: ComponentBody { + state: cbor!({"vault" => XTR_FAUCET_VAULT_ADDRESS}).unwrap(), + }, + }; + create_substate(tx, network, XTR_FAUCET_COMPONENT_ADDRESS, value)?; + + xtr_resource.increase_total_supply(Amount::MAX); + let value = Vault::new(ResourceContainer::Confidential { + address: CONFIDENTIAL_TARI_RESOURCE_ADDRESS, + commitments: Default::default(), + revealed_amount: Amount::MAX, + locked_commitments: Default::default(), + locked_revealed_amount: Default::default(), + }); + + create_substate(tx, network, XTR_FAUCET_VAULT_ADDRESS, value)?; } + create_substate(tx, network, CONFIDENTIAL_TARI_RESOURCE_ADDRESS, xtr_resource)?; + Ok(()) } -fn create_mempool_before_execute_validator( +fn create_substate( + tx: &mut TTx, + network: Network, + substate_id: TId, + value: TVal, +) -> Result<(), StorageError> +where + TTx: StateStoreWriteTransaction + Deref, + TTx::Target: StateStoreReadTransaction, + TTx::Addr: NodeAddressable + Serialize, + TId: Into, + TVal: Into, +{ + let genesis_block = Block::genesis(network, Epoch(0), Shard::zero()); + let substate_id = substate_id.into(); + let id = VersionedSubstateId::new(substate_id, 0); + SubstateRecord { + substate_id: id.substate_id, + version: id.version, + substate_value: value.into(), + state_hash: Default::default(), + created_by_transaction: Default::default(), + created_justify: *genesis_block.justify().id(), + created_block: BlockId::zero(), + created_height: NodeHeight(0), + created_by_shard: Shard::zero(), + created_at_epoch: Epoch(0), + destroyed: None, + } + .create(tx)?; + Ok(()) +} + +fn create_mempool_transaction_validator( config: &ValidatorNodeConfig, template_manager: TemplateManager, - epoch_manager: EpochManagerHandle, -) -> impl Validator { - let mut validator = TransactionSignatureValidator - .and_then(TemplateExistsValidator::new(template_manager)) - .and_then(EpochRangeValidator::new(epoch_manager.clone())) - .and_then(ClaimFeeTransactionValidator::new(epoch_manager)) - .boxed(); +) -> impl Validator { + let mut validator = TemplateExistsValidator::new(template_manager).boxed(); if !config.no_fees { // A transaction without fee payment may have 0 inputs. validator = HasInputs::new() @@ -539,8 +556,8 @@ fn create_mempool_before_execute_validator( validator } -fn create_mempool_after_execute_validator( - store: SqliteStateStore, -) -> impl Validator { - HasInvolvedShards::new().and_then(OutputsDontExistLocally::new(store)) -} +// fn create_mempool_after_execute_validator( +// store: SqliteStateStore, +// ) -> impl MempoolValidator { +// HasInvolvedShards::new().and_then(OutputsDontExistLocally::new(store)) +// } diff --git a/applications/tari_validator_node/src/consensus/block_transaction_executor.rs b/applications/tari_validator_node/src/consensus/block_transaction_executor.rs index a4cd2793a..a7cd42bab 100644 --- a/applications/tari_validator_node/src/consensus/block_transaction_executor.rs +++ b/applications/tari_validator_node/src/consensus/block_transaction_executor.rs @@ -1,6 +1,8 @@ // Copyright 2024 The Tari Project // SPDX-License-Identifier: BSD-3-Clause +use std::sync::Arc; + use indexmap::IndexMap; use log::info; use tari_consensus::{ @@ -8,79 +10,33 @@ use tari_consensus::{ traits::{BlockTransactionExecutor, BlockTransactionExecutorError, ReadableSubstateStore}, }; use tari_dan_app_utilities::transaction_executor::TransactionExecutor; -use tari_dan_common_types::optional::Optional; -use tari_dan_engine::{ - bootstrap_state, - state_store::{memory::MemoryStateStore, AtomicDb, StateWriter}, +use tari_dan_common_types::{optional::Optional, Epoch}; +use tari_dan_engine::state_store::{memory::MemoryStateStore, new_memory_store, AtomicDb, StateWriter}; +use tari_dan_storage::{ + consensus_models::{ExecutedTransaction, TransactionRecord}, + StateStore, }; -use tari_dan_storage::{consensus_models::ExecutedTransaction, StateStore}; use tari_engine_types::{ substate::{Substate, SubstateId}, - virtual_substate::VirtualSubstates, + virtual_substate::{VirtualSubstate, VirtualSubstateId, VirtualSubstates}, }; use tari_transaction::{Transaction, VersionedSubstateId}; +use crate::{transaction_validators::TransactionValidationError, validator::Validator}; + const LOG_TARGET: &str = "tari::dan::consensus::hotstuff::block_transaction_executor"; -#[derive(Debug, Clone)] -pub struct TariDanBlockTransactionExecutor { - // TODO: we will need the epoch manager for virtual substates and other operations in the future - #[allow(dead_code)] - epoch_manager: TEpochManager, +#[derive(Debug)] +pub struct TariDanBlockTransactionExecutor { executor: TExecutor, + validator: Arc, } -impl BlockTransactionExecutor - for TariDanBlockTransactionExecutor -where - TStateStore: StateStore, - TExecutor: TransactionExecutor, -{ - fn execute( - &self, - transaction: Transaction, - store: &PendingSubstateStore, - ) -> Result { - let id: tari_transaction::TransactionId = *transaction.id(); - - // Get the latest input substates - let inputs = self.resolve_substates::(&transaction, store)?; - info!(target: LOG_TARGET, "Transaction {} executing. Inputs: {:?}", id, inputs); - - // Create a memory db with all the input substates, needed for the transaction execution - let state_db = Self::new_state_db(); - self.add_substates_to_memory_db(&inputs, &state_db)?; - - // TODO: create the virtual substates for execution - let virtual_substates = VirtualSubstates::new(); - - // Execute the transaction and get the result - let exec_output = self - .executor - .execute(transaction, state_db, virtual_substates) - .map_err(|e| BlockTransactionExecutorError::ExecutionThreadFailure(e.to_string()))?; - - // Generate the resolved inputs to set the specific version and required lock flag, as we know it after - // execution - let resolved_inputs = exec_output.resolve_inputs(inputs); - - let executed = ExecutedTransaction::new( - exec_output.transaction, - exec_output.result, - resolved_inputs, - exec_output.outputs, - exec_output.execution_time, - ); - info!(target: LOG_TARGET, "Transaction {} executed. {}", id,executed.result().finalize.result); - Ok(executed) - } -} - -impl TariDanBlockTransactionExecutor { - pub fn new(epoch_manager: TEpochManager, executor: TExecutor) -> Self { +impl TariDanBlockTransactionExecutor { + pub fn new(executor: TExecutor, validator: TValidator) -> Self { Self { - epoch_manager, executor, + validator: Arc::new(validator), } } @@ -95,7 +51,7 @@ impl TariDanBlockTransactionExecutor { let id = VersionedSubstateId::new(input.substate_id, version); - let substate = store.get(&id.to_substate_address())?; + let substate = store.get(&id)?; info!(target: LOG_TARGET, "Resolved substate: {id}"); resolved_substates.insert(id, substate); }, @@ -146,14 +102,88 @@ impl TariDanBlockTransactionExecutor BlockTransactionExecutor + for TariDanBlockTransactionExecutor +where + TStateStore: StateStore, + TExecutor: TransactionExecutor, + for<'a> TValidator: Validator, +{ + fn validate( + &self, + _tx: &TStateStore::ReadTransaction<'_>, + current_epoch: Epoch, + transaction: &Transaction, + ) -> Result<(), BlockTransactionExecutorError> { + self.validator + .validate(&ValidationContext { current_epoch }, transaction) + // TODO: see if we can avoid the err as string + .map_err(|e| BlockTransactionExecutorError::TransactionValidationError(e.to_string())) + } - fn new_state_db() -> MemoryStateStore { - let state_db = MemoryStateStore::new(); - // unwrap: Memory state store is infallible - let mut tx = state_db.write_access().unwrap(); - // Add bootstrapped substates - bootstrap_state(&mut tx).unwrap(); - tx.commit().unwrap(); - state_db + fn prepare( + &self, + transaction: Transaction, + store: &TStateStore, + ) -> Result { + let t = store.with_read_tx(|tx| TransactionRecord::get(tx, transaction.id()))?; + Ok(t) } + + fn execute( + &self, + transaction: Transaction, + store: &PendingSubstateStore, + current_epoch: Epoch, + ) -> Result { + let id: tari_transaction::TransactionId = *transaction.id(); + + // Get the latest input substates + let inputs = self.resolve_substates::(&transaction, store)?; + info!(target: LOG_TARGET, "Transaction {} executing. Inputs: {:?}", id, inputs); + + // Create a memory db with all the input substates, needed for the transaction execution + let state_db = new_memory_store(); + self.add_substates_to_memory_db(&inputs, &state_db)?; + + let mut virtual_substates = VirtualSubstates::new(); + virtual_substates.insert( + VirtualSubstateId::CurrentEpoch, + VirtualSubstate::CurrentEpoch(current_epoch.as_u64()), + ); + + // Execute the transaction and get the result + let exec_output = self + .executor + .execute(transaction, state_db, virtual_substates) + .map_err(|e| BlockTransactionExecutorError::ExecutionThreadFailure(e.to_string()))?; + + // Generate the resolved inputs to set the specific version and required lock flag, as we know it after + // execution + let resolved_inputs = exec_output.resolve_inputs(inputs); + + let executed = ExecutedTransaction::new( + exec_output.transaction, + exec_output.result, + resolved_inputs, + exec_output.outputs, + exec_output.execution_time, + ); + info!(target: LOG_TARGET, "Transaction {} executed. {}", id,executed.result().finalize.result); + Ok(executed) + } +} + +impl Clone for TariDanBlockTransactionExecutor { + fn clone(&self) -> Self { + Self { + executor: self.executor.clone(), + validator: self.validator.clone(), + } + } +} +pub struct ValidationContext { + pub current_epoch: Epoch, } diff --git a/applications/tari_validator_node/src/consensus/handle.rs b/applications/tari_validator_node/src/consensus/handle.rs index 82db12b24..1acfc8f85 100644 --- a/applications/tari_validator_node/src/consensus/handle.rs +++ b/applications/tari_validator_node/src/consensus/handle.rs @@ -2,7 +2,8 @@ // SPDX-License-Identifier: BSD-3-Clause use tari_consensus::hotstuff::{ConsensusCurrentState, CurrentView, HotstuffEvent}; -use tokio::sync::{broadcast, watch}; +use tari_transaction::Transaction; +use tokio::sync::{broadcast, mpsc, watch}; use crate::event_subscription::EventSubscription; @@ -11,6 +12,7 @@ pub struct ConsensusHandle { rx_current_state: watch::Receiver, events_subscription: EventSubscription, current_view: CurrentView, + tx_new_transaction: mpsc::Sender<(Transaction, usize)>, } impl ConsensusHandle { @@ -18,14 +20,27 @@ impl ConsensusHandle { rx_current_state: watch::Receiver, events_subscription: EventSubscription, current_view: CurrentView, + tx_new_transaction: mpsc::Sender<(Transaction, usize)>, ) -> Self { Self { rx_current_state, events_subscription, current_view, + tx_new_transaction, } } + pub async fn notify_new_transaction( + &self, + transaction: Transaction, + num_pending: usize, + ) -> Result<(), mpsc::error::SendError<()>> { + self.tx_new_transaction + .send((transaction, num_pending)) + .await + .map_err(|_| mpsc::error::SendError(())) + } + pub fn current_view(&self) -> &CurrentView { &self.current_view } diff --git a/applications/tari_validator_node/src/consensus/metrics.rs b/applications/tari_validator_node/src/consensus/metrics.rs index 9d9cddd9d..990cc9092 100644 --- a/applications/tari_validator_node/src/consensus/metrics.rs +++ b/applications/tari_validator_node/src/consensus/metrics.rs @@ -196,7 +196,6 @@ impl ConsensusHooks for PrometheusConsensusMetrics { Decision::Abort => { self.transactions_finalized_aborted.inc(); }, - Decision::Deferred => {}, } } } diff --git a/applications/tari_validator_node/src/consensus/mod.rs b/applications/tari_validator_node/src/consensus/mod.rs index fd16a42a7..635286a4e 100644 --- a/applications/tari_validator_node/src/consensus/mod.rs +++ b/applications/tari_validator_node/src/consensus/mod.rs @@ -1,13 +1,24 @@ // Copyright 2023 The Tari Project // SPDX-License-Identifier: BSD-3-Clause +use sqlite_message_logger::SqliteMessageLogger; use tari_common::configuration::Network; -use tari_consensus::hotstuff::{ConsensusWorker, ConsensusWorkerContext, HotstuffConfig, HotstuffWorker}; +use tari_consensus::{ + hotstuff::{ConsensusWorker, ConsensusWorkerContext, HotstuffConfig, HotstuffWorker}, + traits::ConsensusSpec, +}; +use tari_dan_app_utilities::{ + consensus_constants::ConsensusConstants, + template_manager::implementation::TemplateManager, + transaction_executor::TariDanTransactionProcessor, +}; +use tari_dan_common_types::PeerAddress; use tari_dan_storage::consensus_models::TransactionPool; use tari_epoch_manager::base_layer::EpochManagerHandle; +use tari_rpc_state_sync::RpcStateSyncManager; use tari_shutdown::ShutdownSignal; use tari_state_store_sqlite::SqliteStateStore; -use tari_transaction::{Transaction, TransactionId}; +use tari_transaction::Transaction; use tari_validator_node_rpc::client::TariValidatorNodeRpcClientFactory; use tokio::{ sync::{broadcast, mpsc, watch}, @@ -15,12 +26,20 @@ use tokio::{ }; use crate::{ - consensus::{ - leader_selection::RoundRobinLeaderStrategy, - signature_service::TariSignatureService, - spec::TariConsensusSpec, - }, + consensus::{leader_selection::RoundRobinLeaderStrategy, spec::TariConsensusSpec}, event_subscription::EventSubscription, + p2p::services::messaging::{ConsensusInboundMessaging, ConsensusOutboundMessaging}, + transaction_validators::{ + ClaimFeeTransactionValidator, + EpochRangeValidator, + FeeTransactionValidator, + HasInputs, + TemplateExistsValidator, + TransactionSignatureValidator, + TransactionValidationError, + }, + validator::{BoxedValidator, Validator}, + ValidatorNodeConfig, }; mod block_transaction_executor; @@ -31,52 +50,39 @@ pub mod metrics; mod signature_service; mod spec; -pub use block_transaction_executor::TariDanBlockTransactionExecutor; +pub use block_transaction_executor::*; pub use handle::*; -use sqlite_message_logger::SqliteMessageLogger; -use tari_consensus::traits::ConsensusSpec; -use tari_dan_app_utilities::{ - consensus_constants::ConsensusConstants, - keypair::RistrettoKeypair, - template_manager::implementation::TemplateManager, - transaction_executor::TariDanTransactionProcessor, -}; -use tari_dan_common_types::PeerAddress; -use tari_rpc_state_sync::RpcStateSyncManager; +pub use signature_service::*; + +use crate::transaction_validators::WithContext; -use crate::p2p::services::messaging::{ConsensusInboundMessaging, ConsensusOutboundMessaging}; +pub type ConsensusTransactionValidator = BoxedValidator; pub async fn spawn( network: Network, store: SqliteStateStore, - keypair: RistrettoKeypair, + local_addr: PeerAddress, + signing_service: TariSignatureService, epoch_manager: EpochManagerHandle, - rx_new_transactions: mpsc::Receiver<(TransactionId, usize)>, inbound_messaging: ConsensusInboundMessaging, outbound_messaging: ConsensusOutboundMessaging, client_factory: TariValidatorNodeRpcClientFactory, hooks: ::Hooks, shutdown_signal: ShutdownSignal, transaction_executor: TariDanBlockTransactionExecutor< - EpochManagerHandle, TariDanTransactionProcessor>, + ConsensusTransactionValidator, >, consensus_constants: ConsensusConstants, -) -> ( - JoinHandle>, - ConsensusHandle, - mpsc::UnboundedReceiver, -) { - let (tx_mempool, rx_mempool) = mpsc::unbounded_channel(); +) -> (JoinHandle>, ConsensusHandle) { + let (tx_new_transaction, rx_new_transactions) = mpsc::channel(10); - let validator_addr = PeerAddress::from(keypair.public_key().clone()); - let signing_service = TariSignatureService::new(keypair); let leader_strategy = RoundRobinLeaderStrategy::new(); let transaction_pool = TransactionPool::new(); let (tx_hotstuff_events, _) = broadcast::channel(100); let hotstuff_worker = HotstuffWorker::::new( - validator_addr, + local_addr, network, inbound_messaging, outbound_messaging, @@ -88,7 +94,6 @@ pub async fn spawn( transaction_pool, transaction_executor, tx_hotstuff_events.clone(), - tx_mempool, hooks, shutdown_signal.clone(), HotstuffConfig { @@ -106,15 +111,39 @@ pub async fn spawn( tx_current_state, }; - let handle = ConsensusWorker::new(shutdown_signal).spawn(context); + let join_handle = ConsensusWorker::new(shutdown_signal).spawn(context); + + let consensus_handle = ConsensusHandle::new( + rx_current_state, + EventSubscription::new(tx_hotstuff_events), + current_view, + tx_new_transaction, + ); + + (join_handle, consensus_handle) +} - ( - handle, - ConsensusHandle::new( - rx_current_state, - EventSubscription::new(tx_hotstuff_events), - current_view, - ), - rx_mempool, - ) +pub fn create_transaction_validator( + config: &ValidatorNodeConfig, + template_manager: TemplateManager, +) -> ConsensusTransactionValidator { + let mut validator = WithContext::::new() + .map_context( + |_| (), + TransactionSignatureValidator.and_then(TemplateExistsValidator::new(template_manager)), + ) + .map_context( + |c| c.current_epoch, + EpochRangeValidator::new().and_then(ClaimFeeTransactionValidator::new()), + ) + .boxed(); + if !config.no_fees { + // A transaction without fee payment may have 0 inputs. + validator = WithContext::::new() + .map_context(|_| (), HasInputs::new()) + .and_then(validator) + .map_context(|_| (), FeeTransactionValidator) + .boxed(); + } + validator } diff --git a/applications/tari_validator_node/src/consensus/spec.rs b/applications/tari_validator_node/src/consensus/spec.rs index 86843e729..d3db0e612 100644 --- a/applications/tari_validator_node/src/consensus/spec.rs +++ b/applications/tari_validator_node/src/consensus/spec.rs @@ -20,6 +20,7 @@ use crate::{ consensus::{ leader_selection::RoundRobinLeaderStrategy, signature_service::TariSignatureService, + ConsensusTransactionValidator, TariDanBlockTransactionExecutor, }, p2p::services::messaging::{ConsensusInboundMessaging, ConsensusOutboundMessaging}, @@ -41,6 +42,8 @@ impl ConsensusSpec for TariConsensusSpec { type SignatureService = TariSignatureService; type StateStore = SqliteStateStore; type SyncManager = RpcStateSyncManager; - type TransactionExecutor = - TariDanBlockTransactionExecutor>>; + type TransactionExecutor = TariDanBlockTransactionExecutor< + TariDanTransactionProcessor>, + ConsensusTransactionValidator, + >; } diff --git a/applications/tari_validator_node/src/dry_run_transaction_processor.rs b/applications/tari_validator_node/src/dry_run_transaction_processor.rs index c4ff2cd10..589ed7089 100644 --- a/applications/tari_validator_node/src/dry_run_transaction_processor.rs +++ b/applications/tari_validator_node/src/dry_run_transaction_processor.rs @@ -27,10 +27,7 @@ use tari_dan_app_utilities::{ transaction_executor::{TariDanTransactionProcessor, TransactionExecutor, TransactionProcessorError}, }; use tari_dan_common_types::PeerAddress; -use tari_dan_engine::{ - bootstrap_state, - state_store::{memory::MemoryStateStore, AtomicDb, StateStoreError, StateWriter}, -}; +use tari_dan_engine::state_store::{new_memory_store, StateStoreError}; use tari_dan_storage::StorageError; use tari_engine_types::commit_result::ExecuteResult; use tari_epoch_manager::{base_layer::EpochManagerHandle, EpochManagerError, EpochManagerReader}; @@ -107,12 +104,7 @@ impl DryRunTransactionProcessor { transaction: Transaction, ) -> Result { // Resolve all local and foreign substates - let temp_state_store = MemoryStateStore::new(); - { - let mut tx = temp_state_store.write_access().map_err(StateStoreError::Custom)?; - bootstrap_state(&mut tx)?; - tx.commit()?; - } + let temp_state_store = new_memory_store(); let current_epoch = self.epoch_manager.current_epoch().await?; let virtual_substates = self diff --git a/applications/tari_validator_node/src/json_rpc/handlers.rs b/applications/tari_validator_node/src/json_rpc/handlers.rs index f567290f3..cdcd51fdb 100644 --- a/applications/tari_validator_node/src/json_rpc/handlers.rs +++ b/applications/tari_validator_node/src/json_rpc/handlers.rs @@ -480,7 +480,7 @@ impl JsonRpcHandlers { name: t.name, address: t.address, url: t.url, - binary_sha: t.binary_sha, + binary_sha: t.binary_sha.to_vec(), height: t.height, }) .collect(), @@ -508,7 +508,7 @@ impl JsonRpcHandlers { name: template.metadata.name, address: template.metadata.address, url: template.metadata.url, - binary_sha: template.metadata.binary_sha, + binary_sha: template.metadata.binary_sha.to_vec(), height: template.metadata.height, }, abi, diff --git a/applications/tari_validator_node/src/lib.rs b/applications/tari_validator_node/src/lib.rs index 0df7d88b5..7484d2644 100644 --- a/applications/tari_validator_node/src/lib.rs +++ b/applications/tari_validator_node/src/lib.rs @@ -35,7 +35,10 @@ mod p2p; mod substate_resolver; mod virtual_substate; +pub mod transaction_validators; +mod validator; mod validator_registration_file; + use std::{fs, io, process}; use log::*; diff --git a/applications/tari_validator_node/src/p2p/services/mempool/error.rs b/applications/tari_validator_node/src/p2p/services/mempool/error.rs index 85b7f63e9..4168c044a 100644 --- a/applications/tari_validator_node/src/p2p/services/mempool/error.rs +++ b/applications/tari_validator_node/src/p2p/services/mempool/error.rs @@ -1,26 +1,15 @@ // Copyright 2023 The Tari Project // SPDX-License-Identifier: BSD-3-Clause -use std::collections::HashSet; - -use indexmap::IndexMap; -use tari_dan_app_utilities::{ - template_manager::interface::TemplateManagerError, - transaction_executor::TransactionProcessorError, -}; -use tari_dan_common_types::Epoch; -use tari_dan_storage::{consensus_models::TransactionPoolError, StorageError}; -use tari_engine_types::substate::{Substate, SubstateId}; +use tari_dan_storage::StorageError; use tari_epoch_manager::EpochManagerError; use tari_networking::NetworkingError; -use tari_transaction::{SubstateRequirement, TransactionId}; use tokio::sync::{mpsc, oneshot}; use crate::{ dry_run_transaction_processor::DryRunTransactionProcessorError, p2p::services::mempool::MempoolRequest, - substate_resolver::SubstateResolverError, - virtual_substate::VirtualSubstateError, + transaction_validators::TransactionValidationError, }; #[derive(thiserror::Error, Debug)] @@ -31,50 +20,15 @@ pub enum MempoolError { EpochManagerError(#[from] EpochManagerError), #[error("Internal service request cancelled")] RequestCancelled, + #[error("Consensus channel closed")] + ConsensusChannelClosed, #[error("DryRunTransactionProcessor Error: {0}")] DryRunTransactionProcessorError(#[from] DryRunTransactionProcessorError), - #[error("Execution thread failure: {0}")] - ExecutionThreadPanicked(String), - #[error("Requires consensus for local substates: {local_substates:?}")] - MustDeferExecution { - local_substates: IndexMap, - foreign_substates: HashSet, - }, - #[error("SubstateResolver Error: {0}")] - SubstateResolverError(#[from] SubstateResolverError), - #[error("Transaction Execution Error: {0}")] - TransactionExecutionError(#[from] TransactionProcessorError), #[error("Storage Error: {0}")] StorageError(#[from] StorageError), - #[error("Virtual substate error: {0}")] - VirtualSubstateError(#[from] VirtualSubstateError), - #[error("Transaction pool error: {0}")] - TransactionPoolError(#[from] TransactionPoolError), + #[error("Transaction validation error: {0}")] + TransactionValidationError(#[from] TransactionValidationError), - // TODO: move these to MempoolValidationError type - #[error("Invalid template address: {0}")] - InvalidTemplateAddress(#[from] TemplateManagerError), - #[error("No fee instructions")] - NoFeeInstructions, - #[error("Output substate exists in transaction {transaction_id}")] - OutputSubstateExists { transaction_id: TransactionId }, - #[error("Validator fee claim instruction in transaction {transaction_id} contained invalid epoch {given_epoch}")] - ValidatorFeeClaimEpochInvalid { - transaction_id: TransactionId, - given_epoch: Epoch, - }, - #[error("Current epoch ({current_epoch}) is less than minimum epoch ({min_epoch}) required for transaction")] - CurrentEpochLessThanMinimum { current_epoch: Epoch, min_epoch: Epoch }, - #[error("Current epoch ({current_epoch}) is greater than maximum epoch ({max_epoch}) required for transaction")] - CurrentEpochGreaterThanMaximum { current_epoch: Epoch, max_epoch: Epoch }, - #[error("Transaction {transaction_id} does not have any inputs")] - NoInputs { transaction_id: TransactionId }, - #[error("Executed transaction {transaction_id} does not involved any shards")] - NoInvolvedShards { transaction_id: TransactionId }, - #[error("Invalid transaction signature")] - InvalidSignature, - #[error("Transaction {transaction_id} is not signed")] - TransactionNotSigned { transaction_id: TransactionId }, #[error("Network error: {0}")] NetworkingError(#[from] NetworkingError), } diff --git a/applications/tari_validator_node/src/p2p/services/mempool/executor.rs b/applications/tari_validator_node/src/p2p/services/mempool/executor.rs deleted file mode 100644 index 946fcfaf2..000000000 --- a/applications/tari_validator_node/src/p2p/services/mempool/executor.rs +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2023 The Tari Project -// SPDX-License-Identifier: BSD-3-Clause - -use log::*; -use tari_dan_app_utilities::transaction_executor::{TransactionExecutor, TransactionProcessorError}; -use tari_dan_common_types::Epoch; -use tari_dan_engine::{ - bootstrap_state, - state_store::{memory::MemoryStateStore, AtomicDb, StateWriter}, -}; -use tari_dan_storage::consensus_models::{ExecutedTransaction, SubstateLockFlag, VersionedSubstateIdLockIntent}; -use tari_transaction::{Transaction, VersionedSubstateId}; -use tokio::task; - -use crate::{ - p2p::services::mempool::{MempoolError, ResolvedSubstates, SubstateResolver}, - substate_resolver::SubstateResolverError, -}; - -const LOG_TARGET: &str = "tari::dan::mempool::executor"; - -pub async fn execute_transaction( - transaction: Transaction, - substate_resolver: TSubstateResolver, - executor: TExecutor, - current_epoch: Epoch, -) -> Result, MempoolError> -where - TSubstateResolver: SubstateResolver, - TExecutor: TransactionExecutor + Send + Sync + 'static, -{ - let virtual_substates = match substate_resolver - .resolve_virtual_substates(&transaction, current_epoch) - .await - { - Ok(virtual_substates) => virtual_substates, - Err(err @ SubstateResolverError::UnauthorizedFeeClaim { .. }) => { - warn!(target: LOG_TARGET, "One or more invalid fee claims for transaction {}: {}", transaction.id(), err); - return Ok(Err(err.into())); - }, - Err(err) => return Err(err.into()), - }; - - info!(target: LOG_TARGET, "🎱 Transaction {} found virtual_substates = [{}]", transaction.id(), virtual_substates.keys().map(|addr| addr.to_string()).collect::>().join(", ")); - - let ResolvedSubstates { - local: local_substates, - unresolved_foreign: foreign, - } = match substate_resolver.try_resolve_local(&transaction) { - Ok(pair) => pair, - // Substates are downed/dont exist - Err(err @ SubstateResolverError::InputSubstateDowned { .. }) | - Err(err @ SubstateResolverError::InputSubstateDoesNotExist { .. }) => { - warn!(target: LOG_TARGET, "One or more invalid input shards for transaction {}: {}", transaction.id(), err); - // Ok(Err(_)) return that the transaction should be rejected, not an internal mempool execution failure - return Ok(Err(err.into())); - }, - // Some other issue - network, db, etc - Err(err) => return Err(err.into()), - }; - - if !foreign.is_empty() { - info!(target: LOG_TARGET, "Unable to execute transaction {} in the mempool because it has foreign inputs: {:?}", transaction.id(), foreign); - return Err(MempoolError::MustDeferExecution { - local_substates, - foreign_substates: foreign, - }); - } - - info!(target: LOG_TARGET, "🎱 Transaction {} resolved local inputs = [{}]", transaction.id(), local_substates.keys().map(|addr| addr.to_string()).collect::>().join(", ")); - - let res = task::spawn_blocking(move || { - let versioned_inputs = local_substates - .iter() - .map(|(id, substate)| VersionedSubstateId::new(id.clone(), substate.version())) - .collect::>(); - let state_db = new_state_db(); - state_db.set_many(local_substates).expect("memory db is infallible"); - - match executor.execute(transaction, state_db, virtual_substates) { - Ok(exec_output) => { - // Update the resolved inputs to set the specific version, as we know it after execution - let resolved_inputs = if let Some(diff) = exec_output.result.finalize.accept() { - versioned_inputs - .into_iter() - .map(|versioned_id| { - let lock_flag = if diff.down_iter().any(|(id, _)| *id == versioned_id.substate_id) { - // Update all inputs that were DOWNed to be write locked - SubstateLockFlag::Write - } else { - // Any input not downed, gets a read lock - SubstateLockFlag::Read - }; - VersionedSubstateIdLockIntent::new(versioned_id, lock_flag) - }) - .collect() - } else { - versioned_inputs - .into_iter() - .map(|versioned_id| { - // We cannot tell which inputs are written, however since this transaction is a - // reject it does not matter since it will not cause locks. - // We still set resolved inputs because this is used to determine which shards are - // involved. - VersionedSubstateIdLockIntent::new(versioned_id, SubstateLockFlag::Write) - }) - .collect() - }; - - Ok(ExecutedTransaction::new( - exec_output.transaction, - exec_output.result, - resolved_inputs, - exec_output.outputs, - exec_output.execution_time, - )) - }, - Err(err) => Err(err.into()), - } - }) - .await; - - // If this errors, the thread panicked due to a bug - res.map_err(|err| MempoolError::ExecutionThreadPanicked(err.to_string())) -} - -fn new_state_db() -> MemoryStateStore { - let state_db = MemoryStateStore::new(); - // unwrap: Memory state store is infallible - let mut tx = state_db.write_access().unwrap(); - // Add bootstrapped substates - bootstrap_state(&mut tx).unwrap(); - tx.commit().unwrap(); - state_db -} diff --git a/applications/tari_validator_node/src/p2p/services/mempool/initializer.rs b/applications/tari_validator_node/src/p2p/services/mempool/initializer.rs index a8c34600e..e07e70ecc 100644 --- a/applications/tari_validator_node/src/p2p/services/mempool/initializer.rs +++ b/applications/tari_validator_node/src/p2p/services/mempool/initializer.rs @@ -21,12 +21,10 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. use log::*; -use tari_dan_app_utilities::transaction_executor::{TransactionExecutor, TransactionProcessorError}; use tari_dan_common_types::PeerAddress; -use tari_dan_storage::consensus_models::ExecutedTransaction; use tari_epoch_manager::base_layer::EpochManagerHandle; use tari_state_store_sqlite::SqliteStateStore; -use tari_transaction::{Transaction, TransactionId}; +use tari_transaction::Transaction; use tokio::{sync::mpsc, task, task::JoinHandle}; #[cfg(feature = "metrics")] @@ -34,32 +32,25 @@ use super::metrics::PrometheusMempoolMetrics; use crate::{ consensus::ConsensusHandle, p2p::services::{ - mempool::{handle::MempoolHandle, service::MempoolService, MempoolError, SubstateResolver, Validator}, + mempool::{handle::MempoolHandle, service::MempoolService}, messaging::Gossip, }, - substate_resolver::SubstateResolverError, + transaction_validators::TransactionValidationError, + validator::Validator, }; const LOG_TARGET: &str = "tari::dan::validator_node::mempool"; -pub fn spawn( +pub fn spawn( gossip: Gossip, - tx_executed_transactions: mpsc::Sender<(TransactionId, usize)>, epoch_manager: EpochManagerHandle, - transaction_executor: TExecutor, - substate_resolver: TSubstateResolver, - validator: TValidator, - after_executed_validator: TExecutedValidator, + transaction_validator: TValidator, state_store: SqliteStateStore, - rx_consensus_to_mempool: mpsc::UnboundedReceiver, consensus_handle: ConsensusHandle, #[cfg(feature = "metrics")] metrics_registry: &prometheus::Registry, ) -> (MempoolHandle, JoinHandle>) where - TValidator: Validator + Send + Sync + 'static, - TExecutedValidator: Validator + Send + Sync + 'static, - TExecutor: TransactionExecutor + Clone + Send + Sync + 'static, - TSubstateResolver: SubstateResolver + Clone + Send + Sync + 'static, + TValidator: Validator + Send + Sync + 'static, { // This channel only needs to be size 1, because each mempool request must wait for a reply and the mempool is // running on a single task and so there is no benefit to buffering multiple requests. @@ -70,14 +61,9 @@ where let mempool = MempoolService::new( rx_mempool_request, gossip, - tx_executed_transactions, epoch_manager, - transaction_executor, - substate_resolver, - validator, - after_executed_validator, + transaction_validator, state_store, - rx_consensus_to_mempool, consensus_handle, #[cfg(feature = "metrics")] metrics, diff --git a/applications/tari_validator_node/src/p2p/services/mempool/metrics.rs b/applications/tari_validator_node/src/p2p/services/mempool/metrics.rs index 49d9e520b..21f45db4c 100644 --- a/applications/tari_validator_node/src/p2p/services/mempool/metrics.rs +++ b/applications/tari_validator_node/src/p2p/services/mempool/metrics.rs @@ -1,21 +1,14 @@ // Copyright 2024 The Tari Project // SPDX-License-Identifier: BSD-3-Clause -// Copyright 2024 The Tari Project -// SPDX-License-Identifier: BSD-3-Clause -use prometheus::{Histogram, HistogramOpts, IntCounter, Registry}; -use tari_dan_storage::consensus_models::ExecutedTransaction; +use prometheus::{IntCounter, Registry}; use tari_transaction::{Transaction, TransactionId}; -use crate::{metrics::CollectorRegister, p2p::services::mempool::MempoolError}; +use crate::metrics::CollectorRegister; #[derive(Debug, Clone)] pub struct PrometheusMempoolMetrics { transactions_received: IntCounter, - transactions_executed: IntCounter, - transactions_execute_time: Histogram, - - transaction_execute_error: IntCounter, transaction_validation_error: IntCounter, } @@ -25,21 +18,6 @@ impl PrometheusMempoolMetrics { transactions_received: IntCounter::new("mempool_transactions_received", "Number of transactions received") .unwrap() .register_at(registry), - transactions_executed: IntCounter::new("mempool_transactions_executed", "Number of transactions executed") - .unwrap() - .register_at(registry), - transactions_execute_time: Histogram::with_opts(HistogramOpts::new( - "mempool_transactions_execute_time", - "Time to execute a transaction", - )) - .unwrap() - .register_at(registry), - transaction_execute_error: IntCounter::new( - "mempool_transaction_execute_error", - "Number of transaction execution errors", - ) - .unwrap() - .register_at(registry), transaction_validation_error: IntCounter::new( "mempool_transaction_validation_error", "Number of transaction validation errors", @@ -53,23 +31,6 @@ impl PrometheusMempoolMetrics { self.transactions_received.inc(); } - pub fn on_transaction_executed( - &mut self, - _transaction_id: &TransactionId, - execution_result: &Result, - ) { - self.transactions_executed.inc(); - match execution_result { - Ok(transaction) => { - self.transactions_execute_time - .observe(transaction.execution_time().as_millis() as f64); - }, - Err(_) => { - self.transaction_execute_error.inc(); - }, - } - } - pub fn on_transaction_validation_error(&mut self, _transaction: &TransactionId, _err: &E) { self.transaction_validation_error.inc(); } diff --git a/applications/tari_validator_node/src/p2p/services/mempool/mod.rs b/applications/tari_validator_node/src/p2p/services/mempool/mod.rs index 049b06888..799ff4f8f 100644 --- a/applications/tari_validator_node/src/p2p/services/mempool/mod.rs +++ b/applications/tari_validator_node/src/p2p/services/mempool/mod.rs @@ -27,14 +27,11 @@ mod initializer; pub use initializer::spawn; mod error; -mod executor; mod gossip; #[cfg(feature = "metrics")] mod metrics; mod service; mod traits; -mod validators; pub use error::*; pub use traits::*; -pub use validators::*; diff --git a/applications/tari_validator_node/src/p2p/services/mempool/service.rs b/applications/tari_validator_node/src/p2p/services/mempool/service.rs index 1a583d8c9..b7b8f1594 100644 --- a/applications/tari_validator_node/src/p2p/services/mempool/service.rs +++ b/applications/tari_validator_node/src/p2p/services/mempool/service.rs @@ -20,17 +20,12 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::{collections::HashSet, fmt, fmt::Display, iter}; +use std::{collections::HashSet, fmt::Display, iter}; -use futures::{future::BoxFuture, stream::FuturesUnordered, FutureExt, StreamExt}; use log::*; -use tari_dan_app_utilities::transaction_executor::{TransactionExecutor, TransactionProcessorError}; -use tari_dan_common_types::{optional::Optional, shard::Shard, Epoch, PeerAddress, SubstateAddress}; +use tari_dan_common_types::{optional::Optional, shard::Shard, PeerAddress, SubstateAddress}; use tari_dan_p2p::{DanMessage, NewTransactionMessage}; -use tari_dan_storage::{ - consensus_models::{ExecutedTransaction, SubstateRecord, TransactionRecord}, - StateStore, -}; +use tari_dan_storage::{consensus_models::TransactionRecord, StateStore}; use tari_epoch_manager::{base_layer::EpochManagerHandle, EpochManagerEvent, EpochManagerReader}; use tari_state_store_sqlite::SqliteStateStore; use tari_transaction::{Transaction, TransactionId}; @@ -42,111 +37,47 @@ use super::MempoolError; use crate::{ consensus::ConsensusHandle, p2p::services::{ - mempool::{ - executor::execute_transaction, - gossip::MempoolGossip, - handle::MempoolRequest, - traits::SubstateResolver, - Validator, - }, + mempool::{gossip::MempoolGossip, handle::MempoolRequest}, messaging::Gossip, }, - substate_resolver::SubstateResolverError, + transaction_validators::TransactionValidationError, + validator::Validator, }; const LOG_TARGET: &str = "tari::validator_node::mempool::service"; -/// Data returned from a pending execution. -struct MempoolTransactionExecution { - transaction_id: TransactionId, - execution: TransactionExecution, - should_propagate: bool, - sender_shard: Option, -} - -pub enum TransactionExecution { - /// The transaction was executed in the mempool - Executed { - result: Result, - }, - /// Mempool execution failed due to an error that is unrelated to the transaction. IO, resources, database etc - ExecutionFailure { - error: MempoolError, - transaction: Transaction, - }, - /// Execution cannot occur in the mempool and is deferred to consensus - Deferred { transaction: Transaction }, -} - -impl Display for TransactionExecution { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - TransactionExecution::Executed { result } => match result { - Ok(executed) => write!(f, "Executed {}: {}", executed.id(), executed.result().finalize.result), - Err(e) => write!(f, "Execution failed: {}", e), - }, - TransactionExecution::ExecutionFailure { error, .. } => { - write!(f, "Unexpected Execution failure: {}", error) - }, - TransactionExecution::Deferred { transaction } => write!(f, "Deferred: {}", transaction.id()), - } - } -} - #[derive(Debug)] -pub struct MempoolService { +pub struct MempoolService { transactions: HashSet, - pending_executions: FuturesUnordered>, mempool_requests: mpsc::Receiver, - tx_executed_transactions: mpsc::Sender<(TransactionId, usize)>, epoch_manager: EpochManagerHandle, before_execute_validator: TValidator, - after_execute_validator: TExecutedValidator, - transaction_executor: TExecutor, - substate_resolver: TSubstateResolver, state_store: SqliteStateStore, gossip: MempoolGossip, - rx_consensus_to_mempool: mpsc::UnboundedReceiver, consensus_handle: ConsensusHandle, #[cfg(feature = "metrics")] metrics: PrometheusMempoolMetrics, } -impl - MempoolService -where - TValidator: Validator, - TExecutedValidator: Validator, - TExecutor: TransactionExecutor + Clone + Send + Sync + 'static, - TSubstateResolver: SubstateResolver + Clone + Send + Sync + 'static, +impl MempoolService +where TValidator: Validator { pub(super) fn new( mempool_requests: mpsc::Receiver, gossip: Gossip, - tx_executed_transactions: mpsc::Sender<(TransactionId, usize)>, epoch_manager: EpochManagerHandle, - transaction_executor: TExecutor, - substate_resolver: TSubstateResolver, before_execute_validator: TValidator, - after_execute_validator: TExecutedValidator, state_store: SqliteStateStore, - rx_consensus_to_mempool: mpsc::UnboundedReceiver, consensus_handle: ConsensusHandle, #[cfg(feature = "metrics")] metrics: PrometheusMempoolMetrics, ) -> Self { Self { gossip: MempoolGossip::new(epoch_manager.clone(), gossip), transactions: Default::default(), - pending_executions: FuturesUnordered::new(), mempool_requests, - tx_executed_transactions, epoch_manager, - transaction_executor, - substate_resolver, before_execute_validator, - after_execute_validator, state_store, - rx_consensus_to_mempool, consensus_handle, #[cfg(feature = "metrics")] metrics, @@ -159,21 +90,11 @@ where loop { tokio::select! { Some(req) = self.mempool_requests.recv() => self.handle_request(req).await, - Some(result) = self.pending_executions.next() => { - if let Err(e) = self.handle_execution_task_complete(result).await { - error!(target: LOG_TARGET, "Possible bug: handle_execution_complete failed: {}", e); - } - }, Some(result) = self.gossip.next_message() => { if let Err(e) = self.handle_new_transaction_from_remote(result).await { warn!(target: LOG_TARGET, "Mempool rejected transaction: {}", e); } } - Some(msg) = self.rx_consensus_to_mempool.recv() => { - if let Err(e) = self.handle_new_transaction_from_local(msg, false).await { - warn!(target: LOG_TARGET, "Mempool rejected transaction: {}", e); - } - } Ok(event) = events.recv() => { if let EpochManagerEvent::EpochChanged(epoch) = event { if self.epoch_manager.is_this_validator_registered_for_epoch(epoch).await?{ @@ -328,7 +249,7 @@ where #[cfg(feature = "metrics")] self.metrics.on_transaction_received(&transaction); - if let Err(e) = self.before_execute_validator.validate(&transaction).await { + if let Err(e) = self.before_execute_validator.validate(&(), &transaction) { let transaction_id = *transaction.id(); self.state_store.with_write_tx(|tx| { TransactionRecord::new(transaction) @@ -338,7 +259,7 @@ where #[cfg(feature = "metrics")] self.metrics.on_transaction_validation_error(&transaction_id, &e); - return Err(e); + return Err(e.into()); } // Get the shards involved in claim fees. @@ -375,12 +296,16 @@ where if is_input_shard || is_output_shard { debug!(target: LOG_TARGET, "🎱 New transaction {} in mempool", transaction.id()); - let transaction = TransactionRecord::new(transaction); - self.state_store.with_write_tx(|tx| transaction.insert(tx))?; - let transaction = transaction.into_transaction(); + // let transaction = TransactionRecord::new(transaction); + // self.state_store.with_write_tx(|tx| transaction.insert(tx))?; + // let transaction = transaction.into_transaction(); self.transactions.insert(*transaction.id()); + self.consensus_handle + .notify_new_transaction(transaction.clone(), 0) + .await + .map_err(|_| MempoolError::ConsensusChannelClosed)?; - self.queue_transaction_for_execution(transaction.clone(), current_epoch, should_propagate, sender_shard); + // self.queue_transaction_for_execution(transaction.clone(), current_epoch, should_propagate, sender_shard); if should_propagate { // This validator is involved, we to send the transaction to local replicas @@ -465,299 +390,6 @@ where Ok(()) } - fn queue_transaction_for_execution( - &mut self, - transaction: Transaction, - current_epoch: Epoch, - should_propagate: bool, - sender_shard: Option, - ) { - let substate_resolver = self.substate_resolver.clone(); - let executor = self.transaction_executor.clone(); - let transaction_id = *transaction.id(); - - self.pending_executions.push(Box::pin( - execute_transaction(transaction.clone(), substate_resolver, executor, current_epoch).map(move |result| { - match result { - Ok(execution_result) => MempoolTransactionExecution { - transaction_id, - execution: TransactionExecution::Executed { - result: execution_result, - }, - should_propagate, - sender_shard, - }, - Err(MempoolError::MustDeferExecution { .. }) => MempoolTransactionExecution { - transaction_id, - execution: TransactionExecution::Deferred { transaction }, - should_propagate, - sender_shard, - }, - Err(error) => MempoolTransactionExecution { - transaction_id, - // IO, Database, etc errors - execution: TransactionExecution::ExecutionFailure { error, transaction }, - should_propagate, - sender_shard, - }, - } - }), - )); - } - - async fn handle_execution_task_complete( - &mut self, - result: MempoolTransactionExecution, - ) -> Result<(), MempoolError> { - let MempoolTransactionExecution { - transaction_id, - execution, - should_propagate, - sender_shard, - } = result; - - info!(target: LOG_TARGET, "🎱 Transaction {transaction_id} execution: {execution}"); - match execution { - TransactionExecution::Executed { result } => { - self.handle_execution_complete(transaction_id, result, should_propagate, sender_shard) - .await - }, - // Bubble the error up - TransactionExecution::ExecutionFailure { error, .. } => { - // TODO: should we retry this transaction at some point? - self.transactions.remove(&transaction_id); - Err(error) - }, - TransactionExecution::Deferred { transaction } => self.handle_deferred_execution(transaction).await, - } - } - - async fn handle_deferred_execution(&mut self, transaction: Transaction) -> Result<(), MempoolError> { - let transaction_id = *transaction.id(); - - let pending_exec_size = self.pending_executions.len(); - // Notify consensus about the transaction - if self - .tx_executed_transactions - .send((transaction_id, pending_exec_size)) - .await - .is_err() - { - debug!( - target: LOG_TARGET, - "Executed transaction channel closed before executed transaction could be sent" - ); - } - - self.transactions.remove(&transaction_id); - Ok(()) - } - - #[allow(clippy::too_many_lines)] - async fn handle_execution_complete( - &mut self, - transaction_id: TransactionId, - exec_result: Result, - should_propagate: bool, - sender_shard: Option, - ) -> Result<(), MempoolError> { - #[cfg(feature = "metrics")] - self.metrics.on_transaction_executed(&transaction_id, &exec_result); - - // The avoids the case where: - // 1. A transaction is received and start executing - // 2. The node switches to sync mode - // 3. Sync completes (some transactions that were finalized in sync may have been busy executing) - // 4. Execution completes and the transaction is added to the pool even though it is finalized via sync - // TODO: This is not guaranteed to work and is subject to races. The mempool should pause processing executed - // transactions until consensus is in sync. - if self - .state_store - .with_read_tx(|tx| SubstateRecord::exists_for_transaction(tx, &transaction_id))? - { - debug!( - target: LOG_TARGET, - "🎱 Transaction {} already processed. Ignoring", - transaction_id - ); - return Ok(()); - } - - let executed = match exec_result { - Ok(mut executed) => { - info!( - target: LOG_TARGET, - "✅ Transaction {} executed ({}) in {:?}", - executed.id(), - executed.result().finalize.result, - executed.execution_time() - ); - let has_involved_shards = executed.num_inputs_and_outputs() > 0; - - match self.after_execute_validator.validate(&executed).await { - Ok(_) => { - info!( - target: LOG_TARGET, - "✅ Transaction {} passed validation", - executed.id(), - ); - // Add the transaction result and push it into the pool for consensus. This is done in a single - // transaction so that if we receive a proposal for this transaction, we - // either are awaiting execution OR execution is complete and it's in the pool. - self.state_store.with_write_tx(|tx| { - if !has_involved_shards { - match executed.result().finalize.result.full_reject() { - Some(reason) => { - executed - .set_abort(format!("Transaction failed: {}", reason)) - .update(tx)?; - }, - None => { - executed - .set_abort("Mempool after execution validation failed: No involved shards") - .update(tx)?; - }, - } - - return Ok::<_, MempoolError>(()); - } - - executed.update(tx)?; - Ok::<_, MempoolError>(()) - })?; - }, - Err(e) => { - info!( - target: LOG_TARGET, - "❌ Executed transaction {} failed validation: {}", - executed.id(), - e, - ); - #[cfg(feature = "metrics")] - self.metrics.on_transaction_validation_error(&transaction_id, &e); - self.state_store.with_write_tx(|tx| { - match executed.result().finalize.result.full_reject() { - Some(reason) => { - executed - .set_abort(format!("Transaction failed: {}", reason)) - .update(tx)?; - }, - None => { - executed - .set_abort(format!("Mempool after execution validation failed: {}", e)) - .update(tx)?; - }, - } - - Ok::<_, MempoolError>(()) - })?; - // We want this to go though to consensus, because validation may only fail in this shard (e.g - // outputs already exist) so we need to send LocalPrepared(ABORT) to - // other shards. - }, - } - - // TODO: This transaction executed but no shard is involved even after execution - // (happens for CreateFreeTestCoin only) so we just ignore it. - if !has_involved_shards { - warn!( - target: LOG_TARGET, - "Transaction {} has no involved shards after executing. Ignoring", - transaction_id - ); - self.transactions.remove(&transaction_id); - return Ok(()); - } - - executed - }, - Err(e) => { - error!( - target: LOG_TARGET, - "❌ Transaction {} failed: {}", - transaction_id, - e - ); - self.state_store.with_write_tx(|tx| { - TransactionRecord::get(&**tx, &transaction_id)? - .set_abort(format!("Mempool failed to execute: {}", e)) - .update(tx) - })?; - - self.transactions.remove(&transaction_id); - - return Ok(()); - }, - }; - - let current_epoch = self.consensus_handle.current_view().get_epoch(); - - let local_committee_shard = self.epoch_manager.get_local_committee_info(current_epoch).await?; - let all_inputs_iter = executed.all_inputs_iter().map(|i| i.to_substate_address()); - let is_input_shard = local_committee_shard.includes_any_shard(all_inputs_iter) || - (executed.transaction().inputs().is_empty() && executed.transaction().filled_inputs().is_empty()); - - if should_propagate && is_input_shard { - // Forward the transaction to any output shards that are not part of the input shard set as these have - // already been forwarded - let num_committees = self.epoch_manager.get_num_committees(current_epoch).await?; - let input_shards = executed - .resolved_inputs() - .iter() - .map(|s| s.versioned_substate_id().to_committee_shard(num_committees)) - .collect::>(); - let tx_substate_address = SubstateAddress::for_transaction_receipt(executed.id().into_receipt_address()); - let output_shards = executed - .resulting_outputs() - .iter() - // All involved shards commit the transaction receipt, so we exclude the shard @ tx_substate_address from propagation and consensus. - .map(|s| s.to_substate_address()) - .filter(|s| *s != tx_substate_address) - .filter(|s| !input_shards.contains(&s.to_shard(num_committees))) - .collect(); - - if let Err(err) = self - .gossip - .forward_to_foreign_replicas( - current_epoch, - output_shards, - NewTransactionMessage { - transaction: executed.transaction().clone(), - output_shards: executed - .resulting_outputs() - .iter() - .map(|s| s.to_substate_address()) - .collect::>(), - }, - sender_shard, - ) - .await - { - error!( - target: LOG_TARGET, - "Unable to propagate transaction among peers: {}", err - ); - } - } - - // Notify consensus that a transaction is ready to go! - let pending_exec_size = self.pending_executions.len(); - if self - .tx_executed_transactions - .send((*executed.id(), pending_exec_size)) - .await - .is_err() - { - debug!( - target: LOG_TARGET, - "Executed transaction channel closed before executed transaction could be sent" - ); - } - - self.transactions.remove(&transaction_id); - Ok(()) - } - fn transaction_exists(&self, id: &TransactionId) -> Result { if self.transactions.contains(id) { debug!( diff --git a/applications/tari_validator_node/src/p2p/services/mempool/validators/after/has_involved_shards.rs b/applications/tari_validator_node/src/p2p/services/mempool/validators/after/has_involved_shards.rs deleted file mode 100644 index ec3b2a313..000000000 --- a/applications/tari_validator_node/src/p2p/services/mempool/validators/after/has_involved_shards.rs +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2023 The Tari Project -// SPDX-License-Identifier: BSD-3-Clause - -use async_trait::async_trait; -use log::*; -use tari_dan_storage::consensus_models::ExecutedTransaction; - -use crate::p2p::services::mempool::{MempoolError, Validator}; - -const LOG_TARGET: &str = "tari::dan::mempool::validators::has_involved_shards"; - -/// Refuse to process the transaction if it does not have any involved shards. -/// This may be removed in future in favour of a stricter rule that requires all transactions to have at least one -/// input/input_ref before execution. Currently, we need to allow zero inputs because of CreateFreeTestCoins. -pub struct HasInvolvedShards; - -impl HasInvolvedShards { - pub fn new() -> Self { - Self - } -} - -#[async_trait] -impl Validator for HasInvolvedShards { - type Error = MempoolError; - - async fn validate(&self, executed: &ExecutedTransaction) -> Result<(), Self::Error> { - if executed.num_inputs_and_outputs() == 0 { - warn!(target: LOG_TARGET, "HasInvolvedShards - FAIL: No input or output shards"); - return Err(MempoolError::NoInvolvedShards { - transaction_id: *executed.id(), - }); - } - - Ok(()) - } -} diff --git a/applications/tari_validator_node/src/p2p/services/mempool/validators/after/mod.rs b/applications/tari_validator_node/src/p2p/services/mempool/validators/after/mod.rs deleted file mode 100644 index f1c3cba3d..000000000 --- a/applications/tari_validator_node/src/p2p/services/mempool/validators/after/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright 2023 The Tari Project -// SPDX-License-Identifier: BSD-3-Clause -mod has_involved_shards; -mod outputs_dont_exist_locally; - -pub use has_involved_shards::*; -pub use outputs_dont_exist_locally::*; diff --git a/applications/tari_validator_node/src/p2p/services/mempool/validators/after/outputs_dont_exist_locally.rs b/applications/tari_validator_node/src/p2p/services/mempool/validators/after/outputs_dont_exist_locally.rs deleted file mode 100644 index 60b1f5407..000000000 --- a/applications/tari_validator_node/src/p2p/services/mempool/validators/after/outputs_dont_exist_locally.rs +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2023 The Tari Project -// SPDX-License-Identifier: BSD-3-Clause - -use async_trait::async_trait; -use log::*; -use tari_dan_storage::{ - consensus_models::{ExecutedTransaction, SubstateRecord}, - StateStore, -}; - -use crate::p2p::services::mempool::{MempoolError, Validator}; - -const LOG_TARGET: &str = "tari::dan::mempool::validators::outputs_dont_exist"; - -/// Refuse to process the transaction if any input_refs are downed -pub struct OutputsDontExistLocally { - store: TStateStore, -} - -impl OutputsDontExistLocally { - pub fn new(store: TStateStore) -> Self { - Self { store } - } -} - -#[async_trait] -impl Validator for OutputsDontExistLocally -where TStateStore: StateStore + Send + Sync -{ - type Error = MempoolError; - - async fn validate(&self, executed: &ExecutedTransaction) -> Result<(), Self::Error> { - if executed.resulting_outputs().is_empty() { - debug!(target: LOG_TARGET, "OutputsDontExistLocally - OK"); - return Ok(()); - } - - if self - .store - .with_read_tx(|tx| SubstateRecord::any_exist(tx, executed.resulting_outputs()))? - { - warn!(target: LOG_TARGET, "OutputsDontExistLocally - FAIL"); - return Err(MempoolError::OutputSubstateExists { - transaction_id: *executed.id(), - }); - } - - debug!(target: LOG_TARGET, "OutputsDontExistLocally - OK"); - Ok(()) - } -} diff --git a/applications/tari_validator_node/src/p2p/services/mempool/validators/and_then.rs b/applications/tari_validator_node/src/p2p/services/mempool/validators/and_then.rs deleted file mode 100644 index 68fced0e8..000000000 --- a/applications/tari_validator_node/src/p2p/services/mempool/validators/and_then.rs +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2023 The Tari Project -// SPDX-License-Identifier: BSD-3-Clause - -use async_trait::async_trait; - -use super::Validator; - -pub struct AndThen { - first: A, - second: U, -} - -impl AndThen { - pub fn new(first: A, second: U) -> Self { - Self { first, second } - } -} - -#[async_trait] -impl Validator for AndThen -where - A: Validator + Send + Sync, - B: Validator + Send + Sync, - T: Sync, -{ - type Error = A::Error; - - async fn validate(&self, input: &T) -> Result<(), Self::Error> { - self.first.validate(input).await?; - self.second.validate(input).await?; - Ok(()) - } -} diff --git a/applications/tari_validator_node/src/p2p/services/mempool/validators/before/claim_fee_instructions.rs b/applications/tari_validator_node/src/p2p/services/mempool/validators/before/claim_fee_instructions.rs deleted file mode 100644 index b8a8174dd..000000000 --- a/applications/tari_validator_node/src/p2p/services/mempool/validators/before/claim_fee_instructions.rs +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2023 The Tari Project -// SPDX-License-Identifier: BSD-3-Clause - -use async_trait::async_trait; -use log::*; -use tari_dan_common_types::Epoch; -use tari_engine_types::instruction::Instruction; -use tari_epoch_manager::EpochManagerReader; -use tari_transaction::Transaction; - -use crate::p2p::services::mempool::{MempoolError, Validator}; - -const LOG_TARGET: &str = "tari::dan::mempool::validators::claim_fee_instructions"; - -#[derive(Debug)] -pub struct ClaimFeeTransactionValidator { - epoch_manager: TEpochManager, -} - -impl ClaimFeeTransactionValidator { - pub fn new(epoch_manager: TEpochManager) -> Self { - Self { epoch_manager } - } -} - -#[async_trait] -impl Validator for ClaimFeeTransactionValidator { - type Error = MempoolError; - - async fn validate(&self, transaction: &Transaction) -> Result<(), MempoolError> { - let current_epoch = self.epoch_manager.current_epoch().await?; - - let mut claim_fees = transaction - .fee_instructions() - .iter() - .chain(transaction.instructions()) - .filter_map(|i| { - if let Instruction::ClaimValidatorFees { epoch, .. } = i { - Some(epoch) - } else { - None - } - }); - - if let Some(epoch) = claim_fees.find(|e| **e >= current_epoch.as_u64()) { - warn!( - target: LOG_TARGET, - "ClaimFeeTransactionValidator - FAIL: Rejecting fee claim for epoch {} because it is equal or greater than the current epoch {}", - epoch, - current_epoch - ); - return Err(MempoolError::ValidatorFeeClaimEpochInvalid { - transaction_id: *transaction.id(), - given_epoch: Epoch(*epoch), - }); - } - - Ok(()) - } -} diff --git a/applications/tari_validator_node/src/p2p/services/mempool/validators/before/epoch_range.rs b/applications/tari_validator_node/src/p2p/services/mempool/validators/before/epoch_range.rs deleted file mode 100644 index 3327cc86b..000000000 --- a/applications/tari_validator_node/src/p2p/services/mempool/validators/before/epoch_range.rs +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2023 The Tari Project -// SPDX-License-Identifier: BSD-3-Clause - -use async_trait::async_trait; -use log::warn; -use tari_dan_common_types::NodeAddressable; -use tari_epoch_manager::{base_layer::EpochManagerHandle, EpochManagerReader}; -use tari_transaction::Transaction; - -use crate::p2p::services::mempool::{MempoolError, Validator}; - -const LOG_TARGET: &str = "tari::dan::mempool::validators::epoch_range"; - -#[derive(Debug)] -pub struct EpochRangeValidator { - epoch_manager: EpochManagerHandle, -} - -impl EpochRangeValidator { - pub fn new(epoch_manager: EpochManagerHandle) -> Self { - Self { epoch_manager } - } -} - -#[async_trait] -impl Validator for EpochRangeValidator { - type Error = MempoolError; - - async fn validate(&self, transaction: &Transaction) -> Result<(), MempoolError> { - let current_epoch = self.epoch_manager.current_epoch().await?; - if let Some(min_epoch) = transaction.min_epoch() { - if current_epoch < min_epoch { - warn!(target: LOG_TARGET, "EpochRangeValidator - FAIL: Current epoch {current_epoch} less than minimum epoch {min_epoch}."); - return Err(MempoolError::CurrentEpochLessThanMinimum { - current_epoch, - min_epoch, - }); - } - } - - if let Some(max_epoch) = transaction.max_epoch() { - if current_epoch > max_epoch { - warn!(target: LOG_TARGET, "EpochRangeValidator - FAIL: Current epoch {current_epoch} greater than maximum epoch {max_epoch}."); - return Err(MempoolError::CurrentEpochGreaterThanMaximum { - current_epoch, - max_epoch, - }); - } - } - - Ok(()) - } -} diff --git a/applications/tari_validator_node/src/p2p/services/mempool/validators/before/has_inputs.rs b/applications/tari_validator_node/src/p2p/services/mempool/validators/before/has_inputs.rs deleted file mode 100644 index 1cec19e96..000000000 --- a/applications/tari_validator_node/src/p2p/services/mempool/validators/before/has_inputs.rs +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2023 The Tari Project -// SPDX-License-Identifier: BSD-3-Clause - -use async_trait::async_trait; -use log::*; -use tari_engine_types::instruction::Instruction; -use tari_transaction::Transaction; - -use crate::p2p::services::mempool::{MempoolError, Validator}; - -const LOG_TARGET: &str = "tari::dan::mempool::validators::has_involved_shards"; - -/// Refuse to process the transaction if it does not have any inputs. -/// We make an exception (for now) for CreateFreeTestCoins transactions, which have no inputs. -pub struct HasInputs; - -impl HasInputs { - pub fn new() -> Self { - Self - } -} - -#[async_trait] -impl Validator for HasInputs { - type Error = MempoolError; - - async fn validate(&self, transaction: &Transaction) -> Result<(), Self::Error> { - if transaction.all_inputs_iter().next().is_none() { - // TODO: remove this conditional when we remove CreateFreeTestCoins - if transaction - .fee_instructions() - .iter() - .any(|i| matches!(i, Instruction::CreateFreeTestCoins { .. })) - { - debug!(target: LOG_TARGET, "HasInputs - OK: CreateFreeTestCoins"); - return Ok(()); - } - - warn!(target: LOG_TARGET, "HasInputs - FAIL: No input shards"); - return Err(MempoolError::NoInputs { - transaction_id: *transaction.id(), - }); - } - - debug!(target: LOG_TARGET, "HasInputs - OK"); - Ok(()) - } -} diff --git a/applications/tari_validator_node/src/p2p/services/mempool/validators/mod.rs b/applications/tari_validator_node/src/p2p/services/mempool/validators/mod.rs deleted file mode 100644 index 54e47faf4..000000000 --- a/applications/tari_validator_node/src/p2p/services/mempool/validators/mod.rs +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2022 The Tari Project -// SPDX-License-Identifier: BSD-3-Clause - -pub use after::*; -pub use and_then::*; -pub use before::*; - -mod after; -mod before; - -mod and_then; -use async_trait::async_trait; - -#[async_trait] -pub trait Validator { - type Error; - - async fn validate(&self, input: &T) -> Result<(), Self::Error>; - - fn boxed(self) -> BoxedValidator - where Self: Sized + Send + Sync + 'static { - BoxedValidator { inner: Box::new(self) } - } - - fn and_then(self, other: V) -> AndThen - where - V: Validator, - Self: Sized, - { - AndThen::new(self, other) - } -} - -pub struct BoxedValidator { - inner: Box + Send + Sync + 'static>, -} - -#[async_trait] -impl Validator for BoxedValidator { - type Error = E; - - async fn validate(&self, input: &T) -> Result<(), Self::Error> { - self.inner.validate(input).await - } -} diff --git a/applications/tari_validator_node/src/transaction_validators/claim_fee_instructions.rs b/applications/tari_validator_node/src/transaction_validators/claim_fee_instructions.rs new file mode 100644 index 000000000..2434911fd --- /dev/null +++ b/applications/tari_validator_node/src/transaction_validators/claim_fee_instructions.rs @@ -0,0 +1,54 @@ +// Copyright 2024 The Tari Project +// SPDX-License-Identifier: BSD-3-Clause + +use log::*; +use tari_dan_common_types::Epoch; +use tari_engine_types::instruction::Instruction; +use tari_transaction::Transaction; + +use crate::{transaction_validators::error::TransactionValidationError, validator::Validator}; + +const LOG_TARGET: &str = "tari::dan::validators::claim_fee_instructions"; + +#[derive(Debug, Default)] +pub struct ClaimFeeTransactionValidator; + +impl ClaimFeeTransactionValidator { + pub fn new() -> Self { + Self + } +} + +impl Validator for ClaimFeeTransactionValidator { + type Context = Epoch; + type Error = TransactionValidationError; + + fn validate(&self, ¤t_epoch: &Epoch, transaction: &Transaction) -> Result<(), Self::Error> { + let mut claim_fees = transaction + .fee_instructions() + .iter() + .chain(transaction.instructions()) + .filter_map(|i| { + if let Instruction::ClaimValidatorFees { epoch, .. } = i { + Some(epoch) + } else { + None + } + }); + + if let Some(&epoch) = claim_fees.find(|e| **e >= current_epoch.as_u64()) { + warn!( + target: LOG_TARGET, + "ClaimFeeTransactionValidator - FAIL: Rejecting fee claim for epoch {} because it is equal or greater than the current epoch {}", + epoch, + current_epoch + ); + return Err(TransactionValidationError::ValidatorFeeClaimEpochInvalid { + transaction_id: *transaction.id(), + given_epoch: Epoch(epoch), + }); + } + + Ok(()) + } +} diff --git a/applications/tari_validator_node/src/transaction_validators/epoch_range.rs b/applications/tari_validator_node/src/transaction_validators/epoch_range.rs new file mode 100644 index 000000000..6449e3b8f --- /dev/null +++ b/applications/tari_validator_node/src/transaction_validators/epoch_range.rs @@ -0,0 +1,48 @@ +// Copyright 2024 The Tari Project +// SPDX-License-Identifier: BSD-3-Clause + +use log::warn; +use tari_dan_common_types::Epoch; +use tari_transaction::Transaction; + +use crate::{transaction_validators::TransactionValidationError, validator::Validator}; + +const LOG_TARGET: &str = "tari::dan::mempool::validators::epoch_range"; + +#[derive(Debug, Default)] +pub struct EpochRangeValidator; + +impl EpochRangeValidator { + pub fn new() -> Self { + Self + } +} + +impl Validator for EpochRangeValidator { + type Context = Epoch; + type Error = TransactionValidationError; + + fn validate(&self, ¤t_epoch: &Epoch, transaction: &Transaction) -> Result<(), TransactionValidationError> { + if let Some(min_epoch) = transaction.min_epoch() { + if current_epoch < min_epoch { + warn!(target: LOG_TARGET, "EpochRangeValidator - FAIL: Current epoch {current_epoch} less than minimum epoch {min_epoch}."); + return Err(TransactionValidationError::CurrentEpochLessThanMinimum { + current_epoch, + min_epoch, + }); + } + } + + if let Some(max_epoch) = transaction.max_epoch() { + if current_epoch > max_epoch { + warn!(target: LOG_TARGET, "EpochRangeValidator - FAIL: Current epoch {current_epoch} greater than maximum epoch {max_epoch}."); + return Err(TransactionValidationError::CurrentEpochGreaterThanMaximum { + current_epoch, + max_epoch, + }); + } + } + + Ok(()) + } +} diff --git a/applications/tari_validator_node/src/transaction_validators/error.rs b/applications/tari_validator_node/src/transaction_validators/error.rs new file mode 100644 index 000000000..067d9eb66 --- /dev/null +++ b/applications/tari_validator_node/src/transaction_validators/error.rs @@ -0,0 +1,47 @@ +// Copyright 2024 The Tari Project +// SPDX-License-Identifier: BSD-3-Clause + +use tari_dan_app_utilities::template_manager::interface::TemplateManagerError; +use tari_dan_common_types::Epoch; +use tari_dan_storage::{consensus_models::TransactionPoolError, StorageError}; +use tari_networking::NetworkingError; +use tari_transaction::TransactionId; + +use crate::virtual_substate::VirtualSubstateError; + +#[derive(thiserror::Error, Debug)] +pub enum TransactionValidationError { + #[error("Storage Error: {0}")] + StorageError(#[from] StorageError), + #[error("Virtual substate error: {0}")] + VirtualSubstateError(#[from] VirtualSubstateError), + #[error("Transaction pool error: {0}")] + TransactionPoolError(#[from] TransactionPoolError), + + // TODO: move these to MempoolValidationError type + #[error("Invalid template address: {0}")] + InvalidTemplateAddress(#[from] TemplateManagerError), + #[error("No fee instructions")] + NoFeeInstructions, + #[error("Output substate exists in transaction {transaction_id}")] + OutputSubstateExists { transaction_id: TransactionId }, + #[error("Validator fee claim instruction in transaction {transaction_id} contained invalid epoch {given_epoch}")] + ValidatorFeeClaimEpochInvalid { + transaction_id: TransactionId, + given_epoch: Epoch, + }, + #[error("Current epoch ({current_epoch}) is less than minimum epoch ({min_epoch}) required for transaction")] + CurrentEpochLessThanMinimum { current_epoch: Epoch, min_epoch: Epoch }, + #[error("Current epoch ({current_epoch}) is greater than maximum epoch ({max_epoch}) required for transaction")] + CurrentEpochGreaterThanMaximum { current_epoch: Epoch, max_epoch: Epoch }, + #[error("Transaction {transaction_id} does not have any inputs")] + NoInputs { transaction_id: TransactionId }, + #[error("Executed transaction {transaction_id} does not involved any shards")] + NoInvolvedShards { transaction_id: TransactionId }, + #[error("Invalid transaction signature")] + InvalidSignature, + #[error("Transaction {transaction_id} is not signed")] + TransactionNotSigned { transaction_id: TransactionId }, + #[error("Network error: {0}")] + NetworkingError(#[from] NetworkingError), +} diff --git a/applications/tari_validator_node/src/p2p/services/mempool/validators/before/fee.rs b/applications/tari_validator_node/src/transaction_validators/fee.rs similarity index 55% rename from applications/tari_validator_node/src/p2p/services/mempool/validators/before/fee.rs rename to applications/tari_validator_node/src/transaction_validators/fee.rs index 0bff2bbe3..f12601996 100644 --- a/applications/tari_validator_node/src/p2p/services/mempool/validators/before/fee.rs +++ b/applications/tari_validator_node/src/transaction_validators/fee.rs @@ -1,25 +1,24 @@ -// Copyright 2023 The Tari Project +// Copyright 2024 The Tari Project // SPDX-License-Identifier: BSD-3-Clause -use async_trait::async_trait; use log::warn; use tari_transaction::Transaction; -use crate::p2p::services::mempool::{MempoolError, Validator}; +use crate::{transaction_validators::TransactionValidationError, validator::Validator}; const LOG_TARGET: &str = "tari::dan::mempool::validators::fee"; #[derive(Debug)] pub struct FeeTransactionValidator; -#[async_trait] impl Validator for FeeTransactionValidator { - type Error = MempoolError; + type Context = (); + type Error = TransactionValidationError; - async fn validate(&self, transaction: &Transaction) -> Result<(), MempoolError> { + fn validate(&self, _context: &(), transaction: &Transaction) -> Result<(), TransactionValidationError> { if transaction.fee_instructions().is_empty() { warn!(target: LOG_TARGET, "FeeTransactionValidator - FAIL: No fee instructions"); - return Err(MempoolError::NoFeeInstructions); + return Err(TransactionValidationError::NoFeeInstructions); } Ok(()) } diff --git a/applications/tari_validator_node/src/transaction_validators/has_inputs.rs b/applications/tari_validator_node/src/transaction_validators/has_inputs.rs new file mode 100644 index 000000000..05b5ebba7 --- /dev/null +++ b/applications/tari_validator_node/src/transaction_validators/has_inputs.rs @@ -0,0 +1,37 @@ +// Copyright 2024 The Tari Project +// SPDX-License-Identifier: BSD-3-Clause + +use log::*; +use tari_transaction::Transaction; + +use crate::{transaction_validators::TransactionValidationError, validator::Validator}; + +const LOG_TARGET: &str = "tari::dan::mempool::validators::has_involved_shards"; + +/// Refuse to process the transaction if it does not have any inputs. +/// We make an exception (for now) for CreateFreeTestCoins transactions, which have no inputs. +#[derive(Debug, Clone, Default)] +pub struct HasInputs; + +impl HasInputs { + pub fn new() -> Self { + Self + } +} + +impl Validator for HasInputs { + type Context = (); + type Error = TransactionValidationError; + + fn validate(&self, _context: &(), transaction: &Transaction) -> Result<(), Self::Error> { + if transaction.all_inputs_iter().next().is_none() { + warn!(target: LOG_TARGET, "HasInputs - FAIL: No input shards"); + return Err(TransactionValidationError::NoInputs { + transaction_id: *transaction.id(), + }); + } + + debug!(target: LOG_TARGET, "HasInputs - OK"); + Ok(()) + } +} diff --git a/applications/tari_validator_node/src/p2p/services/mempool/validators/before/mod.rs b/applications/tari_validator_node/src/transaction_validators/mod.rs similarity index 72% rename from applications/tari_validator_node/src/p2p/services/mempool/validators/before/mod.rs rename to applications/tari_validator_node/src/transaction_validators/mod.rs index 3ac77ef34..ff6a9140e 100644 --- a/applications/tari_validator_node/src/p2p/services/mempool/validators/before/mod.rs +++ b/applications/tari_validator_node/src/transaction_validators/mod.rs @@ -1,5 +1,6 @@ -// Copyright 2023 The Tari Project +// Copyright 2022 The Tari Project // SPDX-License-Identifier: BSD-3-Clause + mod claim_fee_instructions; mod epoch_range; mod fee; @@ -13,3 +14,9 @@ pub use fee::*; pub use has_inputs::*; pub use signature::*; pub use template_exists::*; + +mod error; +mod with_context; + +pub use error::*; +pub use with_context::*; diff --git a/applications/tari_validator_node/src/p2p/services/mempool/validators/before/signature.rs b/applications/tari_validator_node/src/transaction_validators/signature.rs similarity index 64% rename from applications/tari_validator_node/src/p2p/services/mempool/validators/before/signature.rs rename to applications/tari_validator_node/src/transaction_validators/signature.rs index 2a8a835c2..81d107e53 100644 --- a/applications/tari_validator_node/src/p2p/services/mempool/validators/before/signature.rs +++ b/applications/tari_validator_node/src/transaction_validators/signature.rs @@ -1,32 +1,31 @@ // Copyright 2023 The Tari Project // SPDX-License-Identifier: BSD-3-Clause -use async_trait::async_trait; use log::warn; use tari_transaction::Transaction; -use crate::p2p::services::mempool::{MempoolError, Validator}; +use crate::{transaction_validators::TransactionValidationError, validator::Validator}; const LOG_TARGET: &str = "tari::dan::mempool::validators::signature"; #[derive(Debug)] pub struct TransactionSignatureValidator; -#[async_trait] impl Validator for TransactionSignatureValidator { - type Error = MempoolError; + type Context = (); + type Error = TransactionValidationError; - async fn validate(&self, transaction: &Transaction) -> Result<(), MempoolError> { + fn validate(&self, _context: &(), transaction: &Transaction) -> Result<(), TransactionValidationError> { if transaction.signatures().is_empty() { warn!(target: LOG_TARGET, "TransactionSignatureValidator - FAIL: No signatures"); - return Err(MempoolError::TransactionNotSigned { + return Err(TransactionValidationError::TransactionNotSigned { transaction_id: *transaction.id(), }); } if !transaction.verify_all_signatures() { warn!(target: LOG_TARGET, "TransactionSignatureValidator - FAIL: Invalid signature"); - return Err(MempoolError::InvalidSignature); + return Err(TransactionValidationError::InvalidSignature); } Ok(()) diff --git a/applications/tari_validator_node/src/p2p/services/mempool/validators/before/template_exists.rs b/applications/tari_validator_node/src/transaction_validators/template_exists.rs similarity index 78% rename from applications/tari_validator_node/src/p2p/services/mempool/validators/before/template_exists.rs rename to applications/tari_validator_node/src/transaction_validators/template_exists.rs index 213d67c12..298145935 100644 --- a/applications/tari_validator_node/src/p2p/services/mempool/validators/before/template_exists.rs +++ b/applications/tari_validator_node/src/transaction_validators/template_exists.rs @@ -1,14 +1,13 @@ // Copyright 2023 The Tari Project // SPDX-License-Identifier: BSD-3-Clause -use async_trait::async_trait; use log::warn; use tari_dan_app_utilities::template_manager::{implementation::TemplateManager, interface::TemplateManagerError}; use tari_dan_common_types::NodeAddressable; use tari_engine_types::instruction::Instruction; use tari_transaction::Transaction; -use crate::p2p::services::mempool::{MempoolError, Validator}; +use crate::{transaction_validators::TransactionValidationError, validator::Validator}; const LOG_TARGET: &str = "tari::dan::mempool::validators::template_exists"; @@ -23,21 +22,21 @@ impl TemplateExistsValidator { } } -#[async_trait] impl Validator for TemplateExistsValidator { - type Error = MempoolError; + type Context = (); + type Error = TransactionValidationError; - async fn validate(&self, transaction: &Transaction) -> Result<(), MempoolError> { + fn validate(&self, _context: &(), transaction: &Transaction) -> Result<(), TransactionValidationError> { let instructions = transaction.instructions(); for instruction in instructions { match instruction { Instruction::CallFunction { template_address, .. } => { let template_exists = self.template_manager.template_exists(template_address); match template_exists { - Err(e) => return Err(MempoolError::InvalidTemplateAddress(e)), + Err(e) => return Err(TransactionValidationError::InvalidTemplateAddress(e)), Ok(false) => { warn!(target: LOG_TARGET, "TemplateExistsValidator - FAIL: Template not found"); - return Err(MempoolError::InvalidTemplateAddress( + return Err(TransactionValidationError::InvalidTemplateAddress( TemplateManagerError::TemplateNotFound { address: *template_address, }, diff --git a/applications/tari_validator_node/src/transaction_validators/with_context.rs b/applications/tari_validator_node/src/transaction_validators/with_context.rs new file mode 100644 index 000000000..250695d30 --- /dev/null +++ b/applications/tari_validator_node/src/transaction_validators/with_context.rs @@ -0,0 +1,24 @@ +// Copyright 2024 The Tari Project +// SPDX-License-Identifier: BSD-3-Clause + +use std::marker::PhantomData; + +use crate::validator::Validator; + +#[derive(Debug, Default)] +pub struct WithContext(PhantomData<(C, T, E)>); + +impl WithContext { + pub fn new() -> Self { + Self(PhantomData) + } +} + +impl Validator for WithContext { + type Context = C; + type Error = E; + + fn validate(&self, _context: &C, _input: &T) -> Result<(), Self::Error> { + Ok(()) + } +} diff --git a/applications/tari_validator_node/src/validator.rs b/applications/tari_validator_node/src/validator.rs new file mode 100644 index 000000000..2ed78df8e --- /dev/null +++ b/applications/tari_validator_node/src/validator.rs @@ -0,0 +1,110 @@ +// Copyright 2024 The Tari Project +// SPDX-License-Identifier: BSD-3-Clause + +use std::fmt::{Debug, Formatter}; + +pub trait Validator { + type Context; + type Error; + + fn validate(&self, context: &Self::Context, input: &T) -> Result<(), Self::Error>; + + fn boxed(self) -> BoxedValidator + where Self: Sized + Send + Sync + 'static { + BoxedValidator { inner: Box::new(self) } + } + + fn and_then(self, other: V) -> AndThen + where + V: Validator, + Self: Sized, + { + AndThen::new(self, other) + } + + fn map_context(self, f: F, validator: V) -> MapContext + where + V: Validator, + F: Fn(&Self::Context) -> V::Context, + Self: Sized, + { + MapContext::new(self, validator, f) + } +} + +pub struct BoxedValidator { + inner: Box + Send + Sync + 'static>, +} + +impl Validator for BoxedValidator { + type Context = C; + type Error = E; + + fn validate(&self, context: &Self::Context, input: &T) -> Result<(), Self::Error> { + self.inner.validate(context, input) + } +} + +impl Debug for BoxedValidator { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("BoxedValidator") + .field("inner", &"Box") + .finish() + } +} + +pub struct AndThen { + first: A, + second: B, +} + +impl AndThen { + pub fn new(first: A, second: B) -> Self { + Self { first, second } + } +} + +impl Validator for AndThen +where + A: Validator + Send + Sync, + B: Validator + Send + Sync, + T: Sync, +{ + type Context = A::Context; + type Error = A::Error; + + fn validate(&self, context: &Self::Context, input: &T) -> Result<(), Self::Error> { + self.first.validate(context, input)?; + self.second.validate(context, input)?; + Ok(()) + } +} + +pub struct MapContext { + first: A, + second: B, + mapper: F, +} + +impl MapContext { + pub fn new(first: A, second: B, mapper: F) -> Self { + Self { first, second, mapper } + } +} + +impl Validator for MapContext +where + A: Validator + Send + Sync, + B: Validator + Send + Sync, + F: Fn(&A::Context) -> B::Context, + T: Sync, +{ + type Context = A::Context; + type Error = A::Error; + + fn validate(&self, context: &Self::Context, input: &T) -> Result<(), Self::Error> { + self.first.validate(context, input)?; + self.second.validate(&(self.mapper)(context), input)?; + Ok(()) + } +} diff --git a/bindings/src/types/Decision.ts b/bindings/src/types/Decision.ts index 1411b5cfe..a697b0f15 100644 --- a/bindings/src/types/Decision.ts +++ b/bindings/src/types/Decision.ts @@ -1,3 +1,3 @@ // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. -export type Decision = "Commit" | "Abort" | "Deferred"; +export type Decision = "Commit" | "Abort"; diff --git a/bindings/src/types/ForeignProposal.ts b/bindings/src/types/ForeignProposal.ts index a04264c51..fcd39385b 100644 --- a/bindings/src/types/ForeignProposal.ts +++ b/bindings/src/types/ForeignProposal.ts @@ -3,7 +3,7 @@ import type { ForeignProposalState } from "./ForeignProposalState"; import type { NodeHeight } from "./NodeHeight"; export interface ForeignProposal { - bucket: number; + shard: number; block_id: string; state: ForeignProposalState; proposed_height: NodeHeight | null; diff --git a/bindings/src/types/Instruction.ts b/bindings/src/types/Instruction.ts index 8ea451274..38fb0b612 100644 --- a/bindings/src/types/Instruction.ts +++ b/bindings/src/types/Instruction.ts @@ -1,9 +1,7 @@ // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. -import type { Amount } from "./Amount"; import type { Arg } from "./Arg"; import type { ComponentAddress } from "./ComponentAddress"; import type { ConfidentialClaim } from "./ConfidentialClaim"; -import type { ConfidentialOutput } from "./ConfidentialOutput"; import type { LogLevel } from "./LogLevel"; export type Instruction = @@ -14,5 +12,4 @@ export type Instruction = | { EmitLog: { level: LogLevel; message: string } } | { ClaimBurn: { claim: ConfidentialClaim } } | { ClaimValidatorFees: { epoch: number; validator_public_key: string } } - | "DropAllProofsInWorkspace" - | { CreateFreeTestCoins: { revealed_amount: Amount; output: ConfidentialOutput | null } }; + | "DropAllProofsInWorkspace"; diff --git a/bindings/src/types/TransactionPoolRecord.ts b/bindings/src/types/TransactionPoolRecord.ts index bf3c67e2e..533d96108 100644 --- a/bindings/src/types/TransactionPoolRecord.ts +++ b/bindings/src/types/TransactionPoolRecord.ts @@ -1,12 +1,17 @@ // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. import type { Decision } from "./Decision"; -import type { TransactionAtom } from "./TransactionAtom"; +import type { Evidence } from "./Evidence"; +import type { LeaderFee } from "./LeaderFee"; import type { TransactionPoolStage } from "./TransactionPoolStage"; export interface TransactionPoolRecord { - atom: TransactionAtom; + transaction_id: string; + evidence: Evidence; + transaction_fee: number; + leader_fee: LeaderFee | null; stage: TransactionPoolStage; pending_stage: TransactionPoolStage | null; + original_decision: Decision; local_decision: Decision | null; remote_decision: Decision | null; is_ready: boolean; diff --git a/dan_layer/common_types/src/substate_address.rs b/dan_layer/common_types/src/substate_address.rs index fe4d76295..529a0b05f 100644 --- a/dan_layer/common_types/src/substate_address.rs +++ b/dan_layer/common_types/src/substate_address.rs @@ -22,10 +22,7 @@ use tari_engine_types::{ }; use tari_template_lib::{models::ObjectKey, Hash}; -use crate::{ - shard::Shard, - uint::{U256, U256_ZERO}, -}; +use crate::{shard::Shard, uint::U256}; /// This is u16::MAX / 2 as a u32 = 32767 shards. Any number of shards greater than this will be clamped to this value. /// This is done to limit the number of addresses that are added to the final shard to allow the same shard boundaries. @@ -113,6 +110,10 @@ impl SubstateAddress { Ok(Self(hash.into_array())) } + pub fn is_zero(&self) -> bool { + self.as_bytes().iter().all(|&b| b == 0) + } + pub const fn into_array(self) -> [u8; 32] { self.0 } @@ -140,16 +141,10 @@ impl SubstateAddress { /// Calculates and returns the shard number that this SubstateAddress belongs. /// A shard is a division of the 256-bit shard space where the boundary of the division if always a power of two. pub fn to_shard(&self, num_shards: u32) -> Shard { - if num_shards == 0 { + if num_shards <= 1 || self.is_zero() { return Shard::from(0u32); } let addr_u256 = self.to_u256(); - if addr_u256 == U256_ZERO { - return Shard::from(0u32); - } - if num_shards == 1 { - return Shard::from(0u32); - } if num_shards.is_power_of_two() { let shard_size = U256::MAX >> num_shards.trailing_zeros(); diff --git a/dan_layer/consensus/Cargo.toml b/dan_layer/consensus/Cargo.toml index 741a2aa2d..57337877e 100644 --- a/dan_layer/consensus/Cargo.toml +++ b/dan_layer/consensus/Cargo.toml @@ -19,7 +19,6 @@ tari_state_tree = { workspace = true } # Used for PublicKey and Signature and Network enum tari_common = { workspace = true } tari_common_types = { workspace = true } -tari_mmr = { workspace = true } tari_shutdown = { workspace = true } anyhow = { workspace = true } diff --git a/dan_layer/consensus/src/hotstuff/block_change_set.rs b/dan_layer/consensus/src/hotstuff/block_change_set.rs index 8514d0856..650486c7d 100644 --- a/dan_layer/consensus/src/hotstuff/block_change_set.rs +++ b/dan_layer/consensus/src/hotstuff/block_change_set.rs @@ -121,7 +121,7 @@ impl ProposedBlockChangeSet { block_height: self.block.height, transaction_id: *transaction.transaction_id(), stage: next_stage, - evidence: transaction.atom().evidence.clone(), + evidence: transaction.evidence().clone(), is_ready, local_decision: transaction.current_decision(), }); diff --git a/dan_layer/consensus/src/hotstuff/error.rs b/dan_layer/consensus/src/hotstuff/error.rs index 78586bb04..38bcf848e 100644 --- a/dan_layer/consensus/src/hotstuff/error.rs +++ b/dan_layer/consensus/src/hotstuff/error.rs @@ -8,7 +8,6 @@ use tari_dan_storage::{ StorageError, }; use tari_epoch_manager::EpochManagerError; -use tari_mmr::BalancedBinaryMerkleProofError; use tari_state_tree::StateTreeError; use tari_transaction::{TransactionId, VersionedSubstateIdError}; @@ -45,8 +44,6 @@ pub enum HotStuffError { DecisionMismatch { block_id: BlockId, pool: &'static str }, #[error("Not the leader. {details}")] NotTheLeader { details: String }, - #[error("Merkle proof error: {0}")] - BalancedBinaryMerkleProofError(#[from] BalancedBinaryMerkleProofError), #[error("Epoch manager error: {0}")] EpochManagerError(anyhow::Error), #[error("State manager error: {0}")] @@ -183,8 +180,6 @@ pub enum ProposalValidationError { QCInvalidSignature { qc: QuorumCertificate }, #[error("Quorum was not reached: {qc}")] QuorumWasNotReached { qc: QuorumCertificate }, - #[error("Merkle proof error: {0}")] - BalancedBinaryMerkleProofError(#[from] BalancedBinaryMerkleProofError), #[error("Invalid network in block {block_id}: expected {expected_network}, given {block_network}")] InvalidNetwork { expected_network: String, diff --git a/dan_layer/consensus/src/hotstuff/mod.rs b/dan_layer/consensus/src/hotstuff/mod.rs index 5d08221fc..4f5374881 100644 --- a/dan_layer/consensus/src/hotstuff/mod.rs +++ b/dan_layer/consensus/src/hotstuff/mod.rs @@ -14,9 +14,9 @@ mod on_propose; mod on_ready_to_vote_on_local_block; mod on_receive_foreign_proposal; mod on_receive_local_proposal; +mod on_receive_new_transaction; mod on_receive_new_view; mod on_receive_request_missing_transactions; -mod on_receive_requested_transactions; mod on_receive_vote; mod on_sync_request; // mod on_sync_response; diff --git a/dan_layer/consensus/src/hotstuff/on_message_validate.rs b/dan_layer/consensus/src/hotstuff/on_message_validate.rs index 4f04f94e8..5c5346520 100644 --- a/dan_layer/consensus/src/hotstuff/on_message_validate.rs +++ b/dan_layer/consensus/src/hotstuff/on_message_validate.rs @@ -5,16 +5,10 @@ use std::collections::HashSet; use log::*; use tari_common::configuration::Network; -use tari_dan_common_types::{optional::Optional, NodeHeight}; +use tari_common_types::types::PublicKey; +use tari_dan_common_types::{Epoch, NodeHeight}; use tari_dan_storage::{ - consensus_models::{ - Block, - ExecutedTransaction, - TransactionAtom, - TransactionPool, - TransactionPoolRecord, - TransactionRecord, - }, + consensus_models::{Block, BlockId, TransactionRecord}, StateStore, StateStoreWriteTransaction, }; @@ -26,7 +20,7 @@ use super::config::HotstuffConfig; use crate::{ block_validations, hotstuff::{error::HotStuffError, HotstuffEvent}, - messages::{HotstuffMessage, ProposalMessage, RequestMissingTransactionsMessage}, + messages::{HotstuffMessage, MissingTransactionsRequest, ProposalMessage}, traits::{ConsensusSpec, OutboundMessaging}, }; @@ -41,8 +35,10 @@ pub struct OnMessageValidate { leader_strategy: TConsensusSpec::LeaderStrategy, vote_signing_service: TConsensusSpec::SignatureService, outbound_messaging: TConsensusSpec::OutboundMessaging, - transaction_pool: TransactionPool, tx_events: broadcast::Sender, + /// Keep track of max 16 in-flight requests + active_missing_transaction_requests: SimpleFixedArray, + current_request_id: u32, } impl OnMessageValidate { @@ -55,7 +51,6 @@ impl OnMessageValidate { leader_strategy: TConsensusSpec::LeaderStrategy, vote_signing_service: TConsensusSpec::SignatureService, outbound_messaging: TConsensusSpec::OutboundMessaging, - transaction_pool: TransactionPool, tx_events: broadcast::Sender, ) -> Self { Self { @@ -67,8 +62,9 @@ impl OnMessageValidate { leader_strategy, vote_signing_service, outbound_messaging, - transaction_pool, tx_events, + active_missing_transaction_requests: SimpleFixedArray::new(), + current_request_id: 0, } } @@ -93,10 +89,53 @@ impl OnMessageValidate { message: HotstuffMessage::ForeignProposal(proposal), }) }, + HotstuffMessage::MissingTransactionsResponse(msg) => { + if !self.active_missing_transaction_requests.remove_element(&msg.request_id) { + warn!(target: LOG_TARGET, "❓Received missing transactions (req_id = {}) from {} that we did not request. Discarding message", msg.request_id, from); + return Ok(MessageValidationResult::Discard); + } + if msg.transactions.len() > 1000 { + warn!(target: LOG_TARGET, "⚠️Peer sent more than the maximum amount of transactions. Discarding message"); + return Ok(MessageValidationResult::Discard); + } + Ok(MessageValidationResult::Ready { + from, + message: HotstuffMessage::MissingTransactionsResponse(msg), + }) + }, msg => Ok(MessageValidationResult::Ready { from, message: msg }), } } + pub async fn request_missing_transactions( + &mut self, + to: TConsensusSpec::Addr, + block_id: BlockId, + epoch: Epoch, + missing_txs: HashSet, + ) -> Result<(), HotStuffError> { + let request_id = self.next_request_id(); + self.active_missing_transaction_requests.insert(request_id); + self.outbound_messaging + .send( + to, + HotstuffMessage::MissingTransactionsRequest(MissingTransactionsRequest { + request_id, + block_id, + epoch, + transactions: missing_txs, + }), + ) + .await?; + Ok(()) + } + + fn next_request_id(&mut self) -> u32 { + let req_id = self.current_request_id; + self.current_request_id += 1; + req_id + } + async fn process_local_proposal( &mut self, current_height: NodeHeight, @@ -132,24 +171,11 @@ impl OnMessageValidate { }); } - let Some(ready_block) = self.handle_missing_transactions(block).await? else { - // Block not ready -park it - return Ok(MessageValidationResult::NotReady); - }; - - // let vn = self - // .epoch_manager - // .get_validator_node_by_public_key(ready_block.epoch(), ready_block.proposed_by()) - // .await?; - - Ok(MessageValidationResult::Ready { - from, - message: HotstuffMessage::Proposal(ProposalMessage { block: ready_block }), - }) + self.handle_missing_transactions(from, block).await } pub async fn update_parked_blocks( - &mut self, + &self, current_height: NodeHeight, transaction_id: &TransactionId, ) -> Result, HotStuffError> { @@ -163,36 +189,6 @@ impl OnMessageValidate { info!(target: LOG_TARGET, "♻️ all transactions for block {unparked_block} are ready for consensus"); - // todo(hacky): ensure that all transactions are in the pool. Race condition: because we have not yet - // received it yet in the select! loop. - self.store.with_write_tx(|tx| { - for tx_id in unparked_block.all_transaction_ids() { - if self.transaction_pool.exists(&**tx, tx_id)? { - continue; - } - - warn!( - target: LOG_TARGET, - "⚠️ Transaction {} is missing from the transaction pool. Attempting to recover.", - tx_id - ); - - let transaction = TransactionRecord::get(&**tx, tx_id)?; - // Did the mempool execute it? - if transaction.is_executed() { - // This should never fail - let executed = ExecutedTransaction::try_from(transaction)?; - self.transaction_pool.insert(tx, executed.to_atom())?; - } else { - // Deferred execution - self.transaction_pool - .insert(tx, TransactionAtom::deferred(*transaction.id()))?; - } - } - - Ok::<_, HotStuffError>(()) - })?; - let vn = self .epoch_manager .get_validator_node_by_public_key(unparked_block.epoch(), unparked_block.proposed_by()) @@ -221,120 +217,67 @@ impl OnMessageValidate { Ok(()) } - async fn handle_missing_transactions(&mut self, block: Block) -> Result, HotStuffError> { - let (missing_tx_ids, awaiting_execution) = self + async fn handle_missing_transactions( + &mut self, + from: TConsensusSpec::Addr, + block: Block, + ) -> Result, HotStuffError> { + let missing_tx_ids = self .store .with_write_tx(|tx| self.check_for_missing_transactions(tx, &block))?; - if missing_tx_ids.is_empty() && awaiting_execution.is_empty() { - return Ok(Some(block)); + if missing_tx_ids.is_empty() { + return Ok(MessageValidationResult::Ready { + from, + message: HotstuffMessage::Proposal(ProposalMessage { block }), + }); } let _ignore = self.tx_events.send(HotstuffEvent::ProposedBlockParked { block: block.as_leaf_block(), num_missing_txs: missing_tx_ids.len(), - num_awaiting_txs: awaiting_execution.len(), + // TODO: remove + num_awaiting_txs: 0, }); - if !missing_tx_ids.is_empty() { - let block_id = *block.id(); - let epoch = block.epoch(); - let block_proposed_by = block.proposed_by().clone(); - - let vn = self - .epoch_manager - .get_validator_node_by_public_key(epoch, &block_proposed_by) - .await?; - - let mut request_from_address = vn.address; - - // (Yet another) Edge case: If we're catching up, we could be the proposer but we no longer have the - // transaction (we deleted our database) In this case, request from another random VN - // (TODO: not 100% reliable) - if request_from_address == self.local_validator_addr { - let mut local_committee = self.epoch_manager.get_local_committee(epoch).await?; - - local_committee.shuffle(); - match local_committee - .into_iter() - .find(|(addr, _)| *addr != self.local_validator_addr) - { - Some((addr, _)) => { - warn!(target: LOG_TARGET, "⚠️Requesting missing transactions from another validator {addr} because we are (presumably) catching up (local_peer_id = {})", self.local_validator_addr); - request_from_address = addr; - }, - None => { - warn!( - target: LOG_TARGET, - "❌NEVERHAPPEN: We're the only validator in the committee but we need to request missing transactions." - ); - return Ok(None); - }, - } - } - - self.outbound_messaging - .send( - request_from_address, - HotstuffMessage::RequestMissingTransactions(RequestMissingTransactionsMessage { - block_id, - epoch, - transactions: missing_tx_ids, - }), - ) - .await?; - } - - Ok(None) + Ok(MessageValidationResult::ParkedProposal { + block_id: *block.id(), + epoch: block.epoch(), + proposed_by: block.proposed_by().clone(), + missing_txs: missing_tx_ids, + }) } fn check_for_missing_transactions( &self, tx: &mut ::WriteTransaction<'_>, block: &Block, - ) -> Result<(HashSet, HashSet), HotStuffError> { + ) -> Result, HotStuffError> { if block.commands().is_empty() { debug!( target: LOG_TARGET, "✅ Block {} is empty (no missing transactions)", block ); - return Ok((HashSet::new(), HashSet::new())); - } - let (transactions, missing_tx_ids) = TransactionRecord::get_any(&**tx, block.all_transaction_ids())?; - let awaiting_execution_or_deferred = transactions - .into_iter() - .filter(|tx| tx.final_decision.is_some()) - .filter(|tx| tx.result.is_none()) - .map(|tx| *tx.transaction.id()) - .collect::>(); - - // TODO(hacky): improve this. We need to account for transactions that are deferred when determining which - // transactions are awaiting execution. - let mut awaiting_execution = HashSet::new(); - for id in &awaiting_execution_or_deferred { - if let Some(t) = TransactionPoolRecord::get(&**tx, id).optional()? { - if !t.is_deferred() { - awaiting_execution.insert(*id); - } - } + return Ok(HashSet::new()); } + let missing_tx_ids = TransactionRecord::get_missing(&**tx, block.all_transaction_ids())?; - if missing_tx_ids.is_empty() && awaiting_execution.is_empty() { + if missing_tx_ids.is_empty() { debug!( target: LOG_TARGET, "✅ Block {} has no missing transactions", block ); - return Ok((HashSet::new(), HashSet::new())); + return Ok(HashSet::new()); } info!( target: LOG_TARGET, - "⏳ Block {} has {} missing transactions and {} awaiting execution", block, missing_tx_ids.len(), awaiting_execution.len(), + "⏳ Block {} has {} missing transactions", block, missing_tx_ids.len(), ); - tx.missing_transactions_insert(block, &missing_tx_ids, &awaiting_execution)?; + tx.missing_transactions_insert(block, &missing_tx_ids, &[])?; - Ok((missing_tx_ids, awaiting_execution)) + Ok(missing_tx_ids) } } @@ -344,7 +287,12 @@ pub enum MessageValidationResult { from: TAddr, message: HotstuffMessage, }, - NotReady, + ParkedProposal { + block_id: BlockId, + epoch: Epoch, + proposed_by: PublicKey, + missing_txs: HashSet, + }, Discard, Invalid { from: TAddr, @@ -352,3 +300,42 @@ pub enum MessageValidationResult { err: HotStuffError, }, } + +#[derive(Debug, Clone)] +struct SimpleFixedArray { + elems: [Option; SZ], + ptr: usize, +} + +impl SimpleFixedArray { + pub fn new() -> Self { + Self { + elems: [None; SZ], + ptr: 0, + } + } + + pub fn insert(&mut self, elem: T) { + // We dont care about overwriting "old" elements + self.elems[self.ptr] = Some(elem); + self.ptr = (self.ptr + 1) % SZ; + } + + pub fn remove_element(&mut self, elem: &T) -> bool + where T: PartialEq { + for (i, e) in self.elems.iter().enumerate() { + if e.as_ref() == Some(elem) { + // We dont care about "holes" in the collection + self.elems[i] = None; + return true; + } + } + false + } +} + +impl Default for SimpleFixedArray { + fn default() -> Self { + Self::new() + } +} diff --git a/dan_layer/consensus/src/hotstuff/on_propose.rs b/dan_layer/consensus/src/hotstuff/on_propose.rs index b55d58706..1ab71d0fa 100644 --- a/dan_layer/consensus/src/hotstuff/on_propose.rs +++ b/dan_layer/consensus/src/hotstuff/on_propose.rs @@ -174,7 +174,7 @@ where TConsensusSpec: ConsensusSpec for mut executed in executed_transactions.into_values() { // TODO: This is a hacky workaround, if the executed transaction has no shards after execution, we // remove it from the pool so that it does not get proposed again. Ideally we should be - // able to catch this in transaction validation. + // able to catch this in transaction validation and propose ABORT. if local_committee_info.count_distinct_shards(executed.involved_addresses_iter()) == 0 { self.transaction_pool.remove(tx, *executed.id())?; executed @@ -237,13 +237,14 @@ where TConsensusSpec: ConsensusSpec fn execute_transaction( &self, store: &PendingSubstateStore, + current_epoch: Epoch, transaction_id: &TransactionId, ) -> Result { let transaction = TransactionRecord::get(store.read_transaction(), transaction_id)?; let executed = self .transaction_executor - .execute(transaction.into_transaction(), store) + .execute(transaction.into_transaction(), store, current_epoch) .map_err(|e| HotStuffError::TransactionExecutorError(e.to_string()))?; Ok(executed) @@ -254,38 +255,29 @@ where TConsensusSpec: ConsensusSpec fn transaction_pool_record_to_command( &self, tx: &::ReadTransaction<'_>, + parent_block: &LeafBlock, mut tx_rec: TransactionPoolRecord, local_committee_info: &CommitteeInfo, substate_store: &mut PendingSubstateStore, executed_transactions: &mut HashMap, ) -> Result, HotStuffError> { - // Execute deferred transaction - if tx_rec.is_deferred() { - info!( - target: LOG_TARGET, - "👨‍🔧 PROPOSE: Executing deferred transaction {}", - tx_rec.transaction_id(), - ); + info!( + target: LOG_TARGET, + "👨‍🔧 PROPOSE: Executing transaction {}", + tx_rec.transaction_id(), + ); - let executed = self.execute_transaction(substate_store, tx_rec.transaction_id())?; + if tx_rec.current_stage().is_new() { + let executed = self.execute_transaction(substate_store, parent_block.epoch(), tx_rec.transaction_id())?; // Update the decision so that we can propose it tx_rec.set_local_decision(executed.decision()); - tx_rec.set_initial_evidence(executed.to_initial_evidence()); + tx_rec.set_evidence(executed.to_initial_evidence()); tx_rec.set_transaction_fee(executed.transaction_fee()); executed_transactions.insert(*executed.id(), executed); - } else if tx_rec.current_decision().is_commit() && tx_rec.current_stage().is_new() { - // Executed in mempool. Add to this block's executed transactions - let executed = ExecutedTransaction::get(tx, tx_rec.transaction_id())?; - tx_rec.set_local_decision(executed.decision()); - tx_rec.set_initial_evidence(executed.to_initial_evidence()); - tx_rec.set_transaction_fee(executed.transaction_fee()); - executed_transactions.insert(*executed.id(), executed); - } else { - // Continue... - }; + } let num_involved_shards = - local_committee_info.count_distinct_shards(tx_rec.atom().evidence.substate_addresses_iter()); + local_committee_info.count_distinct_shards(tx_rec.evidence().substate_addresses_iter()); if num_involved_shards == 0 { warn!( @@ -414,15 +406,17 @@ where TConsensusSpec: ConsensusSpec let leader_fee = tx_rec.calculate_leader_fee(involved, EXHAUST_DIVISOR); let tx_atom = tx_rec.get_final_transaction_atom(leader_fee); if tx_atom.decision.is_commit() { - let transaction = tx_rec.get_transaction(tx)?; - let result = transaction.result().ok_or_else(|| { - HotStuffError::InvariantError(format!( - "Transaction {} is committed but has no result when proposing", - tx_rec.transaction_id(), - )) - })?; - - let diff = result.finalize.result.accept().ok_or_else(|| { + let execution = tx_rec + .get_execution_for_block(tx, parent_block.block_id()) + .optional()? + .ok_or_else(|| { + HotStuffError::InvariantError(format!( + "Transaction {} is committed but has no result when proposing", + tx_rec.transaction_id(), + )) + })?; + + let diff = execution.result().finalize.accept().ok_or_else(|| { HotStuffError::InvariantError(format!( "Transaction {} has COMMIT decision but execution failed when proposing", tx_rec.transaction_id(), @@ -455,14 +449,14 @@ where TConsensusSpec: ConsensusSpec high_qc: QuorumCertificate, proposed_by: PublicKey, local_committee_info: &CommitteeInfo, - empty_block: bool, + dont_propose_transactions: bool, base_layer_block_height: u64, base_layer_block_hash: FixedHash, propose_epoch_end: bool, ) -> Result<(Block, HashMap), HotStuffError> { // TODO: Configure - const TARGET_BLOCK_SIZE: usize = 1000; - let batch = if empty_block || propose_epoch_end { + const TARGET_BLOCK_SIZE: usize = 500; + let batch = if dont_propose_transactions || propose_epoch_end { vec![] } else { self.transaction_pool.get_batch_for_next_block(tx, TARGET_BLOCK_SIZE)? @@ -496,11 +490,12 @@ where TConsensusSpec: ConsensusSpec // batch is empty for is_empty, is_epoch_end and is_epoch_start blocks let tree_store = ChainScopedTreeStore::new(epoch, local_committee_info.shard(), tx); - let mut substate_store = PendingSubstateStore::new(tree_store); + let mut substate_store = PendingSubstateStore::new(*parent_block.block_id(), tree_store); let mut executed_transactions = HashMap::new(); for transaction in batch { if let Some(command) = self.transaction_pool_record_to_command( tx, + parent_block, transaction, local_committee_info, &mut substate_store, diff --git a/dan_layer/consensus/src/hotstuff/on_ready_to_vote_on_local_block.rs b/dan_layer/consensus/src/hotstuff/on_ready_to_vote_on_local_block.rs index 6448ddedc..4898fad5e 100644 --- a/dan_layer/consensus/src/hotstuff/on_ready_to_vote_on_local_block.rs +++ b/dan_layer/consensus/src/hotstuff/on_ready_to_vote_on_local_block.rs @@ -5,7 +5,7 @@ use std::num::NonZeroU64; use log::*; -use tari_dan_common_types::{committee::CommitteeInfo, optional::Optional}; +use tari_dan_common_types::{committee::CommitteeInfo, optional::Optional, Epoch}; use tari_dan_storage::{ consensus_models::{ Block, @@ -13,7 +13,6 @@ use tari_dan_storage::{ BlockId, Command, Decision, - ExecutedTransaction, ForeignProposal, LastExecuted, LastVoted, @@ -49,6 +48,7 @@ use crate::{ const LOG_TARGET: &str = "tari::dan::consensus::hotstuff::on_ready_to_vote_on_local_block"; +#[derive(Debug, Clone)] pub struct OnReadyToVoteOnLocalBlock { local_validator_addr: TConsensusSpec::Addr, store: TConsensusSpec::StateStore, @@ -192,27 +192,9 @@ where TConsensusSpec: ConsensusSpec // Store used for transactions that have inputs without specific versions. // It lives through the entire block so multiple transactions can be sequenced together in the same block let tree_store = ChainScopedTreeStore::new(block.epoch(), block.shard(), tx); - let mut substate_store = PendingSubstateStore::new(tree_store); + let mut substate_store = PendingSubstateStore::new(*block.parent(), tree_store); let mut proposed_block_change_set = ProposedBlockChangeSet::new(block.as_leaf_block()); - // if epoch_should_start && !block.is_epoch_start() { - // warn!( - // target: LOG_TARGET, - // "❌ EpochEvent::Start command expected for block {} but not found", - // block.id() - // ); - // return Ok(proposed_block_change_set.no_vote()); - // } - // - // if epoch_should_end && !block.is_epoch_end() { - // warn!( - // target: LOG_TARGET, - // "❌ EpochEvent::End command expected for block {} but not found", - // block.id() - // ); - // return Ok(proposed_block_change_set.no_vote()); - // } - if block.is_epoch_end() && block.commands().len() > 1 { warn!( target: LOG_TARGET, @@ -278,37 +260,19 @@ where TConsensusSpec: ConsensusSpec ); match cmd { Command::LocalOnly(t) => { - if tx_rec.is_deferred() { - info!( - target: LOG_TARGET, - "👨‍🔧 LOCAL-ONLY: Executing deferred transaction {} in block {}", - tx_rec.transaction_id(), - block, - ); + info!( + target: LOG_TARGET, + "👨‍🔧 LOCAL-ONLY: Executing deferred transaction {} in block {}", + tx_rec.transaction_id(), + block, + ); - let executed = self.execute_transaction_if_required(&substate_store, &atom.id, block.id())?; - tx_rec.set_local_decision(executed.decision()); - tx_rec.set_initial_evidence(executed.to_initial_evidence()); - tx_rec.set_transaction_fee(executed.transaction_fee()); - proposed_block_change_set.add_transaction_execution(executed); - } else if tx_rec.current_decision().is_commit() && - matches!( - tx_rec.current_stage(), - // TODO: Investigate race condition where transaction pool stage is already LocalOnly - TransactionPoolStage::New | TransactionPoolStage::LocalOnly - ) - { - // We need to include the transaction execution context for this block if a transaction is yet - // to be prepared. - let execution = ExecutedTransaction::get_pending_execution_for_block(tx, block.id(), &t.id)?; - // Align the TransactionPoolRecord with the relevant execution - tx_rec.set_local_decision(execution.decision()); - tx_rec.set_initial_evidence(execution.to_initial_evidence()); - tx_rec.set_transaction_fee(execution.transaction_fee()); - proposed_block_change_set.add_transaction_execution(execution); - } else { - // continue - } + let executed = + self.execute_transaction_if_required(&substate_store, &atom.id, block.id(), block.epoch())?; + tx_rec.set_local_decision(executed.decision()); + tx_rec.set_evidence(executed.to_initial_evidence()); + tx_rec.set_transaction_fee(executed.transaction_fee()); + proposed_block_change_set.add_transaction_execution(executed); if !tx_rec.current_stage().is_new() && !tx_rec.current_stage().is_local_only() { warn!( @@ -321,13 +285,13 @@ where TConsensusSpec: ConsensusSpec return Ok(proposed_block_change_set.no_vote()); } - if tx_rec.atom().transaction_fee != t.transaction_fee { + if tx_rec.transaction_fee() != t.transaction_fee { warn!( target: LOG_TARGET, "❌ LocalOnly transaction fee disagreement for block {}. Leader proposed {}, we calculated {}", block, t.transaction_fee, - tx_rec.atom().transaction_fee + tx_rec.transaction_fee() ); return Ok(proposed_block_change_set.no_vote()); } @@ -349,7 +313,7 @@ where TConsensusSpec: ConsensusSpec } if !local_committee_info - .includes_all_substate_addresses(tx_rec.atom().evidence.substate_addresses_iter()) + .includes_all_substate_addresses(tx_rec.evidence().substate_addresses_iter()) { warn!( target: LOG_TARGET, @@ -370,6 +334,8 @@ where TConsensusSpec: ConsensusSpec t.id, block )) })?; + // TODO: If we proposed this block, we shouldn't have to reprocess the locks. Locks are + // currently fairly expensive. if !self.try_obtain_locks(execution, local_committee_info, &mut substate_store)? { // They want to ABORT a successfully executed transaction because of a lock conflict, which // we also have. @@ -460,26 +426,19 @@ where TConsensusSpec: ConsensusSpec ); }, Command::Prepare(t) => { - if tx_rec.is_deferred() { - info!( - target: LOG_TARGET, - "👨‍🔧 PREPARE: Executing deferred transaction {} in block {}", - tx_rec.transaction_id(), - block, - ); + info!( + target: LOG_TARGET, + "👨‍🔧 PREPARE: Executing deferred transaction {} in block {}", + tx_rec.transaction_id(), + block, + ); - let executed = self.execute_transaction_if_required(&substate_store, &atom.id, block.id())?; - tx_rec.set_local_decision(executed.decision()); - tx_rec.set_initial_evidence(executed.to_initial_evidence()); - tx_rec.set_transaction_fee(executed.transaction_fee()); - proposed_block_change_set.add_transaction_execution(executed); - } else { - let executed = ExecutedTransaction::get_pending_execution_for_block(tx, block.id(), &t.id)?; - tx_rec.set_local_decision(executed.decision()); - tx_rec.set_initial_evidence(executed.to_initial_evidence()); - tx_rec.set_transaction_fee(executed.transaction_fee()); - proposed_block_change_set.add_transaction_execution(executed); - } + let executed = + self.execute_transaction_if_required(&substate_store, &atom.id, block.id(), block.epoch())?; + tx_rec.set_local_decision(executed.decision()); + tx_rec.set_evidence(executed.to_initial_evidence()); + tx_rec.set_transaction_fee(executed.transaction_fee()); + proposed_block_change_set.add_transaction_execution(executed); if !tx_rec.current_stage().is_new() && !tx_rec.current_stage().is_prepared() { warn!( @@ -492,13 +451,13 @@ where TConsensusSpec: ConsensusSpec return Ok(proposed_block_change_set.no_vote()); } - if tx_rec.atom().transaction_fee != t.transaction_fee { + if tx_rec.transaction_fee() != t.transaction_fee { warn!( target: LOG_TARGET, "❌ Accept transaction fee disagreement for block {}. Leader proposed {}, we calculated {}", block, t.transaction_fee, - tx_rec.atom().transaction_fee + tx_rec.transaction_fee() ); return Ok(proposed_block_change_set.no_vote()); } @@ -585,14 +544,14 @@ where TConsensusSpec: ConsensusSpec return Ok(proposed_block_change_set.no_vote()); } - if tx_rec.atom().transaction_fee != t.transaction_fee { + if tx_rec.transaction_fee() != t.transaction_fee { warn!( target: LOG_TARGET, "❌ Accept transaction fee disagreement tx {} in block {}. Leader proposed {}, we calculated {}", tx_rec.transaction_id(), block, t.transaction_fee, - tx_rec.atom().transaction_fee + tx_rec.transaction_fee() ); return Ok(proposed_block_change_set.no_vote()); } @@ -600,7 +559,7 @@ where TConsensusSpec: ConsensusSpec proposed_block_change_set.set_next_transaction_update( &tx_rec, TransactionPoolStage::LocalPrepared, - tx_rec.atom().evidence.all_shards_justified(), + tx_rec.evidence().all_shards_justified(), ); }, Command::Accept(t) => { @@ -629,26 +588,26 @@ where TConsensusSpec: ConsensusSpec return Ok(proposed_block_change_set.no_vote()); } - if !tx_rec.atom().evidence.all_shards_justified() { + if !tx_rec.evidence().all_shards_justified() { warn!( target: LOG_TARGET, "❌ Accept evidence disagreement tx {} in block {}. Evidence for {} out of {} shards", tx_rec.transaction_id(), block, - tx_rec.atom().evidence.num_justified_shards(), - tx_rec.atom().evidence.len(), + tx_rec.evidence().num_justified_shards(), + tx_rec.evidence().len(), ); return Ok(proposed_block_change_set.no_vote()); } - if tx_rec.atom().transaction_fee != t.transaction_fee { + if tx_rec.transaction_fee() != t.transaction_fee { warn!( target: LOG_TARGET, "❌ Accept transaction fee disagreement tx {} in block {}. Leader proposed {}, we calculated {}", tx_rec.transaction_id(), block, t.transaction_fee, - tx_rec.atom().transaction_fee + tx_rec.transaction_fee() ); return Ok(proposed_block_change_set.no_vote()); @@ -658,18 +617,18 @@ where TConsensusSpec: ConsensusSpec // It is possible that the transaction was not marked as ready yet because of the order we // received messages, but if we are in LocalPrepared and we have all the // evidence, we would have proposed this too so we can continue. - if !tx_rec.is_ready() && !tx_rec.atom().evidence.all_shards_justified() { + if !tx_rec.is_ready() && !tx_rec.evidence().all_shards_justified() { warn!( target: LOG_TARGET, "⚠️ Local proposal received ({}) for transaction {} which is not ready. Not voting.", block, - tx_rec.atom() + tx_rec.transaction_id() ); return Ok(proposed_block_change_set.no_vote()); } let distinct_shards = - local_committee_info.count_distinct_shards(tx_rec.atom().evidence.substate_addresses_iter()); + local_committee_info.count_distinct_shards(tx_rec.evidence().substate_addresses_iter()); let distinct_shards = NonZeroU64::new(distinct_shards as u64).ok_or_else(|| { HotStuffError::InvariantError(format!( "Distinct shards is zero for transaction {} in block {}", @@ -789,6 +748,7 @@ where TConsensusSpec: ConsensusSpec store: &PendingSubstateStore, transaction_id: &TransactionId, block_id: &BlockId, + current_epoch: Epoch, ) -> Result { // If the transaction is already executed in the propose phase we simply load it for this block if let Some(execution) = @@ -799,15 +759,9 @@ where TConsensusSpec: ConsensusSpec let transaction = TransactionRecord::get(store.read_transaction(), transaction_id)?; - info!( - target: LOG_TARGET, - "🔥 Executing transaction {}", - transaction_id, - ); - let executed = self .transaction_executor - .execute(transaction.into_transaction(), store) + .execute(transaction.into_transaction(), store, current_epoch) .map_err(|e| HotStuffError::TransactionExecutorError(e.to_string()))?; Ok(executed.into_execution_for_block(*block_id)) diff --git a/dan_layer/consensus/src/hotstuff/on_receive_foreign_proposal.rs b/dan_layer/consensus/src/hotstuff/on_receive_foreign_proposal.rs index 7347aabc7..d9d14ebf8 100644 --- a/dan_layer/consensus/src/hotstuff/on_receive_foreign_proposal.rs +++ b/dan_layer/consensus/src/hotstuff/on_receive_foreign_proposal.rs @@ -187,7 +187,7 @@ where TConsensusSpec: ConsensusSpec // If all shards are complete and we've already received our LocalPrepared, we can set out LocalPrepared // transaction as ready to propose ACCEPT. If we have not received the local LocalPrepared, the transition // will happen when we receive the local block. - if tx_rec.current_stage().is_local_prepared() && tx_rec.atom().evidence.all_shards_justified() { + if tx_rec.current_stage().is_local_prepared() && tx_rec.evidence().all_shards_justified() { info!( target: LOG_TARGET, "🔥 FOREIGN PROPOSAL: Transaction is ready for propose ACCEPT({}, {}) Local Stage: {}", diff --git a/dan_layer/consensus/src/hotstuff/on_receive_local_proposal.rs b/dan_layer/consensus/src/hotstuff/on_receive_local_proposal.rs index bd4a62e17..5f19ab1fa 100644 --- a/dan_layer/consensus/src/hotstuff/on_receive_local_proposal.rs +++ b/dan_layer/consensus/src/hotstuff/on_receive_local_proposal.rs @@ -13,27 +13,23 @@ use tari_dan_storage::{ consensus_models::{ Block, Decision, - ExecutedTransaction, ForeignProposal, HighQc, LastSentVote, QuorumDecision, - TransactionAtom, TransactionPool, - TransactionPoolStage, TransactionRecord, ValidBlock, }, StateStore, }; use tari_epoch_manager::EpochManagerReader; -use tokio::sync::broadcast; +use tokio::{sync::broadcast, task}; use super::proposer::Proposer; use crate::{ hotstuff::{ calculate_dummy_blocks, - current_view::CurrentView, error::HotStuffError, on_ready_to_vote_on_local_block::OnReadyToVoteOnLocalBlock, pacemaker_handle::PaceMakerHandle, @@ -100,7 +96,7 @@ impl OnReceiveLocalProposalHandler Result<(), HotStuffError> { + pub async fn handle(&mut self, current_epoch: Epoch, message: ProposalMessage) -> Result<(), HotStuffError> { let ProposalMessage { block } = message; debug!( @@ -110,7 +106,7 @@ impl OnReceiveLocalProposalHandler Ok(()), Err(err @ HotStuffError::ProposalValidationError(_)) => { self.hooks.on_block_validation_failed(&err); @@ -121,7 +117,7 @@ impl OnReceiveLocalProposalHandler Result<(), HotStuffError> { + async fn process_block(&mut self, current_epoch: Epoch, block: Block) -> Result<(), HotStuffError> { if !self.epoch_manager.is_epoch_active(block.epoch()).await? { return Err(HotStuffError::EpochNotActive { epoch: block.epoch(), @@ -149,26 +145,6 @@ impl OnReceiveLocalProposalHandler OnReceiveLocalProposalHandler current_view.get_epoch(); - - let block_decision = self.on_ready_to_vote_on_local_block.handle( - &valid_block, - local_committee_info, - can_propose_epoch_end, - )?; + let em_epoch = self.epoch_manager.current_epoch().await?; + let can_propose_epoch_end = em_epoch > current_epoch; + + let mut on_ready_to_vote_on_local_block = self.on_ready_to_vote_on_local_block.clone(); + let (block_decision, valid_block) = task::spawn_blocking(move || { + let decision = on_ready_to_vote_on_local_block.handle( + &valid_block, + local_committee_info, + can_propose_epoch_end, + )?; + Ok::<_, HotStuffError>((decision, valid_block)) + }) + .await + .unwrap()?; self.hooks .on_local_block_decide(&valid_block, block_decision.quorum_decision); @@ -425,7 +407,7 @@ impl OnReceiveLocalProposalHandler { + store: TConsensusSpec::StateStore, + transaction_pool: TransactionPool, + executor: TConsensusSpec::TransactionExecutor, + tx_missing_transactions: mpsc::UnboundedSender, +} + +impl OnReceiveNewTransaction +where TConsensusSpec: ConsensusSpec +{ + pub fn new( + store: TConsensusSpec::StateStore, + transaction_pool: TransactionPool, + executor: TConsensusSpec::TransactionExecutor, + tx_missing_transactions: mpsc::UnboundedSender, + ) -> Self { + Self { + store, + transaction_pool, + executor, + tx_missing_transactions, + } + } + + pub async fn process_requested( + &mut self, + current_epoch: Epoch, + from: TConsensusSpec::Addr, + msg: MissingTransactionsResponse, + ) -> Result<(), HotStuffError> { + info!(target: LOG_TARGET, "Receiving {} requested transactions for block {} from {:?}", msg.transactions.len(), msg.block_id, from, ); + self.store.with_write_tx(|tx| { + for transaction in msg.transactions { + if let Some(rec) = self.validate_and_sequence_transaction(tx, current_epoch, transaction)? { + // TODO: Could this cause a race-condition? Transaction could be proposed as Prepare before the + // unparked block is processed (however, if there's a parked block it's probably not our turn to + // propose). Ideally we remove this channel because it's a work around + self.tx_missing_transactions + .send(*rec.id()) + .map_err(|_| HotStuffError::InternalChannelClosed { + context: "process_requested", + })?; + } + } + Ok(()) + }) + } + + pub fn try_sequence_transaction( + &mut self, + current_epoch: Epoch, + transaction: Transaction, + ) -> Result, HotStuffError> { + self.store + .with_write_tx(|tx| self.validate_and_sequence_transaction(tx, current_epoch, transaction)) + } + + fn validate_and_sequence_transaction( + &self, + tx: &mut <::StateStore as StateStore>::WriteTransaction<'_>, + current_epoch: Epoch, + transaction: Transaction, + ) -> Result, HotStuffError> { + if self.transaction_pool.exists(&**tx, transaction.id())? { + return Ok(None); + } + + let mut rec = TransactionRecord::get(&**tx, transaction.id()) + .optional()? + .unwrap_or_else(|| TransactionRecord::new(transaction)); + + // Edge case: a validator sends a transaction that is already finalized as a missing transaction or via + // propagation + if rec.is_finalized() { + warn!( + target: LOG_TARGET, "Transaction {} is already finalized. Consensus will ignore it.", rec.id() + ); + return Ok(None); + } + + let result = self.executor.validate(&**tx, current_epoch, rec.transaction()); + + if let Err(err) = result { + warn!( + target: LOG_TARGET, + "Transaction {} failed validation: {}", rec.id(), err + ); + rec.set_current_decision_to_abort(err.to_string()).insert(tx)?; + self.add_to_pool(tx, &rec)?; + return Ok(Some(rec)); + } + rec.save(tx)?; + self.add_to_pool(tx, &rec)?; + Ok(Some(rec)) + } + + fn add_to_pool( + &self, + tx: &mut ::WriteTransaction<'_>, + transaction: &TransactionRecord, + ) -> Result<(), HotStuffError> { + self.transaction_pool + .insert_new(tx, *transaction.id(), transaction.current_decision())?; + Ok(()) + } +} diff --git a/dan_layer/consensus/src/hotstuff/on_receive_request_missing_transactions.rs b/dan_layer/consensus/src/hotstuff/on_receive_request_missing_transactions.rs index 2f3fe4518..7d15e6772 100644 --- a/dan_layer/consensus/src/hotstuff/on_receive_request_missing_transactions.rs +++ b/dan_layer/consensus/src/hotstuff/on_receive_request_missing_transactions.rs @@ -6,7 +6,7 @@ use tari_dan_storage::{consensus_models::TransactionRecord, StateStore}; use crate::{ hotstuff::error::HotStuffError, - messages::{HotstuffMessage, RequestMissingTransactionsMessage, RequestedTransactionMessage}, + messages::{HotstuffMessage, MissingTransactionsRequest, MissingTransactionsResponse}, traits::{ConsensusSpec, OutboundMessaging}, }; @@ -30,7 +30,7 @@ where TConsensusSpec: ConsensusSpec pub async fn handle( &mut self, from: TConsensusSpec::Addr, - msg: RequestMissingTransactionsMessage, + msg: MissingTransactionsRequest, ) -> Result<(), HotStuffError> { info!(target: LOG_TARGET, "{} requested {} missing transaction(s) from block {}", from, msg.transactions.len(), msg.block_id); let (txs, missing) = self @@ -46,7 +46,8 @@ where TConsensusSpec: ConsensusSpec self.outbound_messaging .send( from, - HotstuffMessage::RequestedTransaction(RequestedTransactionMessage { + HotstuffMessage::MissingTransactionsResponse(MissingTransactionsResponse { + request_id: msg.request_id, epoch: msg.epoch, block_id: msg.block_id, transactions: txs.into_iter().map(|tx| tx.into_transaction()).collect(), diff --git a/dan_layer/consensus/src/hotstuff/on_receive_requested_transactions.rs b/dan_layer/consensus/src/hotstuff/on_receive_requested_transactions.rs deleted file mode 100644 index f7a4c019d..000000000 --- a/dan_layer/consensus/src/hotstuff/on_receive_requested_transactions.rs +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2023 The Tari Project -// SPDX-License-Identifier: BSD-3-Clause - -use log::*; -use tari_transaction::Transaction; -use tokio::sync::mpsc; - -use crate::{hotstuff::error::HotStuffError, messages::RequestedTransactionMessage, traits::ConsensusSpec}; - -const LOG_TARGET: &str = "tari::dan::consensus::hotstuff::on_receive_requested_transactions"; - -pub struct OnReceiveRequestedTransactions { - tx_mempool: mpsc::UnboundedSender, - _phantom: std::marker::PhantomData, -} - -impl OnReceiveRequestedTransactions -where TConsensusSpec: ConsensusSpec -{ - pub fn new(tx_mempool: mpsc::UnboundedSender) -> Self { - Self { - tx_mempool, - _phantom: Default::default(), - } - } - - pub async fn handle( - &mut self, - from: TConsensusSpec::Addr, - msg: RequestedTransactionMessage, - ) -> Result<(), HotStuffError> { - info!(target: LOG_TARGET, "Receiving {} requested transactions for block {} from {:?}", msg.transactions.len(), msg.block_id, from, ); - // TODO: Check that we requested this - for tx in msg.transactions { - self.tx_mempool - .send(tx) - .map_err(|_| HotStuffError::InternalChannelClosed { - context: "tx_new_transaction in OnReceiveRequestedTransactions::handle", - })?; - } - Ok(()) - } -} diff --git a/dan_layer/consensus/src/hotstuff/proposer.rs b/dan_layer/consensus/src/hotstuff/proposer.rs index 1eb446ee1..552f3d1f0 100644 --- a/dan_layer/consensus/src/hotstuff/proposer.rs +++ b/dan_layer/consensus/src/hotstuff/proposer.rs @@ -1,15 +1,10 @@ // Copyright 2023 The Tari Project // SPDX-License-Identifier: BSD-3-Clause -use std::collections::{BTreeSet, HashSet}; +use std::collections::HashSet; use log::{debug, info}; -use tari_dan_common_types::shard::Shard; -use tari_dan_storage::{ - consensus_models::{Block, Command, ExecutedTransaction}, - StateStore, - StateStoreReadTransaction, -}; +use tari_dan_storage::consensus_models::Block; use tari_epoch_manager::EpochManagerReader; use super::HotStuffError; @@ -20,7 +15,6 @@ use crate::{ #[derive(Clone)] pub struct Proposer { - store: TConsensusSpec::StateStore, epoch_manager: TConsensusSpec::EpochManager, outbound_messaging: TConsensusSpec::OutboundMessaging, } @@ -31,12 +25,10 @@ impl Proposer where TConsensusSpec: ConsensusSpec { pub fn new( - store: TConsensusSpec::StateStore, epoch_manager: TConsensusSpec::EpochManager, outbound_messaging: TConsensusSpec::OutboundMessaging, ) -> Self { Self { - store, epoch_manager, outbound_messaging, } @@ -47,9 +39,14 @@ where TConsensusSpec: ConsensusSpec let validator = self.epoch_manager.get_our_validator_node(block.epoch()).await?; let local_shard = validator.shard_key.to_shard(num_committees); - let non_local_shards = self - .store - .with_read_tx(|tx| get_non_local_shards(tx, &block, num_committees, local_shard))?; + let non_local_shards = block + .commands() + .iter() + .filter_map(|c| c.local_prepared()) + .flat_map(|p| p.evidence.substate_addresses_iter()) + .map(|addr| addr.to_shard(num_committees)) + .filter(|shard| *shard != local_shard) + .collect::>(); if non_local_shards.is_empty() { return Ok(()); } @@ -88,29 +85,3 @@ where TConsensusSpec: ConsensusSpec Ok(()) } } - -pub fn get_non_local_shards( - tx: &TTx, - block: &Block, - num_committees: u32, - local_shard: Shard, -) -> Result, HotStuffError> { - get_non_local_shards_from_commands(tx, block.commands(), num_committees, local_shard) -} - -fn get_non_local_shards_from_commands( - tx: &TTx, - commands: &BTreeSet, - num_committees: u32, - local_shard: Shard, -) -> Result, HotStuffError> { - let prepared_iter = commands.iter().filter_map(|cmd| cmd.local_prepared()).map(|t| &t.id); - let prepared_txs = ExecutedTransaction::get_involved_shards(tx, prepared_iter)?; - let non_local_shards = prepared_txs - .into_iter() - .flat_map(|(_, addresses)| addresses) - .map(|address| address.to_shard(num_committees)) - .filter(|shard| *shard != local_shard) - .collect(); - Ok(non_local_shards) -} diff --git a/dan_layer/consensus/src/hotstuff/substate_store/pending_store.rs b/dan_layer/consensus/src/hotstuff/substate_store/pending_store.rs index ecf97e815..8b4a93f41 100644 --- a/dan_layer/consensus/src/hotstuff/substate_store/pending_store.rs +++ b/dan_layer/consensus/src/hotstuff/substate_store/pending_store.rs @@ -10,6 +10,8 @@ use tari_dan_common_types::{optional::Optional, SubstateAddress}; use tari_dan_storage::{ consensus_models::{ Block, + BlockDiff, + BlockId, LockedSubstate, PendingStateTreeDiff, SubstateChange, @@ -39,15 +41,17 @@ pub struct PendingSubstateStore<'a, 'tx, TStore: StateStore + 'a + 'tx> { /// Append only list of changes ordered oldest to newest diff: Vec, new_locks: IndexMap>, + parent_block: BlockId, } impl<'a, 'tx, TStore: StateStore + 'a> PendingSubstateStore<'a, 'tx, TStore> { - pub fn new(store: ChainScopedTreeStore<&'a TStore::ReadTransaction<'tx>>) -> Self { + pub fn new(parent_block: BlockId, store: ChainScopedTreeStore<&'a TStore::ReadTransaction<'tx>>) -> Self { Self { store, pending: HashMap::new(), diff: Vec::new(), new_locks: IndexMap::new(), + parent_block, } } @@ -59,15 +63,23 @@ impl<'a, 'tx, TStore: StateStore + 'a> PendingSubstateStore<'a, 'tx, TStore> { impl<'a, 'tx, TStore: StateStore + 'a + 'tx> ReadableSubstateStore for PendingSubstateStore<'a, 'tx, TStore> { type Error = SubstateStoreError; - fn get(&self, key: &SubstateAddress) -> Result { - if let Some(change) = self.get_pending(key) { + fn get(&self, id: &VersionedSubstateId) -> Result { + if let Some(change) = self.get_pending(id) { return change.up().cloned().ok_or_else(|| SubstateStoreError::SubstateIsDown { id: change.versioned_substate_id().clone(), }); } - let Some(substate) = SubstateRecord::get(self.read_transaction(), key).optional()? else { - return Err(SubstateStoreError::SubstateNotFound { address: *key }); + if let Some(change) = + BlockDiff::get_for_substate(self.read_transaction(), &self.parent_block, &id.substate_id).optional()? + { + return change + .into_up() + .ok_or_else(|| SubstateStoreError::SubstateIsDown { id: id.clone() }); + } + + let Some(substate) = SubstateRecord::get(self.read_transaction(), &id.to_substate_address()).optional()? else { + return Err(SubstateStoreError::SubstateNotFound { address: id.to_substate_address() }); }; Ok(substate.into_substate()) } @@ -96,9 +108,6 @@ impl<'a, 'tx, TStore: StateStore + 'a + 'tx> WriteableSubstateStore for PendingS impl<'a, 'tx, TStore: StateStore + 'a + 'tx> PendingSubstateStore<'a, 'tx, TStore> { pub fn get_latest(&self, id: &SubstateId) -> Result { - // TODO: This returns the pledged inputs (local or foreign) - - // TODO(perf): O(n) lookup. Can be improved by maintaining a map of latest substates if let Some(substate) = self .diff .iter() @@ -109,6 +118,13 @@ impl<'a, 'tx, TStore: StateStore + 'a + 'tx> PendingSubstateStore<'a, 'tx, TStor return Ok(substate.clone()); } + if let Some(change) = BlockDiff::get_for_substate(self.read_transaction(), &self.parent_block, id).optional()? { + let id = change.versioned_substate_id().clone(); + return change + .into_up() + .ok_or_else(|| SubstateStoreError::SubstateIsDown { id }); + } + let substate = SubstateRecord::get_latest(self.read_transaction(), id)?; Ok(substate.into_substate()) } @@ -220,7 +236,7 @@ impl<'a, 'tx, TStore: StateStore + 'a + 'tx> PendingSubstateStore<'a, 'tx, TStor transaction_id, requested_lock.versioned_substate_id().version(), requested_lock_flag, - true, + is_local_only, ), ); }, @@ -265,7 +281,7 @@ impl<'a, 'tx, TStore: StateStore + 'a + 'tx> PendingSubstateStore<'a, 'tx, TStor transaction_id, requested_lock.versioned_substate_id().version(), SubstateLockFlag::Output, - true, + is_local_only, ), ); }, @@ -308,7 +324,7 @@ impl<'a, 'tx, TStore: StateStore + 'a + 'tx> PendingSubstateStore<'a, 'tx, TStor requested_lock.versioned_substate_id().version(), // WRITE or READ requested_lock_flag, - true, + is_local_only, ), ); }, @@ -317,9 +333,9 @@ impl<'a, 'tx, TStore: StateStore + 'a + 'tx> PendingSubstateStore<'a, 'tx, TStor Ok(()) } - fn get_pending(&self, key: &SubstateAddress) -> Option<&SubstateChange> { + fn get_pending(&self, key: &VersionedSubstateId) -> Option<&SubstateChange> { self.pending - .get(key) + .get(&key.to_substate_address()) .map(|&pos| self.diff.get(pos).expect("Index map and diff are out of sync")) } @@ -345,33 +361,40 @@ impl<'a, 'tx, TStore: StateStore + 'a + 'tx> PendingSubstateStore<'a, 'tx, TStor } fn assert_is_up(&self, id: &VersionedSubstateId) -> Result<(), SubstateStoreError> { - let address = id.to_substate_address(); - if let Some(change) = self.get_pending(&address) { + if let Some(change) = self.get_pending(id) { if change.is_down() { return Err(SubstateStoreError::SubstateIsDown { id: id.clone() }); } return Ok(()); } - let is_up = SubstateRecord::substate_is_up(self.read_transaction(), &address) - .optional()? - .unwrap_or(false); - if !is_up { + if let Some(change) = + BlockDiff::get_for_substate(self.read_transaction(), &self.parent_block, &id.substate_id).optional()? + { + if change.is_up() { + return Ok(()); + } return Err(SubstateStoreError::SubstateIsDown { id: id.clone() }); } - Ok(()) + match SubstateRecord::substate_is_up(self.read_transaction(), &id.to_substate_address()).optional()? { + Some(true) => Ok(()), + Some(false) => Err(SubstateStoreError::SubstateIsDown { id: id.clone() }), + None => Err(SubstateStoreError::SubstateNotFound { + address: id.to_substate_address(), + }), + } } fn assert_is_down(&self, id: &VersionedSubstateId) -> Result<(), SubstateStoreError> { - let address = id.to_substate_address(); - if let Some(change) = self.get_pending(&address) { + if let Some(change) = self.get_pending(id) { if change.is_up() { return Err(SubstateStoreError::ExpectedSubstateDown { id: id.clone() }); } return Ok(()); } + let address = id.to_substate_address(); let Some(is_up) = SubstateRecord::substate_is_up(self.read_transaction(), &address).optional()? else { debug!(target: LOG_TARGET, "Expected substate {} to be DOWN but it does not exist", address); return Err(SubstateStoreError::SubstateNotFound { address }); @@ -384,15 +407,14 @@ impl<'a, 'tx, TStore: StateStore + 'a + 'tx> PendingSubstateStore<'a, 'tx, TStor } fn assert_not_exist(&self, id: &VersionedSubstateId) -> Result<(), SubstateStoreError> { - let address = id.to_substate_address(); - if let Some(change) = self.get_pending(&address) { + if let Some(change) = self.get_pending(id) { if change.is_up() { return Err(SubstateStoreError::ExpectedSubstateNotExist { id: id.clone() }); } return Ok(()); } - if SubstateRecord::exists(self.read_transaction(), &address)? { + if SubstateRecord::exists(self.read_transaction(), id)? { return Err(SubstateStoreError::ExpectedSubstateNotExist { id: id.clone() }); } diff --git a/dan_layer/consensus/src/hotstuff/worker.rs b/dan_layer/consensus/src/hotstuff/worker.rs index 38405af44..16d9f64fb 100644 --- a/dan_layer/consensus/src/hotstuff/worker.rs +++ b/dan_layer/consensus/src/hotstuff/worker.rs @@ -7,16 +7,7 @@ use log::*; use tari_common::configuration::Network; use tari_dan_common_types::{shard::Shard, Epoch, NodeHeight}; use tari_dan_storage::{ - consensus_models::{ - Block, - BlockDiff, - ExecutedTransaction, - HighQc, - LeafBlock, - TransactionAtom, - TransactionPool, - TransactionRecord, - }, + consensus_models::{Block, BlockDiff, HighQc, LeafBlock, TransactionPool}, StateStore, }; use tari_epoch_manager::{EpochManagerEvent, EpochManagerReader}; @@ -26,7 +17,7 @@ use tokio::sync::{broadcast, mpsc}; use super::{ config::HotstuffConfig, - on_receive_requested_transactions::OnReceiveRequestedTransactions, + on_receive_new_transaction::OnReceiveNewTransaction, proposer::Proposer, ProposalValidationError, }; @@ -56,12 +47,13 @@ use crate::{ const LOG_TARGET: &str = "tari::dan::consensus::hotstuff::worker"; pub struct HotstuffWorker { - validator_addr: TConsensusSpec::Addr, + local_validator_addr: TConsensusSpec::Addr, network: Network, hooks: TConsensusSpec::Hooks, tx_events: broadcast::Sender, - rx_new_transactions: mpsc::Receiver<(TransactionId, usize)>, + rx_new_transactions: mpsc::Receiver<(Transaction, usize)>, + rx_missing_transactions: mpsc::UnboundedReceiver, on_inbound_message: OnInboundMessage, on_next_sync_view: OnNextSyncViewHandler, @@ -70,7 +62,7 @@ pub struct HotstuffWorker { on_receive_vote: OnReceiveVoteHandler, on_receive_new_view: OnReceiveNewViewHandler, on_receive_request_missing_txs: OnReceiveRequestMissingTransactions, - on_receive_requested_txs: OnReceiveRequestedTransactions, + on_receive_new_transaction: OnReceiveNewTransaction, on_message_validate: OnMessageValidate, on_propose: OnPropose, on_sync_request: OnSyncRequest, @@ -92,7 +84,7 @@ impl HotstuffWorker { network: Network, inbound_messaging: TConsensusSpec::InboundMessaging, outbound_messaging: TConsensusSpec::OutboundMessaging, - rx_new_transactions: mpsc::Receiver<(TransactionId, usize)>, + rx_new_transactions: mpsc::Receiver<(Transaction, usize)>, state_store: TConsensusSpec::StateStore, epoch_manager: TConsensusSpec::EpochManager, leader_strategy: TConsensusSpec::LeaderStrategy, @@ -100,11 +92,11 @@ impl HotstuffWorker { transaction_pool: TransactionPool, transaction_executor: TConsensusSpec::TransactionExecutor, tx_events: broadcast::Sender, - tx_mempool: mpsc::UnboundedSender, hooks: TConsensusSpec::Hooks, shutdown: ShutdownSignal, config: HotstuffConfig, ) -> Self { + let (tx_missing_transactions, rx_missing_transactions) = mpsc::unbounded_channel(); let pacemaker = PaceMaker::new(); let vote_receiver = VoteReceiver::new( network, @@ -114,13 +106,13 @@ impl HotstuffWorker { signing_service.clone(), pacemaker.clone_handle(), ); - let proposer = - Proposer::::new(state_store.clone(), epoch_manager.clone(), outbound_messaging.clone()); + let proposer = Proposer::::new(epoch_manager.clone(), outbound_messaging.clone()); Self { - validator_addr: validator_addr.clone(), + local_validator_addr: validator_addr.clone(), network, tx_events: tx_events.clone(), rx_new_transactions, + rx_missing_transactions, on_inbound_message: OnInboundMessage::new(inbound_messaging, hooks.clone()), on_message_validate: OnMessageValidate::new( @@ -132,7 +124,6 @@ impl HotstuffWorker { leader_strategy.clone(), signing_service.clone(), outbound_messaging.clone(), - transaction_pool.clone(), tx_events.clone(), ), @@ -176,7 +167,12 @@ impl HotstuffWorker { state_store.clone(), outbound_messaging.clone(), ), - on_receive_requested_txs: OnReceiveRequestedTransactions::new(tx_mempool), + on_receive_new_transaction: OnReceiveNewTransaction::new( + state_store.clone(), + transaction_pool.clone(), + transaction_executor.clone(), + tx_missing_transactions, + ), on_propose: OnPropose::new( network, state_store.clone(), @@ -272,14 +268,14 @@ impl HotstuffWorker { tokio::select! { Some(result) = self.on_inbound_message.next_message(current_epoch, current_height) => { - if let Err(err) = self.on_unvalidated_message(current_height, result).await { + if let Err(err) = self.on_unvalidated_message(current_epoch, current_height, result).await { self.hooks.on_error(&err); error!(target: LOG_TARGET, "🚨Error handling new message: {}", err); } }, Some((tx_id, pending)) = self.rx_new_transactions.recv() => { - if let Err(err) = self.on_new_transaction(tx_id, pending, current_height).await { + if let Err(err) = self.on_new_transaction(tx_id, pending, current_epoch, current_height).await { self.hooks.on_error(&err); error!(target: LOG_TARGET, "🚨Error handling new transaction: {}", err); } @@ -289,6 +285,17 @@ impl HotstuffWorker { self.on_epoch_manager_event(event).await?; }, + // TODO: This channel is used to work around some design-flaws in missing transactions handling. + // We cannot simply call check_if_block_can_be_unparked in dispatch_hotstuff_message as that creates a cycle. + // One suggestion is to refactor consensus to emit events (kinda like libp2p does) and handle those events. + // This should be easy to reason about and avoid a large depth of async calls and "callback channels". + Some(tx_id) = self.rx_missing_transactions.recv() => { + if let Err(err) = self.check_if_block_can_be_unparked(current_epoch, current_height, &tx_id).await { + self.hooks.on_error(&err); + error!(target: LOG_TARGET, "🚨Error handling missing transaction: {}", err); + } + }, + _ = on_beat.wait() => { if let Err(e) = self.on_beat(current_epoch).await { self.on_failure("on_beat", &e).await; @@ -330,96 +337,141 @@ impl HotstuffWorker { async fn on_unvalidated_message( &mut self, + current_epoch: Epoch, current_height: NodeHeight, result: Result<(TConsensusSpec::Addr, HotstuffMessage), HotStuffError>, ) -> Result<(), HotStuffError> { let (from, msg) = result?; - match self.on_message_validate.handle(current_height, from, msg).await? { + match self + .on_message_validate + .handle(current_height, from.clone(), msg) + .await? + { MessageValidationResult::Ready { from, message: msg } => { - if let Err(e) = self.dispatch_hotstuff_message(from, msg).await { + if let Err(e) = self.dispatch_hotstuff_message(current_epoch, from, msg).await { self.on_failure("on_unvalidated_message -> dispatch_hotstuff_message", &e) .await; return Err(e); } Ok(()) }, - MessageValidationResult::NotReady | MessageValidationResult::Discard => Ok(()), + MessageValidationResult::ParkedProposal { + epoch, + missing_txs, + block_id, + .. + } => { + let mut request_from_address = from; + if request_from_address == self.local_validator_addr { + // let vn = self + // .epoch_manager + // .get_validator_node_by_public_key(epoch, &proposed_by) + // .await?; + // + // let mut request_from_address = vn.address; + // + // // (Yet another) Edge case: If we're catching up, we could be the proposer but we no longer have + // the // transaction (we deleted our database) In this case, request from + // another random VN // (TODO: not 100% reliable) + // if request_from_address == self.local_validator_addr { + let mut local_committee = self.epoch_manager.get_local_committee(epoch).await?; + + local_committee.shuffle(); + match local_committee + .into_iter() + .find(|(addr, _)| *addr != self.local_validator_addr) + { + Some((addr, _)) => { + warn!(target: LOG_TARGET, "⚠️Requesting missing transactions from another validator {addr} + because we are (presumably) catching up (local_peer_id = {})", self.local_validator_addr); + request_from_address = addr; + }, + None => { + warn!( + target: LOG_TARGET, + "❌NEVERHAPPEN: We're the only validator in the committee but we need to request missing + transactions." ); + return Ok(()); + }, + } + } + + self.on_message_validate + .request_missing_transactions(request_from_address, block_id, epoch, missing_txs) + .await?; + Ok(()) + }, + MessageValidationResult::Discard => Ok(()), MessageValidationResult::Invalid { err, .. } => Err(err), } } async fn on_new_transaction( &mut self, - tx_id: TransactionId, + transaction: Transaction, num_pending_txs: usize, + current_epoch: Epoch, current_height: NodeHeight, ) -> Result<(), HotStuffError> { - let exists = self.state_store.with_write_tx(|tx| { - if self.transaction_pool.exists(&**tx, &tx_id)? { - return Ok(Some(true)); - } - let transaction = TransactionRecord::get(&**tx, &tx_id)?; - if transaction.is_finalized() { - warn!( - target: LOG_TARGET, "Transaction {} is already finalized. Consensus will ignore it.", transaction.id() - ); - return Ok(None); - } - // Did the mempool execute it? - if transaction.is_executed() { - // This should never fail - let executed = ExecutedTransaction::try_from(transaction)?; - self.transaction_pool.insert(tx, executed.to_atom())?; - } else { - debug!( - target: LOG_TARGET, - "🔥 New transaction {tx_id} is deferred (not executed yet)", - ); - // Deferred execution - self.transaction_pool - .insert(tx, TransactionAtom::deferred(*transaction.id()))?; - } - Ok::<_, HotStuffError>(Some(false)) - })?; + let maybe_transaction = self + .on_receive_new_transaction + .try_sequence_transaction(current_epoch, transaction)?; - let Some(exists) = exists else { + let Some(transaction) = maybe_transaction else { return Ok(()); }; debug!( target: LOG_TARGET, - "🔥 new transaction ready for consensus: {} ({} pending, already exists = {})", - tx_id, + "🔥 new transaction ready for consensus: {} ({} pending)", + transaction.id(), num_pending_txs, - exists ); - if !exists { - self.hooks.on_transaction_ready(&tx_id); - } + self.hooks.on_transaction_ready(transaction.id()); - if let Some((from, msg)) = self - .on_message_validate - .update_parked_blocks(current_height, &tx_id) + if self + .check_if_block_can_be_unparked(current_epoch, current_height, transaction.id()) .await? { - if let Err(e) = self.dispatch_hotstuff_message(from, msg).await { - self.on_failure("on_new_transaction -> dispatch_hotstuff_message", &e) - .await; - return Err(e); - } + // No need to call on_beat, a block was unparked so on_beat will be called as needed + return Ok(()); } // There are num_pending_txs transactions in the queue. If we have no pending transactions, we'll propose now if // able. - if !exists && num_pending_txs == 0 { + if num_pending_txs == 0 { self.pacemaker.beat(); } Ok(()) } + /// Returns true if a block was unparked, otherwise false + async fn check_if_block_can_be_unparked( + &mut self, + current_epoch: Epoch, + current_height: NodeHeight, + tx_id: &TransactionId, + ) -> Result { + match self + .on_message_validate + .update_parked_blocks(current_height, tx_id) + .await? + { + Some((from, msg)) => { + if let Err(e) = self.dispatch_hotstuff_message(current_epoch, from, msg).await { + self.on_failure("on_new_transaction -> dispatch_hotstuff_message", &e) + .await; + return Err(e); + } + Ok(true) + }, + None => Ok(false), + } + } + async fn on_epoch_manager_event(&mut self, event: EpochManagerEvent) -> Result<(), HotStuffError> { match event { EpochManagerEvent::EpochChanged(epoch) => { @@ -451,7 +503,7 @@ impl HotstuffWorker { async fn request_initial_catch_up_sync(&mut self, current_epoch: Epoch) -> Result<(), HotStuffError> { let committee = self.epoch_manager.get_local_committee(current_epoch).await?; for member in committee.shuffled() { - if *member != self.validator_addr { + if *member != self.local_validator_addr { self.on_catch_up_sync.request_sync(current_epoch, member).await?; break; } @@ -521,14 +573,16 @@ impl HotstuffWorker { let local_committee = self.epoch_manager.get_local_committee(epoch).await?; - let is_leader = - self.leader_strategy - .is_leader_for_next_block(&self.validator_addr, &local_committee, leaf_block.height); + let is_leader = self.leader_strategy.is_leader_for_next_block( + &self.local_validator_addr, + &local_committee, + leaf_block.height, + ); info!( target: LOG_TARGET, "🔥 [on_beat{}] {} Is leader: {:?}, leaf_block: {}, local_committee: {}", if is_newview_propose { " (NEWVIEW)"} else { "" }, - self.validator_addr, + self.local_validator_addr, is_leader, leaf_block, local_committee @@ -559,6 +613,7 @@ impl HotstuffWorker { async fn dispatch_hotstuff_message( &mut self, + current_epoch: Epoch, from: TConsensusSpec::Addr, msg: HotstuffMessage, ) -> Result<(), HotStuffError> { @@ -570,11 +625,9 @@ impl HotstuffWorker { self.on_receive_new_view.handle(from, message).await, ), HotstuffMessage::Proposal(msg) => { - let current_view = self.pacemaker.current_view().clone(); - let current_epoch = current_view.get_epoch(); match log_err( "on_receive_local_proposal", - self.on_receive_local_proposal.handle(current_view, msg).await, + self.on_receive_local_proposal.handle(current_epoch, msg).await, ) { Ok(_) => Ok(()), Err( @@ -597,13 +650,15 @@ impl HotstuffWorker { self.on_receive_foreign_proposal.handle(from, msg).await, ), HotstuffMessage::Vote(msg) => log_err("on_receive_vote", self.on_receive_vote.handle(from, msg).await), - HotstuffMessage::RequestMissingTransactions(msg) => log_err( + HotstuffMessage::MissingTransactionsRequest(msg) => log_err( "on_receive_request_missing_transactions", self.on_receive_request_missing_txs.handle(from, msg).await, ), - HotstuffMessage::RequestedTransaction(msg) => log_err( - "on_receive_requested_txs", - self.on_receive_requested_txs.handle(from, msg).await, + HotstuffMessage::MissingTransactionsResponse(msg) => log_err( + "on_receive_new_transaction", + self.on_receive_new_transaction + .process_requested(current_epoch, from, msg) + .await, ), HotstuffMessage::CatchUpSyncRequest(msg) => { self.on_sync_request.handle(from, msg); @@ -660,7 +715,7 @@ impl HotstuffWorker { impl Debug for HotstuffWorker { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { f.debug_struct("HotstuffWorker") - .field("validator_addr", &self.validator_addr) + .field("validator_addr", &self.local_validator_addr) .field("epoch_manager", &"EpochManager") .field("pacemaker_handle", &self.pacemaker) .field("pacemaker", &"Pacemaker") diff --git a/dan_layer/consensus/src/messages/message.rs b/dan_layer/consensus/src/messages/message.rs index 9b8c60b78..b77a1ec6e 100644 --- a/dan_layer/consensus/src/messages/message.rs +++ b/dan_layer/consensus/src/messages/message.rs @@ -6,8 +6,8 @@ use std::fmt::Display; use serde::Serialize; use tari_dan_common_types::Epoch; -use super::{NewViewMessage, ProposalMessage, RequestedTransactionMessage, VoteMessage}; -use crate::messages::{RequestMissingTransactionsMessage, SyncRequestMessage, SyncResponseMessage}; +use super::{MissingTransactionsResponse, NewViewMessage, ProposalMessage, VoteMessage}; +use crate::messages::{MissingTransactionsRequest, SyncRequestMessage, SyncResponseMessage}; // Serialize is implemented for the message logger #[derive(Debug, Clone, Serialize)] @@ -16,8 +16,8 @@ pub enum HotstuffMessage { Proposal(ProposalMessage), ForeignProposal(ProposalMessage), Vote(VoteMessage), - RequestMissingTransactions(RequestMissingTransactionsMessage), - RequestedTransaction(RequestedTransactionMessage), + MissingTransactionsRequest(MissingTransactionsRequest), + MissingTransactionsResponse(MissingTransactionsResponse), CatchUpSyncRequest(SyncRequestMessage), // TODO: remove unused SyncResponse(SyncResponseMessage), @@ -30,8 +30,8 @@ impl HotstuffMessage { HotstuffMessage::Proposal(_) => "Proposal", HotstuffMessage::ForeignProposal(_) => "ForeignProposal", HotstuffMessage::Vote(_) => "Vote", - HotstuffMessage::RequestMissingTransactions(_) => "RequestMissingTransactions", - HotstuffMessage::RequestedTransaction(_) => "RequestedTransaction", + HotstuffMessage::MissingTransactionsRequest(_) => "MissingTransactionsRequest", + HotstuffMessage::MissingTransactionsResponse(_) => "MissingTransactionsResponse", HotstuffMessage::CatchUpSyncRequest(_) => "CatchUpSyncRequest", HotstuffMessage::SyncResponse(_) => "SyncResponse", } @@ -43,8 +43,8 @@ impl HotstuffMessage { Self::Proposal(msg) => msg.block.epoch(), Self::ForeignProposal(msg) => msg.block.epoch(), Self::Vote(msg) => msg.epoch, - Self::RequestMissingTransactions(msg) => msg.epoch, - Self::RequestedTransaction(msg) => msg.epoch, + Self::MissingTransactionsRequest(msg) => msg.epoch, + Self::MissingTransactionsResponse(msg) => msg.epoch, Self::CatchUpSyncRequest(msg) => msg.epoch, Self::SyncResponse(msg) => msg.epoch, } @@ -67,7 +67,7 @@ impl Display for HotstuffMessage { }, HotstuffMessage::ForeignProposal(msg) => write!(f, "ForeignProposal({})", msg), HotstuffMessage::Vote(msg) => write!(f, "Vote({}, {}, {})", msg.block_height, msg.block_id, msg.decision), - HotstuffMessage::RequestMissingTransactions(msg) => { + HotstuffMessage::MissingTransactionsRequest(msg) => { write!( f, "RequestMissingTransactions({} transaction(s), block: {}, epoch: {})", @@ -76,7 +76,7 @@ impl Display for HotstuffMessage { msg.epoch ) }, - HotstuffMessage::RequestedTransaction(msg) => write!( + HotstuffMessage::MissingTransactionsResponse(msg) => write!( f, "RequestedTransaction({} transaction(s), block: {}, epoch: {})", msg.transactions.len(), diff --git a/dan_layer/consensus/src/messages/request_missing_transaction.rs b/dan_layer/consensus/src/messages/request_missing_transaction.rs index b5658c7bd..8bda6cd9f 100644 --- a/dan_layer/consensus/src/messages/request_missing_transaction.rs +++ b/dan_layer/consensus/src/messages/request_missing_transaction.rs @@ -9,7 +9,8 @@ use tari_dan_storage::consensus_models::BlockId; use tari_transaction::TransactionId; #[derive(Debug, Clone, Serialize)] -pub struct RequestMissingTransactionsMessage { +pub struct MissingTransactionsRequest { + pub request_id: u32, pub epoch: Epoch, pub block_id: BlockId, pub transactions: HashSet, diff --git a/dan_layer/consensus/src/messages/requested_transaction.rs b/dan_layer/consensus/src/messages/requested_transaction.rs index 20acace33..6537d7da3 100644 --- a/dan_layer/consensus/src/messages/requested_transaction.rs +++ b/dan_layer/consensus/src/messages/requested_transaction.rs @@ -7,7 +7,8 @@ use tari_dan_storage::consensus_models::BlockId; use tari_transaction::Transaction; #[derive(Debug, Clone, Serialize)] -pub struct RequestedTransactionMessage { +pub struct MissingTransactionsResponse { + pub request_id: u32, pub epoch: Epoch, pub block_id: BlockId, pub transactions: Vec, diff --git a/dan_layer/consensus/src/traits/substate_store.rs b/dan_layer/consensus/src/traits/substate_store.rs index cd7813503..32209017c 100644 --- a/dan_layer/consensus/src/traits/substate_store.rs +++ b/dan_layer/consensus/src/traits/substate_store.rs @@ -1,7 +1,6 @@ // Copyright 2024 The Tari Project // SPDX-License-Identifier: BSD-3-Clause -use tari_dan_common_types::SubstateAddress; use tari_dan_storage::{ consensus_models::{SubstateChange, SubstateRecord}, StateStoreReadTransaction, @@ -13,7 +12,7 @@ use tari_transaction::{TransactionId, VersionedSubstateId}; pub trait ReadableSubstateStore { type Error; - fn get(&self, key: &SubstateAddress) -> Result; + fn get(&self, id: &VersionedSubstateId) -> Result; } pub trait WriteableSubstateStore: ReadableSubstateStore { @@ -46,8 +45,8 @@ impl SubstateStore for T {} impl ReadableSubstateStore for &T { type Error = StorageError; - fn get(&self, key: &SubstateAddress) -> Result { - let substate = SubstateRecord::get(*self, key)?; + fn get(&self, id: &VersionedSubstateId) -> Result { + let substate = SubstateRecord::get(*self, &id.to_substate_address())?; Ok(substate.into_substate()) } } diff --git a/dan_layer/consensus/src/traits/transaction_executor.rs b/dan_layer/consensus/src/traits/transaction_executor.rs index 05e1c1a01..3a76d9779 100644 --- a/dan_layer/consensus/src/traits/transaction_executor.rs +++ b/dan_layer/consensus/src/traits/transaction_executor.rs @@ -1,7 +1,12 @@ // Copyright 2024 The Tari Project // SPDX-License-Identifier: BSD-3-Clause -use tari_dan_storage::{consensus_models::ExecutedTransaction, StateStore, StorageError}; +use tari_dan_common_types::Epoch; +use tari_dan_storage::{ + consensus_models::{ExecutedTransaction, TransactionRecord}, + StateStore, + StorageError, +}; use tari_engine_types::substate::SubstateId; use tari_transaction::Transaction; @@ -22,12 +27,32 @@ pub enum BlockTransactionExecutorError { StateStoreError(String), #[error("Substate store error: {0}")] SubstateStoreError(#[from] SubstateStoreError), + #[error("Transaction validation error: {0}")] + TransactionValidationError(String), } pub trait BlockTransactionExecutor { + fn validate( + &self, + tx: &TStateStore::ReadTransaction<'_>, + current_epoch: Epoch, + transaction: &Transaction, + ) -> Result<(), BlockTransactionExecutorError>; + + fn prepare( + &self, + transaction: Transaction, + store: &TStateStore, + ) -> Result; fn execute( &self, transaction: Transaction, store: &PendingSubstateStore, + current_epoch: Epoch, ) -> Result; + // fn accept( + // &self, + // transaction: ExecutedTransaction, + // store: &TStateStore, + // ) -> Result<(), BlockTransactionExecutorError>; } diff --git a/dan_layer/consensus_tests/src/consensus.rs b/dan_layer/consensus_tests/src/consensus.rs index 55649cd42..957f4e86b 100644 --- a/dan_layer/consensus_tests/src/consensus.rs +++ b/dan_layer/consensus_tests/src/consensus.rs @@ -10,6 +10,7 @@ use std::time::Duration; +use tari_common_types::types::PrivateKey; use tari_consensus::hotstuff::HotStuffError; use tari_dan_common_types::{optional::Optional, shard::Shard, Epoch, NodeHeight}; use tari_dan_storage::{ @@ -22,7 +23,6 @@ use tari_transaction::{SubstateRequirement, Transaction}; use crate::support::{ build_transaction, build_transaction_from, - build_transaction_with_inputs, change_decision, create_execution_result_for_transaction, logging::setup_logger, @@ -145,8 +145,21 @@ async fn node_requests_missing_transaction_from_local_leader() { // block. We could send to "1" but the test would have to wait for the block time to be hit and block 1 to be // proposed before node "1" can propose block 2 with all the transactions. for _ in 0..10 { - test.send_transaction_to(&TestAddress::new("2"), Decision::Commit, 1, 5) + let transaction = test + .send_transaction_to(&TestAddress::new("2"), Decision::Commit, 1, 5) .await; + // All VNs will decide the same thing + test.create_execution_at_destination( + TestNetworkDestination::All, + create_execution_result_for_transaction( + BlockId::zero(), + *transaction.id(), + transaction.current_decision(), + 0, + transaction.resolved_inputs.clone().unwrap_or_default(), + transaction.resulting_outputs.clone(), + ), + ); } test.start_epoch(Epoch(1)).await; loop { @@ -279,6 +292,7 @@ async fn foreign_shard_decides_to_abort() { .await; let tx2 = change_decision(tx1.clone().try_into().unwrap(), Decision::Abort); assert_eq!(tx1.id(), tx2.id()); + assert!(tx2.current_decision().is_abort()); test.send_transaction_to_destination(TestNetworkDestination::Shard(1), tx2.clone()) .await; @@ -302,12 +316,11 @@ async fn foreign_shard_decides_to_abort() { } test.assert_all_validators_at_same_height().await; - test.assert_all_validators_have_decision(tx1.id(), Decision::Abort) + test.assert_all_validators_have_decision(tx2.id(), Decision::Abort) .await; - test.assert_all_validators_did_not_commit(); - log::info!("total messages sent: {}", test.network().total_messages_sent()); test.assert_clean_shutdown().await; + log::info!("total messages sent: {}", test.network().total_messages_sent()); } #[tokio::test(flavor = "multi_thread", worker_threads = 4)] @@ -367,7 +380,72 @@ async fn output_conflict_abort() { } #[tokio::test(flavor = "multi_thread", worker_threads = 4)] -async fn inputs_depend_on_outputs_multishard() { +async fn single_shard_inputs_from_previous_outputs() { + setup_logger(); + let mut test = Test::builder() + .debug_sql("/tmp/test{}.db") + .add_committee(0, vec!["1", "2"]) + .start() + .await; + + let tx1 = build_transaction(Decision::Commit, 1, 5, 2); + let resulting_outputs = tx1.resulting_outputs().to_vec(); + test.send_transaction_to_destination(TestNetworkDestination::All, tx1.clone()) + .await; + + let tx2 = Transaction::builder() + .with_inputs(resulting_outputs.clone().into_iter().map(Into::into)) + .sign(&Default::default()) + .build(); + let tx2 = build_transaction_from( + tx2.clone(), + Decision::Commit, + 1, + resulting_outputs + .clone() + .into_iter() + .map(VersionedSubstateIdLockIntent::write) + .collect(), + vec![], + ); + + test.send_transaction_to_destination(TestNetworkDestination::All, tx2.clone()) + .await; + + test.start_epoch(Epoch(1)).await; + + test.wait_for_n_to_be_finalized(2).await; + + let leaf1 = test.get_validator(&TestAddress::new("1")).get_leaf_block(); + let leaf2 = test.get_validator(&TestAddress::new("2")).get_leaf_block(); + if leaf1.height > NodeHeight(30) || leaf2.height > NodeHeight(30) { + panic!( + "Not all transaction committed after {}/{} blocks", + leaf1.height, leaf2.height, + ); + } + + test.assert_all_validators_at_same_height().await; + // We do not work out input dependencies when we sequence transactions in blocks. Currently ordering within a block + // is lexicographical by transaction id, therefore both will only be committed if tx1 happens to be sequenced + // first. + if tx1.id() < tx2.id() { + test.assert_all_validators_have_decision(tx1.id(), Decision::Commit) + .await; + test.assert_all_validators_have_decision(tx2.id(), Decision::Commit) + .await; + } else { + test.assert_all_validators_have_decision(tx1.id(), Decision::Commit) + .await; + test.assert_all_validators_have_decision(tx2.id(), Decision::Abort) + .await; + } + + test.assert_clean_shutdown().await; + log::info!("total messages sent: {}", test.network().total_messages_sent()); +} +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn multishard_inputs_from_previous_outputs() { setup_logger(); let mut test = Test::builder() .with_test_timeout(Duration::from_secs(60)) @@ -395,7 +473,6 @@ async fn inputs_depend_on_outputs_multishard() { .collect(), vec![], ); - assert_ne!(tx1.id(), tx2.id()); test.send_transaction_to_destination(TestNetworkDestination::All, tx2.clone()) .await; @@ -431,7 +508,7 @@ async fn inputs_depend_on_outputs_multishard() { } #[tokio::test(flavor = "multi_thread", worker_threads = 4)] -async fn deferred_input_conflict() { +async fn single_shard_input_conflict() { setup_logger(); let mut test = Test::builder().add_committee(0, vec!["1", "2"]).start().await; @@ -449,31 +526,38 @@ async fn deferred_input_conflict() { .build(); let tx2 = TransactionRecord::new(tx2); - test.transaction_executions() - .insert(create_execution_result_for_transaction( + test.create_execution_at_destination( + TestNetworkDestination::All, + create_execution_result_for_transaction( BlockId::zero(), *tx1.id(), Decision::Commit, 0, vec![VersionedSubstateIdLockIntent::read(substate_id.clone())], vec![], - )) - .insert(create_execution_result_for_transaction( + ), + ) + .create_execution_at_destination( + TestNetworkDestination::All, + create_execution_result_for_transaction( BlockId::zero(), *tx2.id(), Decision::Commit, 0, vec![VersionedSubstateIdLockIntent::write(substate_id)], vec![], - )); + ), + ); // Transactions are sorted in the blocks, because we have a "first come first serve" policy for locking objects // the "first" will be Committed and the "last" Aborted let mut sorted_tx_ids = [tx1.id(), tx2.id()]; sorted_tx_ids.sort(); - test.send_transaction_to_destination(TestNetworkDestination::All, tx1.clone()) + test.network() + .send_transaction(TestNetworkDestination::All, tx1.clone()) .await; - test.send_transaction_to_destination(TestNetworkDestination::All, tx2.clone()) + test.network() + .send_transaction(TestNetworkDestination::All, tx2.clone()) .await; test.start_epoch(Epoch(1)).await; @@ -667,28 +751,34 @@ async fn foreign_block_distribution() { } #[tokio::test(flavor = "multi_thread", worker_threads = 4)] -async fn deferred_execution() { +async fn single_shard_unversioned_inputs() { setup_logger(); - let mut test = Test::builder().add_committee(0, vec!["1"]).start().await; + let mut test = Test::builder().add_committee(0, vec!["1", "2"]).start().await; // First get transaction in the mempool let inputs = test.create_substates_on_all_vns(1); // Remove versions from inputs to allow deferred transactions let unversioned_inputs = inputs .iter() .map(|i| SubstateRequirement::new(i.substate_id.clone(), None)); - let tx = build_transaction_with_inputs(Decision::Deferred, 1, unversioned_inputs); + let tx = Transaction::builder() + .with_inputs(unversioned_inputs) + .sign(&PrivateKey::default()) + .build(); + let tx = TransactionRecord::new(tx); - test.transaction_executions() - .insert(create_execution_result_for_transaction( + test.send_transaction_to_destination(TestNetworkDestination::All, tx.clone()) + .await; + test.create_execution_at_destination( + TestNetworkDestination::All, + create_execution_result_for_transaction( BlockId::zero(), *tx.id(), Decision::Commit, 0, inputs.into_iter().map(VersionedSubstateIdLockIntent::write).collect(), vec![], - )); - test.send_transaction_to_destination(TestNetworkDestination::All, tx) - .await; + ), + ); test.start_epoch(Epoch(1)).await; diff --git a/dan_layer/consensus_tests/src/substate_store.rs b/dan_layer/consensus_tests/src/substate_store.rs index a5070ec17..84deccf98 100644 --- a/dan_layer/consensus_tests/src/substate_store.rs +++ b/dan_layer/consensus_tests/src/substate_store.rs @@ -87,7 +87,7 @@ fn it_allows_down_then_up() { }) .unwrap(); - let s = store.get(&id.to_next_version().to_substate_address()).unwrap(); + let s = store.get(&id.to_next_version()).unwrap(); assert_substate_eq(s, new_substate(1, 1)); let s = store.get_latest(id.substate_id()).unwrap(); assert_substate_eq(s, new_substate(1, 1)); @@ -224,7 +224,7 @@ fn create_pending_store<'a, 'tx, TAddr: NodeAddressable>( tx: &'a as StateStore>::ReadTransaction<'tx>, ) -> PendingSubstateStore<'a, 'tx, SqliteStateStore> { let tree_store = ChainScopedTreeStore::new(Epoch::zero(), Shard::zero(), tx); - PendingSubstateStore::new(tree_store) + PendingSubstateStore::new(BlockId::zero(), tree_store) } fn new_substate_id(seed: u8) -> SubstateId { diff --git a/dan_layer/consensus_tests/src/support/executions_store.rs b/dan_layer/consensus_tests/src/support/executions_store.rs index 843a27c33..652d64dd9 100644 --- a/dan_layer/consensus_tests/src/support/executions_store.rs +++ b/dan_layer/consensus_tests/src/support/executions_store.rs @@ -9,9 +9,11 @@ use std::{ use tari_dan_storage::consensus_models::TransactionExecution; use tari_transaction::TransactionId; +type TestExecutionStore = HashMap; + #[derive(Debug, Clone, Default)] pub struct TestTransactionExecutionsStore { - transactions: Arc>>, + transactions: Arc>, } impl TestTransactionExecutionsStore { diff --git a/dan_layer/consensus_tests/src/support/harness.rs b/dan_layer/consensus_tests/src/support/harness.rs index 38dcdac81..017aeba36 100644 --- a/dan_layer/consensus_tests/src/support/harness.rs +++ b/dan_layer/consensus_tests/src/support/harness.rs @@ -3,6 +3,7 @@ use std::{ collections::{hash_map, HashMap, HashSet}, + fmt::Display, time::Duration, }; @@ -10,7 +11,7 @@ use futures::{stream::FuturesUnordered, FutureExt, StreamExt}; use tari_consensus::hotstuff::HotstuffEvent; use tari_dan_common_types::{committee::Committee, shard::Shard, Epoch, NodeHeight}; use tari_dan_storage::{ - consensus_models::{BlockId, Decision, QcId, SubstateRecord, TransactionRecord}, + consensus_models::{BlockId, Decision, QcId, SubstateRecord, TransactionExecution, TransactionRecord}, StateStore, StateStoreReadTransaction, StorageError, @@ -25,11 +26,10 @@ use tari_template_lib::models::ComponentAddress; use tari_transaction::{TransactionId, VersionedSubstateId}; use tokio::{sync::broadcast, task, time::sleep}; -use super::{helpers, MessageFilter}; +use super::{create_execution_result_for_transaction, helpers, MessageFilter}; use crate::support::{ address::TestAddress, epoch_manager::TestEpochManager, - executions_store::TestTransactionExecutionsStore, network::{spawn_network, TestNetwork, TestNetworkDestination}, transaction::build_transaction, validator::Validator, @@ -40,7 +40,6 @@ use crate::support::{ pub struct Test { validators: HashMap, network: TestNetwork, - transaction_executions: TestTransactionExecutionsStore, _leader_strategy: RoundRobinLeaderStrategy, epoch_manager: TestEpochManager, shutdown: Shutdown, @@ -52,11 +51,18 @@ impl Test { TestBuilder::new() } - pub async fn send_transaction_to(&self, addr: &TestAddress, decision: Decision, fee: u64, num_shards: usize) { + pub async fn send_transaction_to( + &self, + addr: &TestAddress, + decision: Decision, + fee: u64, + num_shards: usize, + ) -> TransactionRecord { let num_committees = self.epoch_manager.get_num_committees(Epoch(0)).await.unwrap(); let transaction = build_transaction(decision, fee, num_shards, num_committees); - self.send_transaction_to_destination(TestNetworkDestination::Address(addr.clone()), transaction) + self.send_transaction_to_destination(TestNetworkDestination::Address(addr.clone()), transaction.clone()) .await; + transaction } pub async fn send_transaction_to_all(&self, decision: Decision, fee: u64, num_shards: usize) { @@ -67,17 +73,31 @@ impl Test { } pub async fn send_transaction_to_destination(&self, dest: TestNetworkDestination, transaction: TransactionRecord) { - let num_committees = self.epoch_manager.get_num_committees(Epoch(0)).await.unwrap(); - self.validators.values().for_each(|v| { - if dest.is_for(&v.address, v.substate_address.to_shard(num_committees)) { - v.state_store.with_write_tx(|tx| transaction.insert(tx)).unwrap(); - } - }); + self.create_execution_at_destination( + dest.clone(), + create_execution_result_for_transaction( + BlockId::zero(), + *transaction.id(), + transaction.current_decision(), + 0, + transaction.resolved_inputs.clone().unwrap_or_default(), + transaction.resulting_outputs.clone(), + ), + ); self.network.send_transaction(dest, transaction).await; } - pub fn transaction_executions(&self) -> &TestTransactionExecutionsStore { - &self.transaction_executions + pub fn create_execution_at_destination( + &self, + dest: TestNetworkDestination, + execution: TransactionExecution, + ) -> &Self { + for vn in self.validators.values() { + if dest.is_for(&vn.address, vn.shard) { + vn.transaction_executions.insert(execution.clone()); + } + } + self } pub fn create_substates_on_all_vns(&self, num: usize) -> Vec { @@ -230,12 +250,23 @@ impl Test { }) } + pub async fn wait_for_n_to_be_finalized(&self, n: usize) { + self.wait_all_for_predicate("waiting for n to be finalized", |vn| { + let transactions = vn + .state_store + .with_read_tx(|tx| tx.transactions_get_paginated(10000, 0, None)) + .unwrap(); + log::info!("{} has {} transactions in pool", vn.address, transactions.len()); + transactions.iter().filter(|tx| tx.is_finalized()).count() >= n + }) + .await + } + pub fn with_all_validators(&self, f: impl FnMut(&Validator)) { self.validators.values().for_each(f); } - #[allow(dead_code)] - async fn wait_all_for_predicate bool>(&self, description: String, mut predicate: P) { + async fn wait_all_for_predicate bool>(&self, description: T, mut predicate: P) { let mut complete = vec![]; let mut remaining_loops = 100usize; // ~10 seconds loop { @@ -330,6 +361,7 @@ impl Test { attempts += 1; // Send this task to the back of the queue and try again after other tasks have executed // to allow validators to catch up + // tokio::time::sleep(Duration::from_millis(50)).await; task::yield_now().await; continue 'outer; } @@ -351,16 +383,6 @@ impl Test { }); } - pub fn assert_all_validators_did_not_commit(&self) { - self.validators.values().for_each(|v| { - assert!( - !v.has_committed_substates(), - "Validator {} committed but we expected it not to", - v.address - ); - }); - } - pub async fn assert_clean_shutdown(&mut self) { self.shutdown.trigger(); for (_, v) in self.validators.drain() { @@ -434,7 +456,6 @@ impl TestBuilder { &self, leader_strategy: &RoundRobinLeaderStrategy, epoch_manager: &TestEpochManager, - transaction_executions: TestTransactionExecutionsStore, shutdown_signal: ShutdownSignal, ) -> (Vec, HashMap) { epoch_manager @@ -447,7 +468,6 @@ impl TestBuilder { let (channels, validator) = Validator::builder() .with_sql_url(sql_address) - .with_transaction_executions(transaction_executions.clone()) .with_address_and_secret_key(address.clone(), sk) .with_shard(shard) .with_bucket(bucket) @@ -478,21 +498,14 @@ impl TestBuilder { let epoch_manager = TestEpochManager::new(tx_epoch_events); epoch_manager.add_committees(self.committees.clone()).await; let shutdown = Shutdown::new(); - let transaction_executions = TestTransactionExecutionsStore::new(); let (channels, validators) = self - .build_validators( - &leader_strategy, - &epoch_manager, - transaction_executions.clone(), - shutdown.to_signal(), - ) + .build_validators(&leader_strategy, &epoch_manager, shutdown.to_signal()) .await; let network = spawn_network(channels, shutdown.to_signal(), self.message_filter); Test { validators, network, - transaction_executions, _leader_strategy: leader_strategy, epoch_manager, diff --git a/dan_layer/consensus_tests/src/support/network.rs b/dan_layer/consensus_tests/src/support/network.rs index d6f9ff7fe..68c5c623b 100644 --- a/dan_layer/consensus_tests/src/support/network.rs +++ b/dan_layer/consensus_tests/src/support/network.rs @@ -10,7 +10,7 @@ use futures::{stream::FuturesUnordered, FutureExt, StreamExt}; use itertools::Itertools; use tari_consensus::messages::HotstuffMessage; use tari_dan_common_types::shard::Shard; -use tari_dan_storage::{consensus_models::TransactionRecord, StateStore}; +use tari_dan_storage::consensus_models::TransactionRecord; use tari_shutdown::ShutdownSignal; use tari_state_store_sqlite::SqliteStateStore; use tari_transaction::{Transaction, TransactionId}; @@ -37,7 +37,7 @@ pub fn spawn_network( .map(|c| { ( c.address.clone(), - (c.bucket, c.tx_new_transactions.clone(), c.state_store.clone()), + (c.shard, c.tx_new_transactions.clone(), c.state_store.clone()), ) }) .collect(); @@ -45,15 +45,9 @@ pub fn spawn_network( .iter() .map(|c| (c.address.clone(), c.tx_hs_message.clone())) .collect(); - let (rx_broadcast, rx_leader, rx_mempool) = channels + let (rx_broadcast, rx_leader) = channels .into_iter() - .map(|c| { - ( - (c.address.clone(), c.rx_broadcast), - (c.address.clone(), c.rx_leader), - (c.address, c.rx_mempool), - ) - }) + .map(|c| ((c.address.clone(), c.rx_broadcast), (c.address.clone(), c.rx_leader))) .multiunzip(); let (tx_new_transaction, rx_new_transaction) = mpsc::channel(100); let (tx_network_status, network_status) = watch::channel(NetworkStatus::Paused); @@ -70,7 +64,6 @@ pub fn spawn_network( tx_hs_message, rx_broadcast: Some(rx_broadcast), rx_leader: Some(rx_leader), - rx_mempool: Some(rx_mempool), on_message: tx_on_message, num_sent_messages: num_sent_messages.clone(), num_filtered_messages: num_filtered_messages.clone(), @@ -180,20 +173,13 @@ impl TestNetworkDestination { pub struct TestNetworkWorker { rx_new_transaction: Option>, #[allow(clippy::type_complexity)] - tx_new_transactions: HashMap< - TestAddress, - ( - Shard, - mpsc::Sender<(TransactionId, usize)>, - SqliteStateStore, - ), - >, + tx_new_transactions: + HashMap, SqliteStateStore)>, tx_hs_message: HashMap>, #[allow(clippy::type_complexity)] rx_broadcast: Option, HotstuffMessage)>>>, #[allow(clippy::type_complexity)] rx_leader: Option>>, - rx_mempool: Option>>, network_status: watch::Receiver, on_message: watch::Sender>, num_sent_messages: Arc, @@ -213,7 +199,6 @@ impl TestNetworkWorker { async fn run(mut self) { let mut rx_broadcast = self.rx_broadcast.take().unwrap(); let mut rx_leader = self.rx_leader.take().unwrap(); - let mut rx_mempool = self.rx_mempool.take().unwrap(); let mut rx_new_transaction = self.rx_new_transaction.take().unwrap(); let tx_new_transactions = self.tx_new_transactions.clone(); @@ -242,13 +227,13 @@ impl TestNetworkWorker { for (addr, (shard, tx_new_transaction_to_consensus, _)) in &tx_new_transactions { if dest.is_for(addr, *shard) { tx_new_transaction_to_consensus - .send((*tx_record.id(), remaining)) + .send((tx_record.transaction().clone(), remaining)) .await .unwrap(); log::info!("🐞 New transaction {} for vn {}", tx_record.id(), addr); } else { - log::warn!( - "⚠️🐞 New transaction {} for vn {} was not sent to consensus (dest = {:?})", + log::debug!( + "ℹ️🐞 New transaction {} not destined for vn {} (dest = {:?})", tx_record.id(), addr, dest @@ -269,11 +254,6 @@ impl TestNetworkWorker { .map(|(from, rx)| rx.recv().map(|r| (from.clone(), r))) .collect::>(); - let mut rx_mempool = rx_mempool - .iter_mut() - .map(|(from, rx)| rx.recv().map(|r| (from.clone(), r))) - .collect::>(); - tokio::select! { biased; @@ -299,7 +279,6 @@ impl TestNetworkWorker { Some((from, Some((to, msg)))) = rx_broadcast.next() => self.handle_broadcast(from, to, msg).await, Some((from, Some((to, msg)))) = rx_leader.next() => self.handle_leader(from, to, msg).await, - Some((from, Some(msg))) = rx_mempool.next() => self.handle_mempool(from, msg).await, } } @@ -352,29 +331,8 @@ impl TestNetworkWorker { self.tx_hs_message.get(&to).unwrap().send((from, msg)).await.unwrap(); } - async fn is_offline_destination(&self, addr: &TestAddress, bucket: Shard) -> bool { + async fn is_offline_destination(&self, addr: &TestAddress, shard: Shard) -> bool { let lock = self.offline_destinations.read().await; - lock.iter().any(|d| d.is_for(addr, bucket)) - } - - /// Handles transactions that come in from missing transactions - async fn handle_mempool(&mut self, from: TestAddress, msg: Transaction) { - let (_, sender, state_store) = self - .tx_new_transactions - .get(&from) - .unwrap_or_else(|| panic!("No new transaction channel for {}", from)); - - // In the normal case, we need to provide the same execution results to consensus. In future, we could add code - // here to make a local decision to ABORT. - let existing_tx = self.transaction_store.read().await.get(msg.id()).unwrap().clone(); - state_store - .with_write_tx(|tx| { - // Add the transaction from the store to the node's db - existing_tx.upsert(tx)?; - Ok::<_, anyhow::Error>(()) - }) - .unwrap(); - - sender.send((*existing_tx.id(), 0)).await.unwrap(); + lock.iter().any(|d| d.is_for(addr, shard)) } } diff --git a/dan_layer/consensus_tests/src/support/transaction.rs b/dan_layer/consensus_tests/src/support/transaction.rs index 55121b7ae..d7b2e3d7f 100644 --- a/dan_layer/consensus_tests/src/support/transaction.rs +++ b/dan_layer/consensus_tests/src/support/transaction.rs @@ -19,7 +19,7 @@ use tari_engine_types::{ fees::FeeReceipt, substate::{Substate, SubstateDiff}, }; -use tari_transaction::{SubstateRequirement, Transaction, TransactionId, VersionedSubstateId}; +use tari_transaction::{Transaction, TransactionId, VersionedSubstateId}; use crate::support::helpers::random_substate_in_shard; @@ -31,23 +31,24 @@ pub fn build_transaction_from( resulting_outputs: Vec, ) -> TransactionRecord { let mut tx = TransactionRecord::new(tx); + if decision.is_abort() { + tx.set_current_decision_to_abort("Test aborted"); + } - if !decision.is_deferred() { - let execution = create_execution_result_for_transaction( - // We're just building the execution here for DRY purposes, so genesis block id isn't used - BlockId::zero(), - *tx.id(), - decision, - fee, - resolved_inputs, - resulting_outputs.clone(), - ); + let execution = create_execution_result_for_transaction( + // We're just building the execution here for DRY purposes, so genesis block id isn't used + BlockId::zero(), + *tx.id(), + decision, + fee, + resolved_inputs, + resulting_outputs.clone(), + ); - tx.result = Some(execution.result); - tx.resulting_outputs = execution.resulting_outputs; - tx.execution_time = Some(execution.execution_time); - tx.resolved_inputs = Some(execution.resolved_inputs); - } + tx.execution_result = Some(execution.result); + tx.resulting_outputs = execution.resulting_outputs; + tx.execution_time = Some(execution.execution_time); + tx.resolved_inputs = Some(execution.resolved_inputs); tx } @@ -119,16 +120,6 @@ pub fn build_transaction( build_transaction_from(tx, decision, fee, vec![], outputs) } -pub fn build_transaction_with_inputs>( - decision: Decision, - fee: u64, - inputs: I, -) -> TransactionRecord { - let k = PrivateKey::default(); - let tx = Transaction::builder().with_inputs(inputs).sign(&k).build(); - build_transaction_from(tx, decision, fee, vec![], vec![]) -} - pub fn change_decision(tx: ExecutedTransaction, new_decision: Decision) -> TransactionRecord { let total_fees_paid = tx .result() diff --git a/dan_layer/consensus_tests/src/support/transaction_executor.rs b/dan_layer/consensus_tests/src/support/transaction_executor.rs index 1eb12c6a8..526414a55 100644 --- a/dan_layer/consensus_tests/src/support/transaction_executor.rs +++ b/dan_layer/consensus_tests/src/support/transaction_executor.rs @@ -5,6 +5,7 @@ use tari_consensus::{ hotstuff::substate_store::PendingSubstateStore, traits::{BlockTransactionExecutor, BlockTransactionExecutorError}, }; +use tari_dan_common_types::{optional::Optional, Epoch}; use tari_dan_storage::{ consensus_models::{ExecutedTransaction, TransactionRecord}, StateStore, @@ -25,21 +26,45 @@ impl TestBlockTransactionProcessor { } impl BlockTransactionExecutor for TestBlockTransactionProcessor { + fn validate( + &self, + _tx: &TStateStore::ReadTransaction<'_>, + _current_epoch: Epoch, + _transaction: &Transaction, + ) -> Result<(), BlockTransactionExecutorError> { + Ok(()) + } + + fn prepare( + &self, + transaction: Transaction, + store: &TStateStore, + ) -> Result { + let t = store.with_read_tx(|tx| TransactionRecord::get(tx, transaction.id()))?; + Ok(t) + } + fn execute( &self, transaction: Transaction, store: &PendingSubstateStore, + _current_epoch: Epoch, ) -> Result { if let Some(execution) = self.store.get(transaction.id()) { let mut rec = TransactionRecord::new(transaction); rec.resolved_inputs = Some(execution.resolved_inputs().to_vec()); - rec.result = Some(execution.result().clone()); + rec.execution_result = Some(execution.result().clone()); rec.resulting_outputs.clone_from(execution.resulting_outputs()); rec.execution_time = Some(execution.execution_time()); return Ok(rec.try_into().unwrap()); } - let executed = ExecutedTransaction::get(store.read_transaction(), transaction.id())?; + let executed = ExecutedTransaction::get(store.read_transaction(), transaction.id()) + .optional()? + .expect( + "ExecutedTransaction was not found by the test executor. Perhaps you need to explicitly add an \ + execution", + ); Ok(executed) } } diff --git a/dan_layer/consensus_tests/src/support/validator/builder.rs b/dan_layer/consensus_tests/src/support/validator/builder.rs index 368973188..cf215b3bb 100644 --- a/dan_layer/consensus_tests/src/support/validator/builder.rs +++ b/dan_layer/consensus_tests/src/support/validator/builder.rs @@ -32,8 +32,8 @@ pub struct ValidatorBuilder { pub address: TestAddress, pub secret_key: PrivateKey, pub public_key: PublicKey, - pub shard: SubstateAddress, - pub bucket: Shard, + pub shard_address: SubstateAddress, + pub shard: Shard, pub sql_url: String, pub leader_strategy: RoundRobinLeaderStrategy, pub epoch_manager: Option, @@ -46,8 +46,8 @@ impl ValidatorBuilder { address: TestAddress::new("default"), secret_key: PrivateKey::default(), public_key: PublicKey::default(), - shard: SubstateAddress::zero(), - bucket: Shard::from(0), + shard_address: SubstateAddress::zero(), + shard: Shard::from(0), sql_url: ":memory".to_string(), leader_strategy: RoundRobinLeaderStrategy::new(), epoch_manager: None, @@ -62,18 +62,13 @@ impl ValidatorBuilder { self } - pub fn with_transaction_executions(&mut self, transaction_executions: TestTransactionExecutionsStore) -> &mut Self { - self.transaction_executions = transaction_executions; - self - } - pub fn with_bucket(&mut self, bucket: Shard) -> &mut Self { - self.bucket = bucket; + self.shard = bucket; self } pub fn with_shard(&mut self, shard: SubstateAddress) -> &mut Self { - self.shard = shard; + self.shard_address = shard; self } @@ -103,7 +98,6 @@ impl ValidatorBuilder { let (tx_new_transactions, rx_new_transactions) = mpsc::channel(100); let (tx_hs_message, rx_hs_message) = mpsc::channel(100); let (tx_leader, rx_leader) = mpsc::channel(100); - let (tx_mempool, rx_mempool) = mpsc::unbounded_channel(); let (outbound_messaging, rx_loopback) = TestOutboundMessaging::create(tx_leader, tx_broadcast); let inbound_messaging = TestInboundMessaging::new(self.address.clone(), rx_hs_message, rx_loopback); @@ -113,11 +107,11 @@ impl ValidatorBuilder { let transaction_pool = TransactionPool::new(); let (tx_events, _) = broadcast::channel(100); - let epoch_manager = - self.epoch_manager - .as_ref() - .unwrap() - .clone_for(self.address.clone(), self.public_key.clone(), self.shard); + let epoch_manager = self.epoch_manager.as_ref().unwrap().clone_for( + self.address.clone(), + self.public_key.clone(), + self.shard_address, + ); let transaction_executor = TestBlockTransactionProcessor::new(self.transaction_executions.clone()); @@ -134,7 +128,6 @@ impl ValidatorBuilder { transaction_pool, transaction_executor, tx_events.clone(), - tx_mempool, NoopHooks, shutdown_signal.clone(), HotstuffConfig { @@ -156,18 +149,19 @@ impl ValidatorBuilder { let channels = ValidatorChannels { address: self.address.clone(), - bucket: self.bucket, + shard: self.shard, state_store: store.clone(), tx_new_transactions, tx_hs_message, rx_broadcast, rx_leader, - rx_mempool, }; let validator = Validator { address: self.address.clone(), - substate_address: self.shard, + shard_address: self.shard_address, + shard: self.shard, + transaction_executions: self.transaction_executions.clone(), state_store: store, epoch_manager, leader_strategy: self.leader_strategy, diff --git a/dan_layer/consensus_tests/src/support/validator/instance.rs b/dan_layer/consensus_tests/src/support/validator/instance.rs index d1fa954e9..5a48e1ff2 100644 --- a/dan_layer/consensus_tests/src/support/validator/instance.rs +++ b/dan_layer/consensus_tests/src/support/validator/instance.rs @@ -8,7 +8,7 @@ use tari_consensus::{ use tari_dan_common_types::{shard::Shard, SubstateAddress}; use tari_dan_storage::{consensus_models::LeafBlock, StateStore, StateStoreReadTransaction}; use tari_state_store_sqlite::SqliteStateStore; -use tari_transaction::{Transaction, TransactionId}; +use tari_transaction::Transaction; use tokio::{ sync::{broadcast, mpsc, watch}, task::JoinHandle, @@ -17,27 +17,29 @@ use tokio::{ use crate::support::{ address::TestAddress, epoch_manager::TestEpochManager, + executions_store::TestTransactionExecutionsStore, RoundRobinLeaderStrategy, ValidatorBuilder, }; pub struct ValidatorChannels { pub address: TestAddress, - pub bucket: Shard, + pub shard: Shard, pub state_store: SqliteStateStore, - pub tx_new_transactions: mpsc::Sender<(TransactionId, usize)>, + pub tx_new_transactions: mpsc::Sender<(Transaction, usize)>, pub tx_hs_message: mpsc::Sender<(TestAddress, HotstuffMessage)>, pub rx_broadcast: mpsc::Receiver<(Vec, HotstuffMessage)>, pub rx_leader: mpsc::Receiver<(TestAddress, HotstuffMessage)>, - pub rx_mempool: mpsc::UnboundedReceiver, } pub struct Validator { pub address: TestAddress, - pub substate_address: SubstateAddress, + pub shard_address: SubstateAddress, + pub shard: Shard, pub state_store: SqliteStateStore, + pub transaction_executions: TestTransactionExecutionsStore, pub epoch_manager: TestEpochManager, pub leader_strategy: RoundRobinLeaderStrategy, pub events: broadcast::Receiver, diff --git a/dan_layer/engine/src/lib.rs b/dan_layer/engine/src/lib.rs index 7fbce7c33..1d8dcf743 100644 --- a/dan_layer/engine/src/lib.rs +++ b/dan_layer/engine/src/lib.rs @@ -1,10 +1,6 @@ // Copyright 2022 The Tari Project // SPDX-License-Identifier: BSD-3-Clause -// FIXME: RuntimeError is at least 144 bytes -#![allow(clippy::result_large_err)] - -mod bootstrap; pub mod fees; pub mod flow; pub mod function_definitions; @@ -15,7 +11,6 @@ pub mod traits; pub mod transaction; pub mod wasm; -pub use bootstrap::bootstrap_state; pub use tari_template_abi as abi; pub mod base_layer_hashers { diff --git a/dan_layer/engine/src/runtime/error.rs b/dan_layer/engine/src/runtime/error.rs index bdd4d8a03..50ec598b7 100644 --- a/dan_layer/engine/src/runtime/error.rs +++ b/dan_layer/engine/src/runtime/error.rs @@ -80,7 +80,8 @@ pub enum RuntimeError { #[error("Substate {address} is not owned by {requested_owner}")] SubstateNotOwned { address: SubstateId, - requested_owner: SubstateId, + // To reduce the size of this variant, we box one of the fields + requested_owner: Box, }, #[error("Expected lock {lock_id} to lock {expected_type} but it locks {address}")] LockSubstateMismatch { @@ -149,7 +150,8 @@ pub enum RuntimeError { #[error("Access Denied: attempt to set state on component {attempted_on} from another component {attempted_by}")] AccessDeniedSetComponentState { attempted_on: SubstateId, - attempted_by: SubstateId, + // To reduce the size of this variant, we box one of the fields + attempted_by: Box, }, #[error("Resource Auth Hook Denied Access for action {action_ident}: {details}")] AccessDeniedAuthHook { action_ident: ActionIdent, details: String }, diff --git a/dan_layer/engine/src/runtime/impl.rs b/dan_layer/engine/src/runtime/impl.rs index 0b83a720d..b9152d8c4 100644 --- a/dan_layer/engine/src/runtime/impl.rs +++ b/dan_layer/engine/src/runtime/impl.rs @@ -645,12 +645,16 @@ impl> RuntimeInte if *component_lock.address() != component_address { return Err(RuntimeError::AccessDeniedSetComponentState { attempted_on: component_address.into(), - attempted_by: component_lock.address().clone(), + attempted_by: Box::new(component_lock.address().clone()), }); } state.modify_component_with(&component_lock, |component| { + if component_state == *component.state() { + return false; + } component.body.set(component_state); + true })?; Ok(InvokeResult::unit()) @@ -690,7 +694,11 @@ impl> RuntimeInte .require_ownership(ComponentAction::SetAccessRules, component.as_ownership())?; state.modify_component_with(&component_lock, |component| { + if access_rules == component.access_rules { + return false; + } component.set_access_rules(access_rules); + true })?; Ok::<_, RuntimeError>(()) @@ -2215,25 +2223,6 @@ impl> RuntimeInte Ok(()) } - fn create_free_test_coins( - &self, - revealed_amount: Amount, - output: Option, - ) -> Result { - let resource = ResourceContainer::confidential( - CONFIDENTIAL_TARI_RESOURCE_ADDRESS, - output.map(|o| (o.commitment.clone(), o)), - revealed_amount, - ); - - self.tracker.write_with(|state| { - let bucket_id = state.new_bucket_id(); - state.new_bucket(bucket_id, resource)?; - state.set_last_instruction_output(IndexedValue::from_type(&bucket_id)?); - Ok::<_, RuntimeError>(bucket_id) - }) - } - fn set_fee_checkpoint(&self) -> Result<(), RuntimeError> { if self.tracker.total_fee_payments() < self.tracker.total_fee_charges() { return Err(RuntimeError::InsufficientFeesPaid { diff --git a/dan_layer/engine/src/runtime/mod.rs b/dan_layer/engine/src/runtime/mod.rs index 62ff6c9e5..cdbd6da6f 100644 --- a/dan_layer/engine/src/runtime/mod.rs +++ b/dan_layer/engine/src/runtime/mod.rs @@ -59,7 +59,7 @@ use tari_dan_common_types::Epoch; use tari_engine_types::{ commit_result::FinalizeResult, component::ComponentHeader, - confidential::{ConfidentialClaim, ConfidentialOutput}, + confidential::ConfidentialClaim, indexed_value::IndexedValue, lock::LockFlag, substate::SubstateValue, @@ -87,7 +87,7 @@ use tari_template_lib::{ WorkspaceAction, }, invoke_args, - models::{Amount, BucketId, ComponentAddress, EntityId, Metadata, NonFungibleAddress, VaultRef}, + models::{ComponentAddress, EntityId, Metadata, NonFungibleAddress, VaultRef}, }; pub use tracker::StateTracker; @@ -159,11 +159,6 @@ pub trait RuntimeInterface: Send + Sync { fn claim_validator_fees(&self, epoch: Epoch, validator_public_key: PublicKey) -> Result<(), RuntimeError>; - fn create_free_test_coins( - &self, - revealed_amount: Amount, - confidential_output: Option, - ) -> Result; fn set_fee_checkpoint(&self) -> Result<(), RuntimeError>; fn reset_to_fee_checkpoint(&self) -> Result<(), RuntimeError>; fn finalize(&self) -> Result; diff --git a/dan_layer/engine/src/runtime/state_store.rs b/dan_layer/engine/src/runtime/state_store.rs index 68ffe03a8..6b5503ab2 100644 --- a/dan_layer/engine/src/runtime/state_store.rs +++ b/dan_layer/engine/src/runtime/state_store.rs @@ -67,6 +67,41 @@ impl WorkingStateStore { Ok((lock.address().clone(), substate)) } + pub fn mutate_locked_substate_with< + R, + F: FnOnce(&SubstateId, &mut SubstateValue) -> Result, RuntimeError>, + >( + &mut self, + lock_id: LockId, + callback: F, + ) -> Result, RuntimeError> { + let lock = self.locked_substates.get(lock_id, LockFlag::Write)?; + if let Some(mut substate) = self.loaded_substates.remove(lock.address()) { + return match callback(lock.address(), &mut substate)? { + Some(ret) => { + self.new_substates.insert(lock.address().clone(), substate); + Ok(Some(ret)) + }, + None => { + // It is undefined to mutate the state and return None from the callback. We do not assert this + // however which is risky. + self.loaded_substates.insert(lock.address().clone(), substate); + Ok(None) + }, + }; + } + + let substate_mut = self + .new_substates + .get_mut(lock.address()) + .ok_or_else(|| LockError::SubstateNotLocked { + address: lock.address().clone(), + })?; + + // Since the substate is already mutated, we dont really care if the callback mutates it again or not + callback(lock.address(), substate_mut) + } + pub fn get_locked_substate(&self, lock_id: LockId) -> Result<(SubstateId, &SubstateValue), RuntimeError> { let lock = self.locked_substates.get(lock_id, LockFlag::Read)?; let substate = self.get_ref(lock.address())?; diff --git a/dan_layer/engine/src/runtime/working_state.rs b/dan_layer/engine/src/runtime/working_state.rs index bc090d1d3..5c0c68f35 100644 --- a/dan_layer/engine/src/runtime/working_state.rs +++ b/dan_layer/engine/src/runtime/working_state.rs @@ -171,26 +171,39 @@ impl WorkingState { Ok(component) } - pub fn modify_component_with R>( + pub fn modify_component_with bool>( &mut self, locked: &LockedSubstate, f: F, - ) -> Result { - let (address, substate_mut) = self.store.get_locked_substate_mut(locked.lock_id())?; - let component_mut = substate_mut - .component_mut() - .ok_or_else(|| RuntimeError::LockSubstateMismatch { - lock_id: locked.lock_id(), - address, - expected_type: "Component", + ) -> Result<(), RuntimeError> { + let maybe_before_and_after = self + .store + .mutate_locked_substate_with(locked.lock_id(), |_, substate_mut| { + let component_mut = substate_mut + .component_mut() + .ok_or_else(|| RuntimeError::LockSubstateMismatch { + lock_id: locked.lock_id(), + address: locked.address().clone(), + expected_type: "Component", + })?; + + let before = IndexedWellKnownTypes::from_value(component_mut.state())?; + if !f(component_mut) { + // rollback + return Ok(None); + } + + let after = IndexedWellKnownTypes::from_value(component_mut.state())?; + Ok(Some((before, after))) })?; - let before = IndexedWellKnownTypes::from_value(component_mut.state())?; - let ret = f(component_mut); - let after = IndexedWellKnownTypes::from_value(component_mut.state())?; + let Some((before, after))= maybe_before_and_after else { + return Ok(()); + }; + self.validate_component_state(Some(&before), &after)?; - Ok(ret) + Ok(()) } pub fn get_resource(&self, locked: &LockedSubstate) -> Result<&Resource, RuntimeError> { @@ -1176,7 +1189,7 @@ impl WorkingState { ); return Err(RuntimeError::SubstateNotOwned { address: address.clone(), - requested_owner: component_lock.address().clone(), + requested_owner: Box::new(component_lock.address().clone()), }); } diff --git a/dan_layer/engine/src/bootstrap.rs b/dan_layer/engine/src/state_store/bootstrap.rs similarity index 62% rename from dan_layer/engine/src/bootstrap.rs rename to dan_layer/engine/src/state_store/bootstrap.rs index 293f8847d..6847a9a25 100644 --- a/dan_layer/engine/src/bootstrap.rs +++ b/dan_layer/engine/src/state_store/bootstrap.rs @@ -1,21 +1,32 @@ -// Copyright 2023 The Tari Project -// SPDX-License-Identifier: BSD-3-Clause +// Copyright 2024 The Tari Project +// SPDX-License-Identifier: BSD-3-Clause use tari_engine_types::{ resource::Resource, substate::{Substate, SubstateId}, }; use tari_template_lib::{ - auth::{AccessRule, ResourceAccessRules}, + auth::ResourceAccessRules, constants::{CONFIDENTIAL_TARI_RESOURCE_ADDRESS, PUBLIC_IDENTITY_RESOURCE_ADDRESS}, models::Metadata, prelude::{OwnerRule, ResourceType}, resource::TOKEN_SYMBOL, }; -use crate::state_store::{StateStoreError, StateWriter}; +use crate::state_store::{memory::MemoryStateStore, AtomicDb, StateStoreError, StateWriter}; -pub fn bootstrap_state(state_db: &mut T) -> Result<(), StateStoreError> { +pub fn new_memory_store() -> MemoryStateStore { + let state_db = MemoryStateStore::new(); + // unwrap: Memory state store is infallible + let mut tx = state_db.write_access().unwrap(); + // Add shared global resources + add_global_resources(&mut tx).unwrap(); + tx.commit().unwrap(); + state_db +} + +/// These are implicitly included in every transaction. These are immutable and pledging them is not required. +fn add_global_resources(state_db: &mut T) -> Result<(), StateStoreError> { let address = SubstateId::Resource(PUBLIC_IDENTITY_RESOURCE_ADDRESS); let mut metadata = Metadata::new(); metadata.insert(TOKEN_SYMBOL, "ID".to_string()); @@ -39,8 +50,7 @@ pub fn bootstrap_state(state_db: &mut T) -> Result<(), StateStor // Create the second layer tari resource let address = SubstateId::Resource(CONFIDENTIAL_TARI_RESOURCE_ADDRESS); let mut metadata = Metadata::new(); - // TODO: decide on symbol for L2 tari - metadata.insert(TOKEN_SYMBOL, "tXTR".to_string()); + metadata.insert(TOKEN_SYMBOL, "XTR".to_string()); state_db.set_state( &address, Substate::new( @@ -49,9 +59,7 @@ pub fn bootstrap_state(state_db: &mut T) -> Result<(), StateStor ResourceType::Confidential, None, OwnerRule::None, - ResourceAccessRules::new() - .withdrawable(AccessRule::AllowAll) - .depositable(AccessRule::AllowAll), + ResourceAccessRules::new(), metadata, None, None, diff --git a/dan_layer/engine/src/state_store/memory.rs b/dan_layer/engine/src/state_store/memory.rs index e49202c6f..dbc614155 100644 --- a/dan_layer/engine/src/state_store/memory.rs +++ b/dan_layer/engine/src/state_store/memory.rs @@ -38,7 +38,7 @@ pub struct MemoryStateStore { } impl MemoryStateStore { - pub fn new() -> Self { + pub(crate) fn new() -> Self { Self::default() } diff --git a/dan_layer/engine/src/state_store/mod.rs b/dan_layer/engine/src/state_store/mod.rs index b289386b0..381913724 100644 --- a/dan_layer/engine/src/state_store/mod.rs +++ b/dan_layer/engine/src/state_store/mod.rs @@ -20,6 +20,8 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +mod bootstrap; +pub use bootstrap::*; pub mod memory; use std::{error::Error, fmt::Debug}; diff --git a/dan_layer/engine/src/transaction/processor.rs b/dan_layer/engine/src/transaction/processor.rs index b956dbe9a..3b3a65cdd 100644 --- a/dan_layer/engine/src/transaction/processor.rs +++ b/dan_layer/engine/src/transaction/processor.rs @@ -31,13 +31,13 @@ use tari_engine_types::{ commit_result::{ExecuteResult, FinalizeResult, RejectReason, TransactionResult}, component::new_component_address_from_public_key, entity_id_provider::EntityIdProvider, - indexed_value::{IndexedValue, IndexedWellKnownTypes}, + indexed_value::IndexedWellKnownTypes, instruction::Instruction, instruction_result::InstructionResult, lock::LockFlag, virtual_substate::VirtualSubstates, }; -use tari_template_abi::{FunctionDef, Type}; +use tari_template_abi::FunctionDef; use tari_template_builtin::ACCOUNT_TEMPLATE_ADDRESS; use tari_template_lib::{ arg, @@ -263,18 +263,6 @@ impl + 'static> T .claim_validator_fees(Epoch(epoch), validator_public_key)?; Ok(InstructionResult::empty()) }, - Instruction::CreateFreeTestCoins { - revealed_amount: amount, - output, - } => { - let bucket_id = runtime.interface().create_free_test_coins(amount, output)?; - Ok(InstructionResult { - indexed: IndexedValue::from_type(&bucket_id)?, - return_type: Type::Other { - name: "BucketId".to_string(), - }, - }) - }, } } diff --git a/dan_layer/engine/tests/access_rules.rs b/dan_layer/engine/tests/access_rules.rs index 83c25fe6a..b36060633 100644 --- a/dan_layer/engine/tests/access_rules.rs +++ b/dan_layer/engine/tests/access_rules.rs @@ -1060,7 +1060,7 @@ mod resource_access_rules { assert_reject_reason(result, RuntimeError::AccessDeniedSetComponentState { attempted_on: user_account.into(), - attempted_by: component_address.into(), + attempted_by: Box::new(component_address.into()), }); } diff --git a/dan_layer/engine/tests/confidential.rs b/dan_layer/engine/tests/confidential.rs index 48b412ee9..aaef35449 100644 --- a/dan_layer/engine/tests/confidential.rs +++ b/dan_layer/engine/tests/confidential.rs @@ -126,10 +126,11 @@ fn transfer_confidential_amounts_between_accounts() { let diff = result.finalize.result.expect("Failed to execute manifest"); assert_eq!(diff.up_iter().filter(|(addr, _)| *addr == account1).count(), 1); assert_eq!(diff.down_iter().filter(|(addr, _)| *addr == account1).count(), 1); - assert_eq!(diff.up_iter().filter(|(addr, _)| *addr == faucet).count(), 1); - assert_eq!(diff.down_iter().filter(|(addr, _)| *addr == faucet).count(), 1); - assert_eq!(diff.up_iter().count(), 5); - assert_eq!(diff.down_iter().count(), 3); + // Faucet is not changed, only the faucet vault. + assert_eq!(diff.up_iter().filter(|(addr, _)| *addr == faucet).count(), 0); + assert_eq!(diff.down_iter().filter(|(addr, _)| *addr == faucet).count(), 0); + assert_eq!(diff.up_iter().count(), 4); + assert_eq!(diff.down_iter().count(), 2); let withdraw_proof = generate_withdraw_proof(&proof.output_mask, Amount(100), Some(Amount(900)), Amount(0)); let split_proof = generate_withdraw_proof(&withdraw_proof.output_mask, Amount(20), Some(Amount(80)), Amount(0)); @@ -165,12 +166,12 @@ fn transfer_confidential_amounts_between_accounts() { ) .unwrap(); let diff = result.finalize.result.expect("Failed to execute manifest"); - assert_eq!(diff.up_iter().filter(|(addr, _)| *addr == account1).count(), 1); - assert_eq!(diff.down_iter().filter(|(addr, _)| *addr == account1).count(), 1); + assert_eq!(diff.up_iter().filter(|(addr, _)| *addr == account1).count(), 0); + assert_eq!(diff.down_iter().filter(|(addr, _)| *addr == account1).count(), 0); assert_eq!(diff.up_iter().filter(|(addr, _)| *addr == account2).count(), 1); assert_eq!(diff.down_iter().filter(|(addr, _)| *addr == account2).count(), 1); - assert_eq!(diff.up_iter().count(), 5); - assert_eq!(diff.down_iter().count(), 3); + assert_eq!(diff.up_iter().count(), 4); + assert_eq!(diff.down_iter().count(), 2); } #[test] diff --git a/dan_layer/engine/tests/shenanigans.rs b/dan_layer/engine/tests/shenanigans.rs index 7d7a4f2bb..0893a59e0 100644 --- a/dan_layer/engine/tests/shenanigans.rs +++ b/dan_layer/engine/tests/shenanigans.rs @@ -201,7 +201,7 @@ fn it_prevents_access_to_vault_id_in_component_context() { // take_bucket_zero fails because the component didnt create the vault assert_reject_reason(reason, RuntimeError::SubstateNotOwned { address: vault_id.into(), - requested_owner: shenanigans.into(), + requested_owner: Box::new(shenanigans.into()), }); } @@ -234,7 +234,7 @@ fn it_prevents_access_to_out_of_scope_component() { // Fails because the engine does not lock this component assert_reject_reason(reason, RuntimeError::AccessDeniedSetComponentState { attempted_on: account.into(), - attempted_by: shenanigans.into(), + attempted_by: Box::new(shenanigans.into()), }); } diff --git a/dan_layer/engine/tests/test.rs b/dan_layer/engine/tests/test.rs index 44f8724cd..da933e015 100644 --- a/dan_layer/engine/tests/test.rs +++ b/dan_layer/engine/tests/test.rs @@ -559,8 +559,8 @@ mod basic_nft { assert_eq!(diff.up_iter().filter(|(addr, _)| addr.is_resource()).count(), 1); // NFT and account components changed - assert_eq!(diff.down_iter().filter(|(addr, _)| addr.is_component()).count(), 2); - assert_eq!(diff.up_iter().filter(|(addr, _)| addr.is_component()).count(), 2); + assert_eq!(diff.down_iter().filter(|(addr, _)| addr.is_component()).count(), 1); + assert_eq!(diff.up_iter().filter(|(addr, _)| addr.is_component()).count(), 1); // One new vault created assert_eq!(diff.down_iter().filter(|(addr, _)| addr.is_vault()).count(), 0); @@ -1302,9 +1302,9 @@ mod nft_indexes { assert_eq!(diff.down_iter().filter(|(addr, _)| addr.is_resource()).count(), 1); assert_eq!(diff.up_iter().filter(|(addr, _)| addr.is_resource()).count(), 1); - // NFT and account components changed - assert_eq!(diff.down_iter().filter(|(addr, _)| addr.is_component()).count(), 2); - assert_eq!(diff.up_iter().filter(|(addr, _)| addr.is_component()).count(), 2); + // NFT component changed + assert_eq!(diff.down_iter().filter(|(addr, _)| addr.is_component()).count(), 1); + assert_eq!(diff.up_iter().filter(|(addr, _)| addr.is_component()).count(), 1); // One new vault created assert_eq!(diff.down_iter().filter(|(addr, _)| addr.is_vault()).count(), 0); @@ -1354,43 +1354,6 @@ mod nft_indexes { } } -// TODO: these tests can be removed when create free test coins is removed -mod free_test_coins { - use tari_engine_types::component::new_component_address_from_public_key; - - use super::*; - #[test] - fn it_creates_free_test_coins() { - let mut test = TemplateTest::new(Vec::<&str>::new()); - test.enable_fees(); - let account_template = test.get_template_address("Account"); - let (other, _, _) = test.create_owner_proof(); - - let owner_token = test.get_test_proof(); - let future_account_component = - new_component_address_from_public_key(&ACCOUNT_TEMPLATE_ADDRESS, test.get_test_public_key()); - - test.execute_expect_success( - Transaction::builder() - .with_fee_instructions_builder(|builder| { - builder - .add_instruction(Instruction::CreateFreeTestCoins { - revealed_amount: Amount(1000), - output: None, - }) - .put_last_instruction_output_on_workspace("free") - .create_account_with_bucket(test.get_test_public_key().clone(), "free") - .call_method(future_account_component, "pay_fee", args![Amount(1000)]) - }) - // Checking we can create an account for another user in this transaction - .call_function(account_template, "create", args![other]) - .sign(test.get_test_secret_key()) - .build(), - vec![owner_token], - ); - } -} - #[test] fn test_builtin_templates() { let mut template_test = TemplateTest::new(vec!["tests/templates/builtin_templates"]); diff --git a/dan_layer/engine_types/Cargo.toml b/dan_layer/engine_types/Cargo.toml index 424a53c13..4fb239773 100644 --- a/dan_layer/engine_types/Cargo.toml +++ b/dan_layer/engine_types/Cargo.toml @@ -30,9 +30,7 @@ thiserror = { workspace = true } ts-rs = { workspace = true, optional = true } [features] -default = ["debugging"] -# Includes the ability to create free test coins -debugging = [] +default = [] ts = ["ts-rs"] # This feature is used to temporarily fix the issue with the ts-rs crate. Because when we run cargo test --all-feature # it will trigger the ts files generation. But there are some problems that are fixed during the npm run build. But diff --git a/dan_layer/engine_types/src/instruction.rs b/dan_layer/engine_types/src/instruction.rs index b1ed67733..529feb6d5 100644 --- a/dan_layer/engine_types/src/instruction.rs +++ b/dan_layer/engine_types/src/instruction.rs @@ -8,15 +8,12 @@ use tari_common_types::types::PublicKey; use tari_crypto::tari_utilities::hex::Hex; use tari_template_lib::{ args::{Arg, LogLevel}, - models::{Amount, ComponentAddress, TemplateAddress}, + models::{ComponentAddress, TemplateAddress}, }; #[cfg(feature = "ts")] use ts_rs::TS; -use crate::{ - confidential::{ConfidentialClaim, ConfidentialOutput}, - serde_with, -}; +use crate::{confidential::ConfidentialClaim, serde_with}; #[derive(Debug, Clone, Deserialize, Serialize, PartialEq)] #[cfg_attr(feature = "ts", derive(TS), ts(export, export_to = "../../bindings/src/types/"))] @@ -61,11 +58,6 @@ pub enum Instruction { validator_public_key: PublicKey, }, DropAllProofsInWorkspace, - #[cfg(feature = "debugging")] - CreateFreeTestCoins { - revealed_amount: Amount, - output: Option, - }, } impl Display for Instruction { @@ -126,9 +118,7 @@ impl Display for Instruction { epoch, validator_public_key ) }, - Self::CreateFreeTestCoins { .. } => { - write!(f, "CreateFreeTestCoins") - }, + Self::DropAllProofsInWorkspace => { write!(f, "DropAllProofsInWorkspace") }, diff --git a/dan_layer/p2p/proto/consensus.proto b/dan_layer/p2p/proto/consensus.proto index 4e3d4a777..6a25deaec 100644 --- a/dan_layer/p2p/proto/consensus.proto +++ b/dan_layer/p2p/proto/consensus.proto @@ -14,8 +14,8 @@ message HotStuffMessage { ProposalMessage proposal = 2; ProposalMessage foreign_proposal = 3; VoteMessage vote = 4; - RequestMissingTransactionsMessage request_missing_transactions = 5; - RequestedTransactionMessage requested_transaction = 6; + MissingTransactionsRequest request_missing_transactions = 5; + MissingTransactionsResponse requested_transaction = 6; SyncRequest sync_request = 7; SyncResponse sync_response = 8; } @@ -103,7 +103,6 @@ enum Decision { UNKNOWN = 0; COMMIT = 1; ABORT = 2; - DEFERRED = 3; } message Evidence { @@ -182,16 +181,18 @@ message DownState { uint64 fees_accrued = 2; } -message RequestMissingTransactionsMessage { - uint64 epoch = 1; - bytes block_id = 2; - repeated bytes transaction_ids = 3; +message MissingTransactionsRequest { + uint32 request_id = 1; + uint64 epoch = 2; + bytes block_id = 3; + repeated bytes transaction_ids = 4; } -message RequestedTransactionMessage { - uint64 epoch = 1; - bytes block_id = 2; - repeated tari.dan.transaction.Transaction transactions = 3; +message MissingTransactionsResponse { + uint32 request_id = 1; + uint64 epoch = 2; + bytes block_id = 3; + repeated tari.dan.transaction.Transaction transactions = 4; } message Substate { diff --git a/dan_layer/p2p/proto/transaction.proto b/dan_layer/p2p/proto/transaction.proto index 365a4f620..260725b5b 100644 --- a/dan_layer/p2p/proto/transaction.proto +++ b/dan_layer/p2p/proto/transaction.proto @@ -42,7 +42,6 @@ message Instruction { CLAIM_VALIDATOR_FEES = 5; DROP_ALL_PROOFS_IN_WORKSPACE = 6; CREATE_ACCOUNT = 7; - CREATE_FREE_TEST_COINS = 101; } InstructionType instruction_type = 1; @@ -75,9 +74,6 @@ message Instruction { bytes create_account_owner_public_key = 17; string create_account_workspace_bucket = 18; - // DEBUGGING: Test coins - uint64 create_free_test_coins_amount = 101; - bytes create_free_test_coins_output_blob = 102; } message Arg { diff --git a/dan_layer/p2p/src/conversions/consensus.rs b/dan_layer/p2p/src/conversions/consensus.rs index 8883fce40..25934facb 100644 --- a/dan_layer/p2p/src/conversions/consensus.rs +++ b/dan_layer/p2p/src/conversions/consensus.rs @@ -28,10 +28,10 @@ use tari_common_types::types::PublicKey; use tari_consensus::messages::{ FullBlock, HotstuffMessage, + MissingTransactionsRequest, + MissingTransactionsResponse, NewViewMessage, ProposalMessage, - RequestMissingTransactionsMessage, - RequestedTransactionMessage, SyncRequestMessage, SyncResponseMessage, VoteMessage, @@ -69,10 +69,10 @@ impl From<&HotstuffMessage> for proto::consensus::HotStuffMessage { proto::consensus::hot_stuff_message::Message::ForeignProposal(msg.into()) }, HotstuffMessage::Vote(msg) => proto::consensus::hot_stuff_message::Message::Vote(msg.into()), - HotstuffMessage::RequestMissingTransactions(msg) => { + HotstuffMessage::MissingTransactionsRequest(msg) => { proto::consensus::hot_stuff_message::Message::RequestMissingTransactions(msg.into()) }, - HotstuffMessage::RequestedTransaction(msg) => { + HotstuffMessage::MissingTransactionsResponse(msg) => { proto::consensus::hot_stuff_message::Message::RequestedTransaction(msg.into()) }, HotstuffMessage::CatchUpSyncRequest(msg) => { @@ -99,10 +99,10 @@ impl TryFrom for HotstuffMessage { }, proto::consensus::hot_stuff_message::Message::Vote(msg) => HotstuffMessage::Vote(msg.try_into()?), proto::consensus::hot_stuff_message::Message::RequestMissingTransactions(msg) => { - HotstuffMessage::RequestMissingTransactions(msg.try_into()?) + HotstuffMessage::MissingTransactionsRequest(msg.try_into()?) }, proto::consensus::hot_stuff_message::Message::RequestedTransaction(msg) => { - HotstuffMessage::RequestedTransaction(msg.try_into()?) + HotstuffMessage::MissingTransactionsResponse(msg.try_into()?) }, proto::consensus::hot_stuff_message::Message::SyncRequest(msg) => { HotstuffMessage::CatchUpSyncRequest(msg.try_into()?) @@ -195,10 +195,11 @@ impl TryFrom for VoteMessage { } } -//---------------------------------- RequestMissingTransactionsMessage --------------------------------------------// -impl From<&RequestMissingTransactionsMessage> for proto::consensus::RequestMissingTransactionsMessage { - fn from(msg: &RequestMissingTransactionsMessage) -> Self { +//---------------------------------- MissingTransactionsRequest --------------------------------------------// +impl From<&MissingTransactionsRequest> for proto::consensus::MissingTransactionsRequest { + fn from(msg: &MissingTransactionsRequest) -> Self { Self { + request_id: msg.request_id, epoch: msg.epoch.as_u64(), block_id: msg.block_id.as_bytes().to_vec(), transaction_ids: msg.transactions.iter().map(|tx_id| tx_id.as_bytes().to_vec()).collect(), @@ -206,11 +207,12 @@ impl From<&RequestMissingTransactionsMessage> for proto::consensus::RequestMissi } } -impl TryFrom for RequestMissingTransactionsMessage { +impl TryFrom for MissingTransactionsRequest { type Error = anyhow::Error; - fn try_from(value: proto::consensus::RequestMissingTransactionsMessage) -> Result { - Ok(RequestMissingTransactionsMessage { + fn try_from(value: proto::consensus::MissingTransactionsRequest) -> Result { + Ok(MissingTransactionsRequest { + request_id: value.request_id, epoch: Epoch(value.epoch), block_id: BlockId::try_from(value.block_id)?, transactions: value @@ -221,11 +223,12 @@ impl TryFrom for RequestMis }) } } -//---------------------------------- RequestedTransactionMessage --------------------------------------------// +//---------------------------------- MissingTransactionsResponse --------------------------------------------// -impl From<&RequestedTransactionMessage> for proto::consensus::RequestedTransactionMessage { - fn from(msg: &RequestedTransactionMessage) -> Self { +impl From<&MissingTransactionsResponse> for proto::consensus::MissingTransactionsResponse { + fn from(msg: &MissingTransactionsResponse) -> Self { Self { + request_id: msg.request_id, epoch: msg.epoch.as_u64(), block_id: msg.block_id.as_bytes().to_vec(), transactions: msg.transactions.iter().map(|tx| tx.into()).collect(), @@ -233,11 +236,12 @@ impl From<&RequestedTransactionMessage> for proto::consensus::RequestedTransacti } } -impl TryFrom for RequestedTransactionMessage { +impl TryFrom for MissingTransactionsResponse { type Error = anyhow::Error; - fn try_from(value: proto::consensus::RequestedTransactionMessage) -> Result { - Ok(RequestedTransactionMessage { + fn try_from(value: proto::consensus::MissingTransactionsResponse) -> Result { + Ok(MissingTransactionsResponse { + request_id: value.request_id, epoch: Epoch(value.epoch), block_id: BlockId::try_from(value.block_id)?, transactions: value @@ -493,7 +497,6 @@ impl From for proto::consensus::Decision { match value { Decision::Commit => proto::consensus::Decision::Commit, Decision::Abort => proto::consensus::Decision::Abort, - Decision::Deferred => proto::consensus::Decision::Deferred, } } } @@ -505,7 +508,6 @@ impl TryFrom for Decision { match value { proto::consensus::Decision::Commit => Ok(Decision::Commit), proto::consensus::Decision::Abort => Ok(Decision::Abort), - proto::consensus::Decision::Deferred => Ok(Decision::Deferred), proto::consensus::Decision::Unknown => Err(anyhow!("Decision not provided")), } } diff --git a/dan_layer/p2p/src/conversions/transaction.rs b/dan_layer/p2p/src/conversions/transaction.rs index 49c1a0058..aa11601e2 100644 --- a/dan_layer/p2p/src/conversions/transaction.rs +++ b/dan_layer/p2p/src/conversions/transaction.rs @@ -27,11 +27,7 @@ use tari_bor::decode_exact; use tari_common_types::types::{Commitment, PrivateKey, PublicKey}; use tari_crypto::{ristretto::RistrettoComSig, tari_utilities::ByteArray}; use tari_dan_common_types::Epoch; -use tari_engine_types::{ - confidential::{ConfidentialClaim, ConfidentialOutput}, - instruction::Instruction, - substate::SubstateId, -}; +use tari_engine_types::{confidential::ConfidentialClaim, instruction::Instruction, substate::SubstateId}; use tari_template_lib::{ args::Arg, crypto::{BalanceProofSignature, PedersonCommitmentBytes, RistrettoPublicKeyBytes}, @@ -250,10 +246,6 @@ impl TryFrom for Instruction { .map_err(|e| anyhow!("claim_validator_fees_validator_public_key: {}", e))?, }, InstructionType::DropAllProofsInWorkspace => Instruction::DropAllProofsInWorkspace, - InstructionType::CreateFreeTestCoins => Instruction::CreateFreeTestCoins { - revealed_amount: request.create_free_test_coins_amount.try_into()?, - output: tari_bor::decode(&request.create_free_test_coins_output_blob)?, - }, }; Ok(instruction) @@ -321,19 +313,6 @@ impl From for proto::transaction::Instruction { Instruction::DropAllProofsInWorkspace => { result.instruction_type = InstructionType::DropAllProofsInWorkspace as i32; }, - // TODO: debugging feature should not be the default. Perhaps a better way to create faucet coins is to mint - // a faucet vault in the genesis state for dev networks and use faucet builtin template to withdraw - // funds. - Instruction::CreateFreeTestCoins { - revealed_amount: amount, - output, - } => { - result.instruction_type = InstructionType::CreateFreeTestCoins as i32; - result.create_free_test_coins_amount = amount.value() as u64; - result.create_free_test_coins_output_blob = output - .map(|o| tari_bor::encode(&o).unwrap()) - .unwrap_or_else(|| tari_bor::encode(&None::).unwrap()); - }, } result } diff --git a/dan_layer/state_store_sqlite/migrations/2023-06-08-091819_create_state_store/up.sql b/dan_layer/state_store_sqlite/migrations/2023-06-08-091819_create_state_store/up.sql index 622ccdf03..d1f7e3e43 100644 --- a/dan_layer/state_store_sqlite/migrations/2023-06-08-091819_create_state_store/up.sql +++ b/dan_layer/state_store_sqlite/migrations/2023-06-08-091819_create_state_store/up.sql @@ -93,7 +93,7 @@ create table block_diffs FOREIGN KEY (transaction_id) REFERENCES transactions (transaction_id), FOREIGN KEY (block_id) REFERENCES blocks (block_id) ); -create index block_diffs_idx_block_id on block_diffs (block_id); +create index block_diffs_idx_block_id_substate_id on block_diffs (block_id, substate_id); create table substates ( @@ -251,9 +251,9 @@ create table transaction_pool original_decision text not null, local_decision text null, remote_decision text null, - evidence text not null, + evidence text null, remote_evidence text null, - transaction_fee bigint not null, + transaction_fee bigint null, leader_fee bigint null, global_exhaust_burn bigint null, stage text not null, @@ -392,8 +392,8 @@ CREATE TABLE transaction_pool_history original_decision text not null, local_decision text null, remote_decision text null, - evidence text not null, - transaction_fee bigint not null, + evidence text null, + transaction_fee bigint null, leader_fee bigint null, global_exhaust_burn bigint null, stage text not null, diff --git a/dan_layer/state_store_sqlite/src/reader.rs b/dan_layer/state_store_sqlite/src/reader.rs index cee2cf6cf..b4811894f 100644 --- a/dan_layer/state_store_sqlite/src/reader.rs +++ b/dan_layer/state_store_sqlite/src/reader.rs @@ -53,6 +53,7 @@ use tari_dan_storage::{ QuorumCertificate, StateTransition, StateTransitionId, + SubstateChange, SubstateRecord, TransactionExecution, TransactionPoolRecord, @@ -1204,6 +1205,28 @@ impl<'tx, TAddr: NodeAddressable + Serialize + DeserializeOwned + 'tx> StateStor sql_models::BlockDiff::try_load(*block_id, block_diff) } + fn block_diffs_get_last_change_for_substate( + &self, + block_id: &BlockId, + substate_id: &SubstateId, + ) -> Result { + use crate::schema::block_diffs; + let commit_block = self.get_commit_block_id()?; + let block_ids = self.get_block_ids_that_change_state_between(&commit_block, block_id)?; + + let diff = block_diffs::table + .filter(block_diffs::block_id.eq_any(block_ids)) + .filter(block_diffs::substate_id.eq(substate_id.to_string())) + .order_by(block_diffs::id.desc()) + .first::(self.connection()) + .map_err(|e| SqliteStorageError::DieselError { + operation: "block_diffs_get_last_change_for_substate", + source: e, + })?; + + sql_models::BlockDiff::try_convert_change(diff) + } + fn parked_blocks_exists(&self, block_id: &BlockId) -> Result { use crate::schema::parked_blocks; @@ -1285,21 +1308,6 @@ impl<'tx, TAddr: NodeAddressable + Serialize + DeserializeOwned + 'tx> StateStor deserialize_json(&qc_json) } - fn transaction_pool_get(&self, transaction_id: &TransactionId) -> Result { - use crate::schema::transaction_pool; - - let transaction_id = serialize_hex(transaction_id); - let rec = transaction_pool::table - .filter(transaction_pool::transaction_id.eq(&transaction_id)) - .first::(self.connection()) - .map_err(|e| SqliteStorageError::DieselError { - operation: "transaction_pool_get", - source: e, - })?; - - rec.try_convert(None) - } - fn transaction_pool_get_for_blocks( &self, from_block_id: &BlockId, diff --git a/dan_layer/state_store_sqlite/src/schema.rs b/dan_layer/state_store_sqlite/src/schema.rs index 9a0cd2534..4cbdbed54 100644 --- a/dan_layer/state_store_sqlite/src/schema.rs +++ b/dan_layer/state_store_sqlite/src/schema.rs @@ -284,9 +284,9 @@ diesel::table! { original_decision -> Text, local_decision -> Nullable, remote_decision -> Nullable, - evidence -> Text, + evidence -> Nullable, remote_evidence -> Nullable, - transaction_fee -> BigInt, + transaction_fee -> Nullable, leader_fee -> Nullable, global_exhaust_burn -> Nullable, stage -> Text, diff --git a/dan_layer/state_store_sqlite/src/sql_models/block_diff.rs b/dan_layer/state_store_sqlite/src/sql_models/block_diff.rs index 71c3c9260..fee11a69c 100644 --- a/dan_layer/state_store_sqlite/src/sql_models/block_diff.rs +++ b/dan_layer/state_store_sqlite/src/sql_models/block_diff.rs @@ -22,34 +22,34 @@ pub struct BlockDiff { impl BlockDiff { pub fn try_load(block_id: BlockId, diff: Vec) -> Result { - let mut changes = Vec::with_capacity(diff.len()); - for d in diff { - let substate_id = d.substate_id.parse().map_err(|err| StorageError::DataInconsistency { - details: format!("Invalid substate id {}: {}", d.substate_id, err), - })?; - let id = VersionedSubstateId::new(substate_id, d.version as u32); - let transaction_id = deserialize_hex_try_from(&d.transaction_id)?; - match d.change.as_str() { - "Up" => { - let state = d.state.ok_or(StorageError::DataInconsistency { - details: "Block diff change type is Up but state is missing".to_string(), - })?; - changes.push(consensus_models::SubstateChange::Up { - id, - transaction_id, - substate: deserialize_json(&state)?, - }); - }, - "Down" => { - changes.push(consensus_models::SubstateChange::Down { id, transaction_id }); - }, - _ => { - return Err(StorageError::DataInconsistency { - details: format!("Invalid block diff change type: {}", d.change), - }); - }, - } - } + let changes = diff + .into_iter() + .map(Self::try_convert_change) + .collect::, _>>()?; Ok(consensus_models::BlockDiff { block_id, changes }) } + + pub fn try_convert_change(d: Self) -> Result { + let substate_id = d.substate_id.parse().map_err(|err| StorageError::DataInconsistency { + details: format!("Invalid substate id {}: {}", d.substate_id, err), + })?; + let id = VersionedSubstateId::new(substate_id, d.version as u32); + let transaction_id = deserialize_hex_try_from(&d.transaction_id)?; + match d.change.as_str() { + "Up" => { + let state = d.state.ok_or(StorageError::DataInconsistency { + details: "Block diff change type is Up but state is missing".to_string(), + })?; + Ok(consensus_models::SubstateChange::Up { + id, + transaction_id, + substate: deserialize_json(&state)?, + }) + }, + "Down" => Ok(consensus_models::SubstateChange::Down { id, transaction_id }), + _ => Err(StorageError::DataInconsistency { + details: format!("Invalid block diff change type: {}", d.change), + }), + } + } } diff --git a/dan_layer/state_store_sqlite/src/sql_models/transaction.rs b/dan_layer/state_store_sqlite/src/sql_models/transaction.rs index a34ab89b6..435f72140 100644 --- a/dan_layer/state_store_sqlite/src/sql_models/transaction.rs +++ b/dan_layer/state_store_sqlite/src/sql_models/transaction.rs @@ -112,7 +112,7 @@ impl TryFrom for consensus_models::ExecutedTransaction { fn try_from(value: Transaction) -> Result { let rec = consensus_models::TransactionRecord::try_from(value)?; - if rec.result.is_none() { + if rec.execution_result.is_none() { return Err(StorageError::QueryError { reason: format!("Transaction {} has not executed", rec.transaction.id()), }); diff --git a/dan_layer/state_store_sqlite/src/sql_models/transaction_pool.rs b/dan_layer/state_store_sqlite/src/sql_models/transaction_pool.rs index 22af9f6f6..c75e92b1e 100644 --- a/dan_layer/state_store_sqlite/src/sql_models/transaction_pool.rs +++ b/dan_layer/state_store_sqlite/src/sql_models/transaction_pool.rs @@ -4,7 +4,7 @@ use diesel::{Queryable, QueryableByName}; use tari_dan_storage::{ consensus_models, - consensus_models::{Decision, Evidence, LeaderFee, TransactionAtom}, + consensus_models::{Evidence, LeaderFee}, StorageError, }; use time::PrimitiveDateTime; @@ -18,9 +18,9 @@ pub struct TransactionPoolRecord { pub original_decision: String, pub local_decision: Option, pub remote_decision: Option, - pub evidence: String, + pub evidence: Option, pub remote_evidence: Option, - pub transaction_fee: i64, + pub transaction_fee: Option, pub leader_fee: Option, pub global_exhaust_burn: Option, pub stage: String, @@ -35,16 +35,23 @@ pub struct TransactionPoolRecord { impl TransactionPoolRecord { pub fn try_convert( - mut self, + self, update: Option, ) -> Result { - let mut evidence = deserialize_json::(&self.evidence)?; + let mut evidence = self + .evidence + .as_deref() + .map(deserialize_json::) + .transpose()? + .unwrap_or_default(); let mut pending_stage = None; + let mut local_decision = self.local_decision; + let mut is_ready = self.is_ready; if let Some(update) = update { evidence.merge(deserialize_json::(&update.evidence)?); - self.is_ready = update.is_ready; + is_ready = update.is_ready; pending_stage = Some(parse_from_string(&update.stage)?); - self.local_decision = update.local_decision; + local_decision = update.local_decision; } if let Some(ref remote_evidence) = self.remote_evidence { @@ -68,31 +75,19 @@ impl TransactionPoolRecord { }) .transpose()?; let original_decision = parse_from_string(&self.original_decision)?; - let local_decision = self.local_decision.as_deref().map(parse_from_string).transpose()?; - let remote_decision = self - .remote_decision - .as_deref() - .map(parse_from_string::) - .transpose()?; - // TODO: sucks to reimplement this logic here - let aggregate_decision = remote_decision - .filter(|d| d.is_abort()) - .or(local_decision) - .unwrap_or(original_decision); + let remote_decision = self.remote_decision.as_deref().map(parse_from_string).transpose()?; Ok(consensus_models::TransactionPoolRecord::load( - TransactionAtom { - id: deserialize_hex_try_from(&self.transaction_id)?, - decision: aggregate_decision, - evidence, - transaction_fee: self.transaction_fee as u64, - leader_fee, - }, + deserialize_hex_try_from(&self.transaction_id)?, + evidence, + self.transaction_fee.map(|f| f as u64), + leader_fee, parse_from_string(&self.stage)?, pending_stage, - local_decision, + original_decision, + local_decision.as_deref().map(parse_from_string).transpose()?, remote_decision, - self.is_ready, + is_ready, )) } } diff --git a/dan_layer/state_store_sqlite/src/writer.rs b/dan_layer/state_store_sqlite/src/writer.rs index 03163daea..55aa88ab0 100644 --- a/dan_layer/state_store_sqlite/src/writer.rs +++ b/dan_layer/state_store_sqlite/src/writer.rs @@ -14,7 +14,7 @@ use diesel::{ SqliteConnection, }; use log::*; -use tari_dan_common_types::{optional::Optional, shard::Shard, Epoch, NodeAddressable, NodeHeight}; +use tari_dan_common_types::{shard::Shard, Epoch, NodeAddressable, NodeHeight}; use tari_dan_storage::{ consensus_models::{ Block, @@ -22,7 +22,6 @@ use tari_dan_storage::{ BlockId, Decision, Evidence, - ExecutedTransaction, ForeignProposal, ForeignReceiveCounters, ForeignSendCounters, @@ -281,29 +280,31 @@ impl<'tx, TAddr: NodeAddressable + 'tx> StateStoreWriteTransaction for SqliteSta use crate::schema::block_diffs; let block_id = serialize_hex(block_diff.block_id); - let values = block_diff - .changes - .iter() - .map(|ch| { - Ok(( - block_diffs::block_id.eq(&block_id), - block_diffs::transaction_id.eq(serialize_hex(ch.transaction_id())), - block_diffs::substate_id.eq(ch.versioned_substate_id().substate_id().to_string()), - block_diffs::version.eq(ch.versioned_substate_id().version() as i32), - block_diffs::change.eq(ch.as_change_string()), - block_diffs::state.eq(ch.substate().map(serialize_json).transpose()?), - )) - }) - .collect::, StorageError>>()?; + // We commit in chunks because we can hit the SQL variable limit + for chunk in block_diff.changes.chunks(1000) { + let values = chunk + .iter() + .map(|ch| { + Ok(( + block_diffs::block_id.eq(&block_id), + block_diffs::transaction_id.eq(serialize_hex(ch.transaction_id())), + block_diffs::substate_id.eq(ch.versioned_substate_id().substate_id().to_string()), + block_diffs::version.eq(ch.versioned_substate_id().version() as i32), + block_diffs::change.eq(ch.as_change_string()), + block_diffs::state.eq(ch.substate().map(serialize_json).transpose()?), + )) + }) + .collect::, StorageError>>()?; - diesel::insert_into(block_diffs::table) - .values(values) - .execute(self.connection()) - .map(|_| ()) - .map_err(|e| SqliteStorageError::DieselError { - operation: "block_diffs_insert", - source: e, - })?; + diesel::insert_into(block_diffs::table) + .values(values) + .execute(self.connection()) + .map(|_| ()) + .map_err(|e| SqliteStorageError::DieselError { + operation: "block_diffs_insert", + source: e, + })?; + } Ok(()) } @@ -610,7 +611,7 @@ impl<'tx, TAddr: NodeAddressable + 'tx> StateStoreWriteTransaction for SqliteSta transactions::filled_inputs.eq(serialize_json(transaction.filled_inputs())?), transactions::resolved_inputs.eq(tx_rec.resolved_inputs().map(serialize_json).transpose()?), transactions::resulting_outputs.eq(serialize_json(tx_rec.resulting_outputs())?), - transactions::result.eq(tx_rec.result().map(serialize_json).transpose()?), + transactions::result.eq(tx_rec.execution_result().map(serialize_json).transpose()?), transactions::execution_time_ms.eq(tx_rec .execution_time() .map(|d| i64::try_from(d.as_millis()).unwrap_or(i64::MAX))), @@ -660,7 +661,7 @@ impl<'tx, TAddr: NodeAddressable + 'tx> StateStoreWriteTransaction for SqliteSta } let change_set = Changes { - result: transaction_rec.result().map(serialize_json).transpose()?, + result: transaction_rec.execution_result().map(serialize_json).transpose()?, filled_inputs: serialize_json(transaction.filled_inputs())?, resulting_outputs: serialize_json(transaction_rec.resulting_outputs())?, resolved_inputs: transaction_rec.resolved_inputs().map(serialize_json).transpose()?, @@ -714,7 +715,7 @@ impl<'tx, TAddr: NodeAddressable + 'tx> StateStoreWriteTransaction for SqliteSta transactions::resolved_inputs.eq(rec.resolved_inputs().map(serialize_json).transpose()?), transactions::filled_inputs.eq(serialize_json(transaction.filled_inputs())?), transactions::resulting_outputs.eq(serialize_json(rec.resulting_outputs())?), - transactions::result.eq(rec.result().map(serialize_json).transpose()?), + transactions::result.eq(rec.execution_result().map(serialize_json).transpose()?), )) }) .collect::, StorageError>>()?; @@ -741,21 +742,20 @@ impl<'tx, TAddr: NodeAddressable + 'tx> StateStoreWriteTransaction for SqliteSta .into_iter() .map(|atom| { // TODO(perf): 2n queries, query is slow - let exec = self - .transaction_executions_get_pending_for_block(&atom.id, &block_id) - .optional()?; - - let exec = match exec { - Some(exec) => exec, - None => { - // Executed in the mempool. - // TODO: this is kinda hacky. Either the mempool should add a block_id=null execution or we - // should remove mempool execution - let transaction = self.transactions_get(&atom.id)?; - let executed = ExecutedTransaction::try_from(transaction)?; - executed.into_execution_for_block(block_id) - }, - }; + let exec = self.transaction_executions_get_pending_for_block(&atom.id, &block_id)?; + // .optional()?; + + // let exec = match exec { + // Some(exec) => exec, + // None => { + // // Executed in the mempool. + // // TODO: this is kinda hacky. Either the mempool should add a block_id=null execution or we + // // should remove mempool execution + // let transaction = self.transactions_get(&atom.id)?; + // let executed = ExecutedTransaction::try_from(transaction)?; + // executed.into_execution_for_block(block_id) + // }, + // }; Ok(( transactions::transaction_id.eq(serialize_hex(atom.id())), @@ -813,24 +813,18 @@ impl<'tx, TAddr: NodeAddressable + 'tx> StateStoreWriteTransaction for SqliteSta Ok(()) } - fn transaction_pool_insert( + fn transaction_pool_insert_new( &mut self, - transaction: TransactionAtom, - stage: TransactionPoolStage, - is_ready: bool, + transaction_id: TransactionId, + decision: Decision, ) -> Result<(), StorageError> { use crate::schema::transaction_pool; let insert = ( - transaction_pool::transaction_id.eq(serialize_hex(transaction.id)), - transaction_pool::original_decision.eq(transaction.decision.to_string()), - transaction_pool::transaction_fee.eq(transaction.transaction_fee as i64), - transaction_pool::evidence.eq(serialize_json(&transaction.evidence)?), - transaction_pool::leader_fee.eq(transaction.leader_fee.as_ref().map(|f| f.fee as i64)), - transaction_pool::global_exhaust_burn - .eq(transaction.leader_fee.as_ref().map(|f| f.global_exhaust_burn as i64)), - transaction_pool::stage.eq(stage.to_string()), - transaction_pool::is_ready.eq(is_ready), + transaction_pool::transaction_id.eq(serialize_hex(transaction_id)), + transaction_pool::original_decision.eq(decision.to_string()), + transaction_pool::stage.eq(TransactionPoolStage::New.to_string()), + transaction_pool::is_ready.eq(true), ); diesel::insert_into(transaction_pool::table) @@ -1058,7 +1052,7 @@ impl<'tx, TAddr: NodeAddressable + 'tx> StateStoreWriteTransaction for SqliteSta })?; txs.into_iter() - .map(|tx| tx.try_convert(None).map(|t| t.into_atom())) + .map(|tx| tx.try_convert(None).map(|t| t.into_local_transaction_atom())) .collect() } @@ -1261,29 +1255,44 @@ impl<'tx, TAddr: NodeAddressable + 'tx> StateStoreWriteTransaction for SqliteSta ) -> Result<(), StorageError> { use crate::schema::substate_locks; - let locks = locks - .into_iter() - .flat_map(|(id, locks)| { - locks.into_iter().map(move |lock| { - ( - substate_locks::block_id.eq(serialize_hex(block_id)), - substate_locks::substate_id.eq(id.to_string()), - substate_locks::version.eq(lock.version() as i32), - substate_locks::transaction_id.eq(serialize_hex(lock.transaction_id())), - substate_locks::lock.eq(lock.substate_lock().to_string()), - substate_locks::is_local_only.eq(lock.is_local_only()), - ) + let mut iter = locks.into_iter(); + const CHUNK_SIZE: usize = 100; + // We have to break up into multiple queries because we can hit max SQL variable limit + loop { + let locks = iter + .by_ref() + .take(CHUNK_SIZE) + .flat_map(|(id, locks)| { + locks.into_iter().map(move |lock| { + ( + substate_locks::block_id.eq(serialize_hex(block_id)), + substate_locks::substate_id.eq(id.to_string()), + substate_locks::version.eq(lock.version() as i32), + substate_locks::transaction_id.eq(serialize_hex(lock.transaction_id())), + substate_locks::lock.eq(lock.substate_lock().to_string()), + substate_locks::is_local_only.eq(lock.is_local_only()), + ) + }) }) - }) - .collect::>(); + .collect::>(); - diesel::insert_into(substate_locks::table) - .values(locks) - .execute(self.connection()) - .map_err(|e| SqliteStorageError::DieselError { - operation: "substate_locks_insert_all", - source: e, - })?; + let count = locks.len(); + if count == 0 { + break; + } + + diesel::insert_into(substate_locks::table) + .values(locks) + .execute(self.connection()) + .map_err(|e| SqliteStorageError::DieselError { + operation: "substate_locks_insert_all", + source: e, + })?; + + if count < CHUNK_SIZE { + break; + } + } Ok(()) } diff --git a/dan_layer/state_store_sqlite/tests/tests.rs b/dan_layer/state_store_sqlite/tests/tests.rs index 97d2f5d41..7e36d1a21 100644 --- a/dan_layer/state_store_sqlite/tests/tests.rs +++ b/dan_layer/state_store_sqlite/tests/tests.rs @@ -3,7 +3,7 @@ use rand::{rngs::OsRng, RngCore}; use tari_common_types::types::FixedHash; -use tari_dan_common_types::{Epoch, NodeHeight}; +use tari_dan_common_types::{shard::Shard, Epoch, NodeHeight}; use tari_dan_storage::{ consensus_models::{Block, Command, Decision, TransactionAtom, TransactionPoolStage, TransactionPoolStatusUpdate}, StateStore, @@ -12,6 +12,7 @@ use tari_dan_storage::{ }; use tari_state_store_sqlite::SqliteStateStore; use tari_transaction::TransactionId; +use tari_utilities::epoch_time::EpochTime; fn create_db() -> SqliteStateStore { SqliteStateStore::connect(":memory:").unwrap() @@ -30,8 +31,6 @@ fn create_tx_atom() -> TransactionAtom { } mod confirm_all_transitions { - use tari_dan_common_types::shard::Shard; - use tari_utilities::epoch_time::EpochTime; use super::*; @@ -70,12 +69,9 @@ mod confirm_all_transitions { ); block1.insert(&mut tx).unwrap(); - tx.transaction_pool_insert(atom1.clone(), TransactionPoolStage::New, false) - .unwrap(); - tx.transaction_pool_insert(atom2.clone(), TransactionPoolStage::New, false) - .unwrap(); - tx.transaction_pool_insert(atom3.clone(), TransactionPoolStage::New, false) - .unwrap(); + tx.transaction_pool_insert_new(atom1.id, atom1.decision).unwrap(); + tx.transaction_pool_insert_new(atom2.id, atom2.decision).unwrap(); + tx.transaction_pool_insert_new(atom3.id, atom3.decision).unwrap(); let block_id = *block1.id(); tx.transaction_pool_add_pending_update(&TransactionPoolStatusUpdate { @@ -112,13 +108,13 @@ mod confirm_all_transitions { let rec = tx .transaction_pool_get_for_blocks(zero_block.id(), &block_id, &atom1.id) .unwrap(); - assert!(rec.stage().is_new()); + assert!(rec.committed_stage().is_new()); assert!(rec.pending_stage().unwrap().is_local_prepared()); let rec = tx .transaction_pool_get_for_blocks(zero_block.id(), &block_id, &atom2.id) .unwrap(); - assert!(rec.stage().is_new()); + assert!(rec.committed_stage().is_new()); assert!(rec.pending_stage().unwrap().is_prepared()); tx.transaction_pool_set_all_transitions(&zero_block.as_locked_block(), &block1.as_locked_block(), &[ @@ -129,19 +125,19 @@ mod confirm_all_transitions { let rec = tx .transaction_pool_get_for_blocks(zero_block.id(), &block_id, &atom1.id) .unwrap(); - assert!(rec.stage().is_local_prepared()); + assert!(rec.committed_stage().is_local_prepared()); assert!(rec.pending_stage().is_none()); let rec = tx .transaction_pool_get_for_blocks(zero_block.id(), &block_id, &atom2.id) .unwrap(); - assert!(rec.stage().is_new()); + assert!(rec.committed_stage().is_new()); assert!(rec.pending_stage().unwrap().is_prepared()); let rec = tx .transaction_pool_get_for_blocks(zero_block.id(), &block_id, &atom3.id) .unwrap(); - assert!(rec.stage().is_prepared()); + assert!(rec.committed_stage().is_prepared()); assert!(rec.pending_stage().is_none()); tx.rollback().unwrap(); diff --git a/dan_layer/storage/src/consensus_models/block.rs b/dan_layer/storage/src/consensus_models/block.rs index a1da04691..ec3ed0467 100644 --- a/dan_layer/storage/src/consensus_models/block.rs +++ b/dan_layer/storage/src/consensus_models/block.rs @@ -606,7 +606,7 @@ impl Block { }); } - // block_diff.remove(tx)?; + block_diff.remove(tx)?; for change in block_diff.into_changes() { match change { @@ -922,7 +922,7 @@ impl Block { Ok(transactions .into_iter() // TODO: following two should never be None - .filter_map(|t_rec| t_rec.result) + .filter_map(|t_rec| t_rec.execution_result) .filter_map(|t_res| t_res.finalize.into_accept()) .collect()) } diff --git a/dan_layer/storage/src/consensus_models/block_diff.rs b/dan_layer/storage/src/consensus_models/block_diff.rs index 2eea187b6..51819a838 100644 --- a/dan_layer/storage/src/consensus_models/block_diff.rs +++ b/dan_layer/storage/src/consensus_models/block_diff.rs @@ -4,9 +4,11 @@ use std::fmt::Debug; use tari_dan_common_types::committee::CommitteeInfo; +use tari_engine_types::substate::SubstateId; use crate::{ consensus_models::{substate_change::SubstateChange, BlockId}, + StateStoreReadTransaction, StateStoreWriteTransaction, StorageError, }; @@ -67,4 +69,12 @@ impl BlockDiff { pub fn remove(&self, tx: &mut TTx) -> Result<(), StorageError> { tx.block_diffs_remove(&self.block_id) } + + pub fn get_for_substate( + tx: &TTx, + block_id: &BlockId, + substate_id: &SubstateId, + ) -> Result { + tx.block_diffs_get_last_change_for_substate(block_id, substate_id) + } } diff --git a/dan_layer/storage/src/consensus_models/command.rs b/dan_layer/storage/src/consensus_models/command.rs index 45554b8c2..f33658aa8 100644 --- a/dan_layer/storage/src/consensus_models/command.rs +++ b/dan_layer/storage/src/consensus_models/command.rs @@ -3,7 +3,6 @@ use std::{ cmp::Ordering, - collections::HashMap, fmt::{Display, Formatter}, }; @@ -11,8 +10,6 @@ use indexmap::{IndexMap, IndexSet}; use serde::{Deserialize, Serialize}; use tari_dan_common_types::SubstateAddress; use tari_transaction::{TransactionId, VersionedSubstateId}; -#[cfg(feature = "ts")] -use ts_rs::TS; use super::{ ExecutedTransaction, @@ -29,7 +26,11 @@ use crate::{ }; #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)] -#[cfg_attr(feature = "ts", derive(TS), ts(export, export_to = "../../bindings/src/types/"))] +#[cfg_attr( + feature = "ts", + derive(ts_rs::TS), + ts(export, export_to = "../../bindings/src/types/") +)] pub struct Evidence { evidence: IndexMap, } @@ -42,34 +43,31 @@ impl Evidence { } pub fn from_inputs_and_outputs( - transaction_id: TransactionId, resolved_inputs: &[VersionedSubstateIdLockIntent], resulting_outputs: &[VersionedSubstateId], ) -> Self { - let mut deduped_evidence = HashMap::new(); - deduped_evidence.extend(resolved_inputs.iter().map(|input| { - (input.to_substate_address(), ShardEvidence { - qc_ids: IndexSet::new(), - lock: input.lock_flag(), + resolved_inputs + .iter() + .map(|input| { + (input.to_substate_address(), ShardEvidence { + qc_ids: IndexSet::new(), + lock: input.lock_flag(), + }) }) - })); - - let tx_reciept_address = SubstateAddress::for_transaction_receipt(transaction_id.into_receipt_address()); - deduped_evidence.extend( - resulting_outputs + .chain( + resulting_outputs .iter() + // Exclude transaction receipt from evidence since all involved shards will commit it + .filter(|output| !output.substate_id.is_transaction_receipt()) .map(|output| output.to_substate_address()) - // Exclude transaction receipt address from evidence since all involved shards will commit it - .filter(|output| *output != tx_reciept_address) .map(|output| { (output, ShardEvidence { qc_ids: IndexSet::new(), lock: SubstateLockFlag::Write, }) }), - ); - - deduped_evidence.into_iter().collect() + ) + .collect() } pub fn all_shards_justified(&self) -> bool { @@ -133,7 +131,11 @@ impl FromIterator<(SubstateAddress, ShardEvidence)> for Evidence { } #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "ts", derive(TS), ts(export, export_to = "../../bindings/src/types/"))] +#[cfg_attr( + feature = "ts", + derive(ts_rs::TS), + ts(export, export_to = "../../bindings/src/types/") +)] pub struct ShardEvidence { #[cfg_attr(feature = "ts", ts(type = "Array"))] pub qc_ids: IndexSet, @@ -159,7 +161,11 @@ impl ShardEvidence { } #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "ts", derive(TS), ts(export, export_to = "../../bindings/src/types/"))] +#[cfg_attr( + feature = "ts", + derive(ts_rs::TS), + ts(export, export_to = "../../bindings/src/types/") +)] pub struct TransactionAtom { #[cfg_attr(feature = "ts", ts(type = "string"))] pub id: TransactionId, @@ -171,16 +177,6 @@ pub struct TransactionAtom { } impl TransactionAtom { - pub fn deferred(transaction_id: TransactionId) -> Self { - Self { - id: transaction_id, - decision: Decision::Deferred, - evidence: Evidence::empty(), - transaction_fee: 0, - leader_fee: None, - } - } - pub fn id(&self) -> &TransactionId { &self.id } @@ -222,7 +218,11 @@ impl Display for TransactionAtom { } #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "ts", derive(TS), ts(export, export_to = "../../bindings/src/types/"))] +#[cfg_attr( + feature = "ts", + derive(ts_rs::TS), + ts(export, export_to = "../../bindings/src/types/") +)] pub enum Command { /// Command to prepare a transaction. Prepare(TransactionAtom), diff --git a/dan_layer/storage/src/consensus_models/executed_transaction.rs b/dan_layer/storage/src/consensus_models/executed_transaction.rs index 47720e200..45ccec92e 100644 --- a/dan_layer/storage/src/consensus_models/executed_transaction.rs +++ b/dan_layer/storage/src/consensus_models/executed_transaction.rs @@ -164,7 +164,7 @@ impl ExecutedTransaction { } pub fn to_initial_evidence(&self) -> Evidence { - Evidence::from_inputs_and_outputs(*self.id(), &self.resolved_inputs, &self.resulting_outputs) + Evidence::from_inputs_and_outputs(&self.resolved_inputs, &self.resulting_outputs) } pub fn transaction_fee(&self) -> u64 { @@ -239,7 +239,7 @@ impl ExecutedTransaction { pub fn get(tx: &TTx, tx_id: &TransactionId) -> Result { let rec = tx.transactions_get(tx_id)?; - if rec.result.is_none() { + if rec.execution_result.is_none() { return Err(StorageError::NotFound { item: "ExecutedTransaction".to_string(), key: tx_id.to_string(), @@ -256,7 +256,7 @@ impl ExecutedTransaction { ) -> Result { // TODO(perf): consider optimising let rec = tx.transactions_get(tx_id)?; - let Some(result) = rec.result else { + let Some(result) = rec.execution_result else { return Err(StorageError::NotFound { item: "ExecutedTransaction result".to_string(), key: tx_id.to_string(), @@ -292,7 +292,7 @@ impl ExecutedTransaction { tx_id: &TransactionId, ) -> Result { match tx.transactions_get(tx_id).optional()? { - Some(rec) => Ok(rec.result.is_some()), + Some(rec) => Ok(rec.execution_result.is_some()), None => Ok(false), } } @@ -360,7 +360,7 @@ impl TryFrom for ExecutedTransaction { Ok(Self { transaction: value.transaction, - result: value.result.unwrap(), + result: value.execution_result.unwrap(), execution_time: value.execution_time.unwrap_or_default(), resolved_inputs, final_decision: value.final_decision, diff --git a/dan_layer/storage/src/consensus_models/substate.rs b/dan_layer/storage/src/consensus_models/substate.rs index 3837641e2..4781077d7 100644 --- a/dan_layer/storage/src/consensus_models/substate.rs +++ b/dan_layer/storage/src/consensus_models/substate.rs @@ -14,7 +14,7 @@ use std::{ use serde::{Deserialize, Serialize}; use tari_common_types::types::FixedHash; -use tari_dan_common_types::{optional::Optional, shard::Shard, Epoch, NodeHeight, SubstateAddress}; +use tari_dan_common_types::{shard::Shard, Epoch, NodeHeight, SubstateAddress}; use tari_engine_types::substate::{hash_substate, Substate, SubstateId, SubstateValue}; use tari_transaction::{SubstateRequirement, TransactionId, VersionedSubstateId}; @@ -177,10 +177,9 @@ impl SubstateRecord { pub fn exists( tx: &TTx, - address: &SubstateAddress, + id: &VersionedSubstateId, ) -> Result { - // TODO: optimise - Ok(Self::get(tx, address).optional()?.is_some()) + Self::any_exist(tx, Some(id)) } pub fn any_exist< diff --git a/dan_layer/storage/src/consensus_models/transaction.rs b/dan_layer/storage/src/consensus_models/transaction.rs index d24d10f7d..69840fd13 100644 --- a/dan_layer/storage/src/consensus_models/transaction.rs +++ b/dan_layer/storage/src/consensus_models/transaction.rs @@ -18,7 +18,7 @@ use crate::{ #[derive(Debug, Clone, Deserialize)] pub struct TransactionRecord { pub transaction: Transaction, - pub result: Option, + pub execution_result: Option, pub execution_time: Option, pub resulting_outputs: Vec, pub resolved_inputs: Option>, @@ -31,7 +31,7 @@ impl TransactionRecord { pub fn new(transaction: Transaction) -> Self { Self { transaction, - result: None, + execution_result: None, resolved_inputs: None, execution_time: None, final_decision: None, @@ -54,7 +54,7 @@ impl TransactionRecord { Self { transaction, resolved_inputs, - result, + execution_result: result, execution_time, final_decision, finalized_time, @@ -79,12 +79,12 @@ impl TransactionRecord { self.transaction } - pub fn result(&self) -> Option<&ExecuteResult> { - self.result.as_ref() + pub fn execution_result(&self) -> Option<&ExecuteResult> { + self.execution_result.as_ref() } pub fn has_executed(&self) -> bool { - self.result.is_some() + self.execution_result.is_some() } pub fn resulting_outputs(&self) -> &[VersionedSubstateId] { @@ -95,6 +95,18 @@ impl TransactionRecord { self.resolved_inputs.as_deref() } + pub fn execution_decision(&self) -> Option { + self.execution_result().map(|r| Decision::from(&r.finalize.result)) + } + + pub fn current_decision(&self) -> Decision { + self.final_decision + .or_else(|| self.abort_details.as_ref().map(|_| Decision::Abort)) + .or_else(|| self.execution_decision()) + // We will choose to commit a transaction unless (1) we aborted it, (2) the execution has failed + .unwrap_or(Decision::Commit) + } + pub fn final_decision(&self) -> Option { self.final_decision } @@ -112,7 +124,7 @@ impl TransactionRecord { } pub fn is_executed(&self) -> bool { - self.result.is_some() + self.execution_result.is_some() } pub fn abort_details(&self) -> Option<&String> { @@ -125,6 +137,11 @@ impl TransactionRecord { self } + pub fn set_current_decision_to_abort>(&mut self, details: T) -> &mut Self { + self.abort_details = Some(details.into()); + self + } + pub fn into_final_result(self) -> Option { // TODO: This is hacky, result should be broken up into execution result, validation (mempool) result, finality // result. These results are independent of each other. @@ -132,12 +149,15 @@ impl TransactionRecord { if d.is_commit() { // Is is expected that the result is ACCEPT. // TODO: Handle (elsewhere) the edge-case where our execution failed but the committee decided to COMMIT - // (fetch the state transitions from a peer) - self.result + // (fetch the state transitions from a peer?) + self.execution_result } else { // Only use rejected results for the transaction. If execution ACCEPTed but the final decision is ABORT, // then use abort_details (which should have been set in this case). - let finalize_result = self.result.map(|r| r.finalize).filter(|f| !f.result.is_accept()); + let finalize_result = self + .execution_result + .map(|r| r.finalize) + .filter(|f| !f.result.is_accept()); Some(ExecuteResult { finalize: finalize_result.unwrap_or_else(|| { FinalizeResult::new_rejected( @@ -234,6 +254,15 @@ impl TransactionRecord { Ok((recs, tx_ids)) } + pub fn get_missing<'a, TTx: StateStoreReadTransaction, I: IntoIterator>( + tx: &TTx, + tx_ids: I, + ) -> Result, StorageError> { + // TODO(perf): optimise + let (_, missing) = Self::get_any(tx, tx_ids)?; + Ok(missing) + } + pub fn get_paginated( tx: &TTx, limit: u64, @@ -263,7 +292,7 @@ impl From for TransactionRecord { Self { transaction, - result: Some(result), + execution_result: Some(result), execution_time: Some(execution_time), resolved_inputs: Some(resolved_inputs), final_decision, diff --git a/dan_layer/storage/src/consensus_models/transaction_decision.rs b/dan_layer/storage/src/consensus_models/transaction_decision.rs index 1f2eea037..1e7b62a6a 100644 --- a/dan_layer/storage/src/consensus_models/transaction_decision.rs +++ b/dan_layer/storage/src/consensus_models/transaction_decision.rs @@ -19,8 +19,6 @@ pub enum Decision { Commit, /// Decision to ABORT the transaction Abort, - /// Decision has not yet been reached - Deferred, } impl Decision { @@ -32,15 +30,10 @@ impl Decision { matches!(self, Decision::Abort) } - pub fn is_deferred(&self) -> bool { - matches!(self, Decision::Deferred) - } - pub fn and(self, other: Self) -> Self { match self { Decision::Commit => other, Decision::Abort => Decision::Abort, - Decision::Deferred => Decision::Deferred, } } @@ -48,7 +41,6 @@ impl Decision { match self { Decision::Commit => "Commit", Decision::Abort => "Abort", - Decision::Deferred => "Deferred", } } } @@ -66,7 +58,6 @@ impl FromStr for Decision { match s { "Commit" => Ok(Decision::Commit), "Abort" => Ok(Decision::Abort), - "Deferred" => Ok(Decision::Deferred), _ => Err(()), } } diff --git a/dan_layer/storage/src/consensus_models/transaction_execution.rs b/dan_layer/storage/src/consensus_models/transaction_execution.rs index 33baefaf1..43f408c72 100644 --- a/dan_layer/storage/src/consensus_models/transaction_execution.rs +++ b/dan_layer/storage/src/consensus_models/transaction_execution.rs @@ -71,7 +71,7 @@ impl TransactionExecution { } pub fn to_initial_evidence(&self) -> Evidence { - Evidence::from_inputs_and_outputs(self.transaction_id, &self.resolved_inputs, &self.resulting_outputs) + Evidence::from_inputs_and_outputs(&self.resolved_inputs, &self.resulting_outputs) } pub fn transaction_fee(&self) -> u64 { diff --git a/dan_layer/storage/src/consensus_models/transaction_pool.rs b/dan_layer/storage/src/consensus_models/transaction_pool.rs index 9534792fb..793d3e6f3 100644 --- a/dan_layer/storage/src/consensus_models/transaction_pool.rs +++ b/dan_layer/storage/src/consensus_models/transaction_pool.rs @@ -18,12 +18,14 @@ use tari_transaction::TransactionId; use crate::{ consensus_models::{ + BlockId, Decision, Evidence, LeafBlock, LockedBlock, QcId, TransactionAtom, + TransactionExecution, TransactionPoolStatusUpdate, TransactionRecord, }, @@ -159,12 +161,13 @@ impl TransactionPool { Ok(exists) } - pub fn insert( + pub fn insert_new( &self, tx: &mut TStateStore::WriteTransaction<'_>, - transaction: TransactionAtom, + tx_id: TransactionId, + decision: Decision, ) -> Result<(), TransactionPoolError> { - tx.transaction_pool_insert(transaction, TransactionPoolStage::New, true)?; + tx.transaction_pool_insert_new(tx_id, decision)?; Ok(()) } @@ -271,9 +274,15 @@ impl TransactionPool { ts(export, export_to = "../../bindings/src/types/") )] pub struct TransactionPoolRecord { - atom: TransactionAtom, + #[cfg_attr(feature = "ts", ts(type = "string"))] + transaction_id: TransactionId, + evidence: Evidence, + #[cfg_attr(feature = "ts", ts(type = "number"))] + transaction_fee: u64, + leader_fee: Option, stage: TransactionPoolStage, pending_stage: Option, + original_decision: Decision, local_decision: Option, remote_decision: Option, is_ready: bool, @@ -281,17 +290,25 @@ pub struct TransactionPoolRecord { impl TransactionPoolRecord { pub fn load( - transaction: TransactionAtom, + id: TransactionId, + evidence: Evidence, + transaction_fee: Option, + leader_fee: Option, stage: TransactionPoolStage, pending_stage: Option, + original_decision: Decision, local_decision: Option, remote_decision: Option, is_ready: bool, ) -> Self { Self { - atom: transaction, + transaction_id: id, + evidence, + transaction_fee: transaction_fee.unwrap_or(0), + leader_fee, stage, pending_stage, + original_decision, local_decision, remote_decision, is_ready, @@ -302,12 +319,7 @@ impl TransactionPoolRecord { self.remote_decision() // Prioritize remote ABORT i.e. if accept we look at our local decision .filter(|d| d.is_abort()) - .or_else(|| self.local_decision()) - .unwrap_or(self.original_decision()) - } - - pub fn is_deferred(&self) -> bool { - self.current_local_decision().is_deferred() + .unwrap_or_else(|| self.current_local_decision()) } pub fn current_local_decision(&self) -> Decision { @@ -315,7 +327,7 @@ impl TransactionPoolRecord { } pub fn original_decision(&self) -> Decision { - self.atom.decision + self.original_decision } pub fn local_decision(&self) -> Option { @@ -327,21 +339,24 @@ impl TransactionPoolRecord { } pub fn transaction_id(&self) -> &TransactionId { - &self.atom.id + &self.transaction_id } - pub fn atom(&self) -> &TransactionAtom { - &self.atom + pub fn evidence(&self) -> &Evidence { + &self.evidence } - pub fn into_atom(self) -> TransactionAtom { - self.atom + pub fn transaction_fee(&self) -> u64 { + self.transaction_fee } - pub fn stage(&self) -> TransactionPoolStage { + /// Returns the committed stage of the transaction. This is the stage that has been confirmed by the local shard. + pub fn committed_stage(&self) -> TransactionPoolStage { self.stage } + /// Returns the pending stage of the transaction. This is the stage that the transaction is current but has not been + /// confirmed by the local shard. pub fn pending_stage(&self) -> Option { self.pending_stage } @@ -356,23 +371,37 @@ impl TransactionPoolRecord { pub fn get_final_transaction_atom(&self, leader_fee: LeaderFee) -> TransactionAtom { TransactionAtom { + id: self.transaction_id, decision: self.current_decision(), + evidence: self.evidence.clone(), + transaction_fee: self.transaction_fee, leader_fee: Some(leader_fee), - ..self.atom.clone() } } pub fn get_local_transaction_atom(&self) -> TransactionAtom { TransactionAtom { + id: self.transaction_id, + decision: self.current_local_decision(), + evidence: self.evidence.clone(), + transaction_fee: self.transaction_fee, + leader_fee: None, + } + } + + pub fn into_local_transaction_atom(self) -> TransactionAtom { + TransactionAtom { + id: self.transaction_id, decision: self.current_local_decision(), - ..self.atom.clone() + evidence: self.evidence, + transaction_fee: self.transaction_fee, + leader_fee: None, } } pub fn calculate_leader_fee(&self, num_involved_shards: NonZeroU64, exhaust_divisor: u64) -> LeaderFee { - let transaction_fee = self.atom.transaction_fee; - let target_burn = transaction_fee.checked_div(exhaust_divisor).unwrap_or(0); - let block_fee_after_burn = transaction_fee - target_burn; + let target_burn = self.transaction_fee.checked_div(exhaust_divisor).unwrap_or(0); + let block_fee_after_burn = self.transaction_fee - target_burn; let mut leader_fee = block_fee_after_burn / num_involved_shards; // The extra amount that is burnt from dividing the number of shards involved @@ -386,7 +415,7 @@ impl TransactionPoolRecord { // If the div floor burn accounts for 1 less fee for more than half of number of shards, and ... excess_remainder_burn >= num_involved_shards.get() / 2 && // ... if there are enough fees to pay out an additional 1 to all shards - (leader_fee + 1) * num_involved_shards.get() <= transaction_fee + (leader_fee + 1) * num_involved_shards.get() <= self.transaction_fee { // Pay each leader 1 more leader_fee += 1; @@ -415,18 +444,18 @@ impl TransactionPoolRecord { self } - pub fn set_initial_evidence(&mut self, evidence: Evidence) -> &mut Self { - self.atom.evidence = evidence; + pub fn set_evidence(&mut self, evidence: Evidence) -> &mut Self { + self.evidence = evidence; self } pub fn set_transaction_fee(&mut self, transaction_fee: u64) -> &mut Self { - self.atom.transaction_fee = transaction_fee; + self.transaction_fee = transaction_fee; self } pub fn add_evidence(&mut self, committee_info: &CommitteeInfo, qc_id: QcId) -> &mut Self { - let evidence = &mut self.atom.evidence; + let evidence = &mut self.evidence; for (address, evidence_mut) in evidence.iter_mut() { if committee_info.includes_substate_address(address) { evidence_mut.qc_ids.insert(qc_id); @@ -467,9 +496,9 @@ impl TransactionPoolRecord { let update = TransactionPoolStatusUpdate { block_id: block.block_id, block_height: block.height, - transaction_id: self.atom.id, + transaction_id: self.transaction_id, stage: pending_stage, - evidence: self.atom.evidence.clone(), + evidence: self.evidence.clone(), is_ready, local_decision: self.current_local_decision(), }; @@ -489,7 +518,7 @@ impl TransactionPoolRecord { ) -> Result<(), TransactionPoolError> { self.add_evidence(foreign_committee_info, foreign_qc_id); self.set_remote_decision(decision); - tx.transaction_pool_update(&self.atom.id, None, Some(decision), Some(&self.atom.evidence))?; + tx.transaction_pool_update(&self.transaction_id, None, Some(decision), Some(&self.evidence))?; Ok(()) } @@ -500,13 +529,13 @@ impl TransactionPoolRecord { ) -> Result<(), TransactionPoolError> { if self.local_decision.map(|d| d != decision).unwrap_or(true) { self.set_local_decision(decision); - tx.transaction_pool_update(&self.atom.id, Some(decision), None, None)?; + tx.transaction_pool_update(&self.transaction_id, Some(decision), None, None)?; } Ok(()) } pub fn remove(&self, tx: &mut TTx) -> Result<(), TransactionPoolError> { - tx.transaction_pool_remove(&self.atom.id)?; + tx.transaction_pool_remove(&self.transaction_id)?; Ok(()) } @@ -542,12 +571,13 @@ impl TransactionPoolRecord { Ok(transaction) } - pub fn get( + pub fn get_execution_for_block( + &self, tx: &TTx, - id: &TransactionId, - ) -> Result { - let rec = tx.transaction_pool_get(id)?; - Ok(rec) + from_block_id: &BlockId, + ) -> Result { + let exec = TransactionExecution::get_pending_for_block(tx, self.transaction_id(), from_block_id)?; + Ok(exec) } } @@ -612,13 +642,11 @@ mod tests { fn create_record_with_fee(fee: u64) -> TransactionPoolRecord { TransactionPoolRecord { - atom: TransactionAtom { - id: TransactionId::new([0; 32]), - decision: Decision::Commit, - evidence: Default::default(), - transaction_fee: fee, - leader_fee: None, - }, + transaction_id: TransactionId::new([0; 32]), + original_decision: Decision::Commit, + evidence: Default::default(), + transaction_fee: fee, + leader_fee: None, stage: TransactionPoolStage::New, pending_stage: None, local_decision: None, diff --git a/dan_layer/storage/src/state_store/mod.rs b/dan_layer/storage/src/state_store/mod.rs index 91e26b740..d5ef26b0a 100644 --- a/dan_layer/storage/src/state_store/mod.rs +++ b/dan_layer/storage/src/state_store/mod.rs @@ -40,6 +40,7 @@ use crate::{ QuorumCertificate, StateTransition, StateTransitionId, + SubstateChange, SubstateRecord, TransactionAtom, TransactionExecution, @@ -179,6 +180,11 @@ pub trait StateStoreReadTransaction: Sized { fn blocks_max_height(&self) -> Result; fn block_diffs_get(&self, block_id: &BlockId) -> Result; + fn block_diffs_get_last_change_for_substate( + &self, + block_id: &BlockId, + substate_id: &SubstateId, + ) -> Result; fn parked_blocks_exists(&self, block_id: &BlockId) -> Result; @@ -191,7 +197,6 @@ pub trait StateStoreReadTransaction: Sized { fn quorum_certificates_get_by_block_id(&self, block_id: &BlockId) -> Result; // -------------------------------- Transaction Pools -------------------------------- // - fn transaction_pool_get(&self, transaction_id: &TransactionId) -> Result; fn transaction_pool_get_for_blocks( &self, from_block_id: &BlockId, @@ -351,12 +356,7 @@ pub trait StateStoreWriteTransaction { ) -> Result<(), StorageError>; // -------------------------------- Transaction Pool -------------------------------- // - fn transaction_pool_insert( - &mut self, - transaction: TransactionAtom, - stage: TransactionPoolStage, - is_ready: bool, - ) -> Result<(), StorageError>; + fn transaction_pool_insert_new(&mut self, tx_id: TransactionId, decision: Decision) -> Result<(), StorageError>; fn transaction_pool_set_atom(&mut self, transaction: TransactionAtom) -> Result<(), StorageError>; fn transaction_pool_add_pending_update( &mut self, diff --git a/dan_layer/template_builtin/build.rs b/dan_layer/template_builtin/build.rs index 6dfd477c2..6dc1265c1 100644 --- a/dan_layer/template_builtin/build.rs +++ b/dan_layer/template_builtin/build.rs @@ -11,7 +11,7 @@ use std::{ process::Command, }; -const TEMPLATE_BUILTINS: &[&str] = &["templates/account", "templates/account_nfts"]; +const TEMPLATE_BUILTINS: &[&str] = &["templates/account", "templates/account_nfts", "templates/faucet"]; fn main() -> Result<(), Box> { // Rebuild templates if abi or lib changes diff --git a/dan_layer/template_builtin/src/lib.rs b/dan_layer/template_builtin/src/lib.rs index 47834baf1..b965d4fc0 100644 --- a/dan_layer/template_builtin/src/lib.rs +++ b/dan_layer/template_builtin/src/lib.rs @@ -25,13 +25,32 @@ pub const ACCOUNT_TEMPLATE_ADDRESS: TemplateAddress = TemplateAddress::from_arra pub const ACCOUNT_NFT_TEMPLATE_ADDRESS: TemplateAddress = TemplateAddress::from_array([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, ]); +pub const FAUCET_TEMPLATE_ADDRESS: TemplateAddress = TemplateAddress::from_array([ + 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +]); pub fn get_template_builtin(address: &TemplateAddress) -> &'static [u8] { - if *address == ACCOUNT_TEMPLATE_ADDRESS { - include_bytes!("../templates/account/account.wasm") - } else if *address == ACCOUNT_NFT_TEMPLATE_ADDRESS { - include_bytes!("../templates/account_nfts/account_nfts.wasm") - } else { - panic!("Unknown builtin template address") - } + try_get_template_builtin(address).unwrap_or_else(|| panic!("Unknown builtin template address {address}")) +} + +pub fn try_get_template_builtin(address: &TemplateAddress) -> Option<&'static [u8]> { + all_builtin_templates().find(|(a, _)| a == address).map(|(_, b)| b) +} + +pub fn all_builtin_templates() -> impl Iterator { + [ + ( + ACCOUNT_TEMPLATE_ADDRESS, + include_bytes!("../templates/account/account.wasm").as_slice(), + ), + ( + ACCOUNT_NFT_TEMPLATE_ADDRESS, + include_bytes!("../templates/account_nfts/account_nfts.wasm").as_slice(), + ), + ( + FAUCET_TEMPLATE_ADDRESS, + include_bytes!("../templates/faucet/faucet.wasm").as_slice(), + ), + ] + .into_iter() } diff --git a/dan_layer/template_builtin/templates/.gitignore b/dan_layer/template_builtin/templates/.gitignore index 2568bd685..ea5a805b1 100644 --- a/dan_layer/template_builtin/templates/.gitignore +++ b/dan_layer/template_builtin/templates/.gitignore @@ -1,3 +1,4 @@ # Ignore the generated wasms account/account.wasm -account_nfts/account_nfts.wasm \ No newline at end of file +account_nfts/account_nfts.wasm +faucet/faucet.wasm diff --git a/dan_layer/template_builtin/templates/faucet/Cargo.toml b/dan_layer/template_builtin/templates/faucet/Cargo.toml new file mode 100644 index 000000000..bf974d68d --- /dev/null +++ b/dan_layer/template_builtin/templates/faucet/Cargo.toml @@ -0,0 +1,20 @@ +[workspace] +[package] +name = "faucet" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +tari_template_lib = { path = "../../../../dan_layer/template_lib" } + +[profile.release] +opt-level = 's' # Optimize for size. +lto = true # Enable Link Time Optimization. +codegen-units = 1 # Reduce number of codegen units to increase optimizations. +panic = 'abort' # Abort on panic. +strip = "debuginfo" # Strip debug info. + +[lib] +crate-type = ["cdylib", "lib"] \ No newline at end of file diff --git a/dan_layer/template_builtin/templates/faucet/src/lib.rs b/dan_layer/template_builtin/templates/faucet/src/lib.rs new file mode 100644 index 000000000..b2987e472 --- /dev/null +++ b/dan_layer/template_builtin/templates/faucet/src/lib.rs @@ -0,0 +1,34 @@ +// Copyright 2024 The Tari Project +// SPDX-License-Identifier: BSD-3-Clause + +use tari_template_lib::prelude::*; + +#[template] +mod template { + use tari_template_lib::crypto::BalanceProofSignature; + + use super::*; + + pub struct XtrFaucet { + vault: Vault, + } + + impl XtrFaucet { + pub fn take(&self, amount: Amount) -> Bucket { + debug!("Withdrawing {} coins from faucet", amount); + self.vault.withdraw(amount) + } + + pub fn take_confidential( + &self, + amount: Amount, + output: ConfidentialOutputStatement, + balance_proof: BalanceProofSignature, + ) -> Bucket { + // Withdraws revealed funds into the given confidential output + let proof = ConfidentialWithdrawProof::revealed_to_confidential(amount, output, balance_proof); + debug!("Withdrawing {} coins from faucet into confidential output", amount); + self.vault.withdraw_confidential(proof) + } + } +} diff --git a/dan_layer/template_lib/src/auth/access_rules.rs b/dan_layer/template_lib/src/auth/access_rules.rs index eda246815..498b6c677 100644 --- a/dan_layer/template_lib/src/auth/access_rules.rs +++ b/dan_layer/template_lib/src/auth/access_rules.rs @@ -9,7 +9,7 @@ use ts_rs::TS; use crate::models::{ComponentAddress, NonFungibleAddress, ResourceAddress, TemplateAddress}; /// Represents the types of possible access control rules over a component method or resource -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] #[cfg_attr(feature = "ts", derive(TS), ts(export, export_to = "../../bindings/src/types/"))] pub enum AccessRule { AllowAll, @@ -40,7 +40,7 @@ impl AccessRule { } /// An enum that represents the possible ways to restrict access to components or resources -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] #[cfg_attr(feature = "ts", derive(TS), ts(export, export_to = "../../bindings/src/types/"))] pub enum RestrictedAccessRule { Require(RequireRule), @@ -59,7 +59,7 @@ impl RestrictedAccessRule { } /// Specifies a requirement for a [RequireRule]. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] #[cfg_attr(feature = "ts", derive(TS), ts(export, export_to = "../../bindings/src/types/"))] pub enum RuleRequirement { /// Requires ownership of a specific resource @@ -97,7 +97,7 @@ impl From for RuleRequirement { } /// An enum that represents the possible ways to require access to components or resources -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] #[cfg_attr(feature = "ts", derive(TS), ts(export, export_to = "../../bindings/src/types/"))] pub enum RequireRule { Require(RuleRequirement), @@ -106,7 +106,7 @@ pub enum RequireRule { } /// Information needed to specify access rules to methods of a component -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] #[cfg_attr(feature = "ts", derive(TS), ts(export, export_to = "../../bindings/src/types/"))] pub struct ComponentAccessRules { #[cfg_attr(feature = "ts", ts(type = "Record"))] diff --git a/dan_layer/template_lib/src/constants.rs b/dan_layer/template_lib/src/constants.rs index b8d8d06af..5d524341b 100644 --- a/dan_layer/template_lib/src/constants.rs +++ b/dan_layer/template_lib/src/constants.rs @@ -3,7 +3,7 @@ //! A collection of convenient constant values -use crate::models::{ObjectKey, ResourceAddress}; +use crate::models::{ComponentAddress, ObjectKey, ResourceAddress, VaultId}; // TODO: This is set pretty arbitrarily. @@ -19,3 +19,13 @@ pub const CONFIDENTIAL_TARI_RESOURCE_ADDRESS: ResourceAddress = /// Shorthand version of the `CONFIDENTIAL_TARI_RESOURCE_ADDRESS` constant pub const XTR: ResourceAddress = CONFIDENTIAL_TARI_RESOURCE_ADDRESS; + +/// Address of testnet faucet component +pub const XTR_FAUCET_COMPONENT_ADDRESS: ComponentAddress = ComponentAddress::new(ObjectKey::from_array([ + 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +])); + +/// Address of the faucet vault +pub const XTR_FAUCET_VAULT_ADDRESS: VaultId = VaultId::new(ObjectKey::from_array([ + 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, +])); diff --git a/dan_layer/template_lib/src/models/confidential_proof.rs b/dan_layer/template_lib/src/models/confidential_proof.rs index e1deaba1e..e1f273dcd 100644 --- a/dan_layer/template_lib/src/models/confidential_proof.rs +++ b/dan_layer/template_lib/src/models/confidential_proof.rs @@ -159,6 +159,19 @@ impl ConfidentialWithdrawProof { } } + pub fn revealed_to_confidential>( + input_revealed_amount: T, + output_proof: ConfidentialOutputStatement, + balance_proof: BalanceProofSignature, + ) -> Self { + Self { + inputs: vec![], + input_revealed_amount: input_revealed_amount.into(), + output_proof, + balance_proof, + } + } + pub fn revealed_input_amount(&self) -> Amount { self.input_revealed_amount } diff --git a/dan_layer/template_test_tooling/src/lib.rs b/dan_layer/template_test_tooling/src/lib.rs index 617df7741..7dcb5f8b8 100644 --- a/dan_layer/template_test_tooling/src/lib.rs +++ b/dan_layer/template_test_tooling/src/lib.rs @@ -6,6 +6,7 @@ mod read_only_state_store; pub mod support; mod template_test; mod track_calls; + pub use package_builder::Package; pub use template_test::{test_faucet_component, SubstateType, TemplateTest}; diff --git a/dan_layer/template_test_tooling/src/template_test.rs b/dan_layer/template_test_tooling/src/template_test.rs index b6cfd0b60..3c485dd8e 100644 --- a/dan_layer/template_test_tooling/src/template_test.rs +++ b/dan_layer/template_test_tooling/src/template_test.rs @@ -20,11 +20,11 @@ use tari_crypto::{ }; use tari_dan_common_types::crypto::create_key_pair_from_seed; use tari_dan_engine::{ - bootstrap_state, fees::{FeeModule, FeeTable}, runtime::{AuthParams, RuntimeModule}, state_store::{ memory::{MemoryStateStore, MemoryWriteTransaction}, + new_memory_store, AtomicDb, StateWriter, }, @@ -95,11 +95,11 @@ impl TemplateTest { let package = builder.build(); let test = Self::from_package(package); - test.bootstrap_faucet(100_000.into()); + test.bootstrap_state(100_000.into()); test } - pub fn from_package(package: Package) -> Self { + fn from_package(package: Package) -> Self { let secret_key = RistrettoSecretKey::from_hex("8a39567509bf2f7074e5fd153337405292cdc9f574947313b62fbf8fb4cffc02").unwrap(); @@ -116,13 +116,6 @@ impl TemplateTest { } } - let state_store = MemoryStateStore::default(); - { - let mut tx = state_store.write_access().unwrap(); - bootstrap_state(&mut tx).unwrap(); - tx.commit().unwrap(); - } - let mut virtual_substates = VirtualSubstates::new(); virtual_substates.insert(VirtualSubstateId::CurrentEpoch, VirtualSubstate::CurrentEpoch(0)); @@ -133,7 +126,7 @@ impl TemplateTest { secret_key, name_to_template, last_outputs: HashSet::new(), - state_store, + state_store: new_memory_store(), virtual_substates, enable_fees: false, fee_table: FeeTable { @@ -146,7 +139,7 @@ impl TemplateTest { } } - pub fn bootstrap_faucet(&self, amount: Amount) { + pub fn bootstrap_state(&self, amount: Amount) { let mut tx = self.state_store.write_access().unwrap(); Self::initial_tari_faucet_supply( &mut tx, @@ -480,7 +473,6 @@ impl TemplateTest { { let access = self.state_store.read_access().unwrap(); - transaction.filled_inputs_mut().extend( access .iter_raw() diff --git a/dan_layer/transaction/src/substate.rs b/dan_layer/transaction/src/substate.rs index 8d674831b..58ae6a553 100644 --- a/dan_layer/transaction/src/substate.rs +++ b/dan_layer/transaction/src/substate.rs @@ -27,9 +27,16 @@ impl SubstateRequirement { } } - pub fn with_version(address: SubstateId, version: u32) -> Self { + pub fn unversioned>(id: T) -> Self { Self { - substate_id: address, + substate_id: id.into(), + version: None, + } + } + + pub fn with_version>(id: T, version: u32) -> Self { + Self { + substate_id: id.into(), version: Some(version), } } diff --git a/dan_layer/wallet/sdk/src/apis/substate.rs b/dan_layer/wallet/sdk/src/apis/substate.rs index d30c4c5d1..ecf3f12f0 100644 --- a/dan_layer/wallet/sdk/src/apis/substate.rs +++ b/dan_layer/wallet/sdk/src/apis/substate.rs @@ -14,7 +14,7 @@ use tari_engine_types::{ transaction_receipt::TransactionReceiptAddress, TemplateAddress, }; -use tari_transaction::TransactionId; +use tari_transaction::{SubstateRequirement, TransactionId}; use crate::{ models::{SubstateModel, VersionedSubstateId}, @@ -79,7 +79,7 @@ where pub async fn locate_dependent_substates( &self, parents: &[SubstateId], - ) -> Result, SubstateApiError> { + ) -> Result, SubstateApiError> { let mut substate_addresses = HashMap::with_capacity(parents.len()); for parent_addr in parents { @@ -151,10 +151,7 @@ where Ok(substate_addresses .into_iter() - .map(|(address, version)| VersionedSubstateId { - substate_id: address, - version, - }) + .map(|(address, version)| SubstateRequirement::with_version(address, version)) .collect()) } diff --git a/dan_layer/wallet/sdk/src/apis/transaction.rs b/dan_layer/wallet/sdk/src/apis/transaction.rs index e24f66941..8f5fc3ab3 100644 --- a/dan_layer/wallet/sdk/src/apis/transaction.rs +++ b/dan_layer/wallet/sdk/src/apis/transaction.rs @@ -333,11 +333,22 @@ where } } - for (addr, substate) in rest { + for (id, substate) in rest { + if id.is_vault() { + if let Some(vault) = tx.vaults_get(id).optional()? { + // The vault for an account may have been mutated without mutating the account component + // If we know this vault, set it as a child of the account + tx.substates_upsert_child(transaction_id, vault.account_address, VersionedSubstateId { + substate_id: id.clone(), + version: substate.version(), + })?; + continue; + } + } tx.substates_upsert_root( transaction_id, VersionedSubstateId { - substate_id: addr.clone(), + substate_id: id.clone(), version: substate.version(), }, None, diff --git a/integration_tests/src/wallet_daemon_cli.rs b/integration_tests/src/wallet_daemon_cli.rs index 45fa27227..ab02e27a4 100644 --- a/integration_tests/src/wallet_daemon_cli.rs +++ b/integration_tests/src/wallet_daemon_cli.rs @@ -699,7 +699,7 @@ pub async fn confidential_transfer( resource_address: CONFIDENTIAL_TARI_RESOURCE_ADDRESS, proof_from_badge_resource: None, dry_run: false, - input_selection: ConfidentialTransferInputSelection::ConfidentialOnly, + input_selection: ConfidentialTransferInputSelection::PreferRevealed, output_to_revealed: false, }; diff --git a/integration_tests/tests/features/claim_fees.feature b/integration_tests/tests/features/claim_fees.feature.ignore similarity index 97% rename from integration_tests/tests/features/claim_fees.feature rename to integration_tests/tests/features/claim_fees.feature.ignore index fb183cc95..34d1c4448 100644 --- a/integration_tests/tests/features/claim_fees.feature +++ b/integration_tests/tests/features/claim_fees.feature.ignore @@ -1,5 +1,8 @@ # Copyright 2022 The Tari Project # SPDX-License-Identifier: BSD-3-Clause + +# TODO: This feature is currently ignored because the fee claiming needs to be reworked + @claim_fees Feature: Claim Fees diff --git a/integration_tests/tests/features/transfer.feature b/integration_tests/tests/features/transfer.feature index 16e234489..7e4f02752 100644 --- a/integration_tests/tests/features/transfer.feature +++ b/integration_tests/tests/features/transfer.feature @@ -56,14 +56,14 @@ Feature: Account transfers # Fund the sender account with faucet tokens When I print the cucumber world When I submit a transaction manifest via wallet daemon WALLET_D with inputs "FAUCET, ACCOUNT" named "TX1" - ``` - let faucet = global!["FAUCET/components/TestFaucet"]; - let mut acc1 = global!["ACCOUNT/components/Account"]; + ``` + let faucet = global!["FAUCET/components/TestFaucet"]; + let mut acc1 = global!["ACCOUNT/components/Account"]; - // get tokens from the faucet - let faucet_bucket = faucet.take_free_coins(); - acc1.deposit(faucet_bucket); - ``` + // get tokens from the faucet + let faucet_bucket = faucet.take_free_coins(); + acc1.deposit(faucet_bucket); + ``` # Wait for the wallet daemon account monitor to update the sender account information @@ -76,11 +76,11 @@ Feature: Account transfers # Check that ACC_2 component was created and has funds When I submit a transaction manifest via wallet daemon WALLET_D with inputs "FAUCET, TRANSFER" named "TX2" - ``` - let mut acc2 = global!["TRANSFER/components/Account"]; - let faucet_resource = global!["FAUCET/resources/0"]; - acc2.balance(faucet_resource); - ``` + ``` + let mut acc2 = global!["TRANSFER/components/Account"]; + let faucet_resource = global!["FAUCET/resources/0"]; + acc2.balance(faucet_resource); + ``` When I print the cucumber world @serial @@ -137,14 +137,14 @@ Feature: Account transfers # Fund the sender account with faucet tokens When I print the cucumber world When I submit a transaction manifest via wallet daemon WALLET_D with inputs "FAUCET, ACCOUNT_1" named "TX1" - ``` - let faucet = global!["FAUCET/components/TestFaucet"]; - let mut acc1 = global!["ACCOUNT_1/components/Account"]; + ``` + let faucet = global!["FAUCET/components/TestFaucet"]; + let mut acc1 = global!["ACCOUNT_1/components/Account"]; - // get tokens from the faucet - let faucet_bucket = faucet.take_free_coins(); - acc1.deposit(faucet_bucket); - ``` + // get tokens from the faucet + let faucet_bucket = faucet.take_free_coins(); + acc1.deposit(faucet_bucket); + ``` When I wait 3 seconds @@ -153,11 +153,11 @@ Feature: Account transfers # Check that ACCOUNT_2 component now has funds When I submit a transaction manifest via wallet daemon WALLET_D with inputs "FAUCET, ACCOUNT_2" named "TX2" - ``` - let mut acc2 = global!["ACCOUNT_2/components/Account"]; - let faucet_resource = global!["FAUCET/resources/0"]; - acc2.balance(faucet_resource); - ``` + ``` + let mut acc2 = global!["ACCOUNT_2/components/Account"]; + let faucet_resource = global!["FAUCET/resources/0"]; + acc2.balance(faucet_resource); + ``` When I print the cucumber world @serial diff --git a/networking/rpc_framework/src/client/mod.rs b/networking/rpc_framework/src/client/mod.rs index 9d241e12c..aac9e8c91 100644 --- a/networking/rpc_framework/src/client/mod.rs +++ b/networking/rpc_framework/src/client/mod.rs @@ -466,9 +466,9 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send _ = &mut self.shutdown_signal => { break; }, - Some(server_msg) = self.framed.next() => { + server_msg = self.framed.next() => { match server_msg { - Ok(msg) => { + Some(Ok(msg)) => { if let Err(err) = self.handle_interrupt_server_message(msg) { #[cfg(feature = "metrics")] metrics::handshake_errors(&self.peer_id, &self.protocol_id).inc(); @@ -476,10 +476,14 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send break; } }, - Err(err) => { + Some(Err(err)) => { debug!(target: LOG_TARGET, "(peer={}) IO Error: {}. Worker is terminating.", self.peer_id, err); break; }, + None => { + debug!(target: LOG_TARGET, "(peer={}) Substream closed. Worker is terminating.", self.peer_id); + break; + } } }, req = self.request_rx.recv() => { diff --git a/utilities/tariswap_test_bench/src/accounts.rs b/utilities/tariswap_test_bench/src/accounts.rs index 265db1a7c..2da601092 100644 --- a/utilities/tariswap_test_bench/src/accounts.rs +++ b/utilities/tariswap_test_bench/src/accounts.rs @@ -7,8 +7,13 @@ use tari_crypto::{keys::PublicKey as _, ristretto::RistrettoPublicKey}; use tari_dan_wallet_sdk::{apis::key_manager::TRANSACTION_BRANCH, models::Account}; use tari_engine_types::component::new_component_address_from_public_key; use tari_template_builtin::ACCOUNT_TEMPLATE_ADDRESS; -use tari_template_lib::{args, models::Amount}; -use tari_transaction::{Instruction, Transaction}; +use tari_template_lib::{ + args, + constants::{XTR, XTR_FAUCET_COMPONENT_ADDRESS, XTR_FAUCET_VAULT_ADDRESS}, + models::Amount, + resource::ResourceType, +}; +use tari_transaction::{SubstateRequirement, Transaction}; use crate::{faucet::Faucet, runner::Runner}; @@ -22,22 +27,31 @@ impl Runner { let transaction = Transaction::builder() .with_fee_instructions_builder(|builder| { builder - .add_instruction(Instruction::CreateFreeTestCoins { - revealed_amount: 1_000_000_000.into(), - output: None, - }) + .call_method(XTR_FAUCET_COMPONENT_ADDRESS, "take", args![Amount(1_000_000_000)]) .put_last_instruction_output_on_workspace("coins") .create_account_with_bucket(owner_public_key, "coins") .call_method(account_address, "pay_fee", args![Amount(1000)]) }) + .with_inputs([ + SubstateRequirement::unversioned(XTR_FAUCET_COMPONENT_ADDRESS), + SubstateRequirement::unversioned(XTR_FAUCET_VAULT_ADDRESS), + ]) .sign(&key.key) .build(); let finalize = self.submit_transaction_and_wait(transaction).await?; let diff = finalize.result.accept().unwrap(); let (account, _) = diff.up_iter().find(|(addr, _)| addr.is_component()).unwrap(); + let (vault, _) = diff.up_iter().find(|(addr, _)| addr.is_vault()).unwrap(); self.sdk.accounts_api().add_account(None, account, 0, true)?; + self.sdk.accounts_api().add_vault( + account.clone(), + vault.clone(), + XTR, + ResourceType::Confidential, + Some("XTR".to_string()), + )?; let account = self.sdk.accounts_api().get_account_by_address(account)?; Ok(account) @@ -65,7 +79,10 @@ impl Runner { for owner in &owners { builder = builder.create_account(RistrettoPublicKey::from_secret_key(&owner.key)); } - let transaction = builder.sign(&key.key).build(); + let transaction = builder + .with_inputs([SubstateRequirement::unversioned(pay_fee_account.address.clone())]) + .sign(&key.key) + .build(); let finalize = self.submit_transaction_and_wait(transaction).await?; let diff = finalize.result.accept().unwrap(); @@ -113,17 +130,20 @@ impl Runner { .call_method(account.address.as_component_address().unwrap(), "deposit", args![ Workspace("faucet") ]) - .add_instruction(Instruction::CreateFreeTestCoins { - revealed_amount: 1_000_000.into(), - output: None, - }) + .call_method(XTR_FAUCET_COMPONENT_ADDRESS, "take", args![Amount(1_000_000)]) .put_last_instruction_output_on_workspace("xtr") .call_method(account.address.as_component_address().unwrap(), "deposit", args![ Workspace("xtr") ]); } - let transaction = builder.sign(&key.key).build(); + let transaction = builder + .with_inputs([ + SubstateRequirement::unversioned(faucet.component_address), + SubstateRequirement::unversioned(faucet.resource_address), + ]) + .sign(&key.key) + .build(); self.submit_transaction_and_wait(transaction).await?; diff --git a/utilities/tariswap_test_bench/src/cli.rs b/utilities/tariswap_test_bench/src/cli.rs index 1874ca602..af79e98b5 100644 --- a/utilities/tariswap_test_bench/src/cli.rs +++ b/utilities/tariswap_test_bench/src/cli.rs @@ -4,6 +4,7 @@ use std::path::PathBuf; use clap::{Args, Parser, Subcommand}; +use tari_engine_types::TemplateAddress; use url::Url; #[derive(Parser, Debug)] @@ -35,6 +36,8 @@ pub struct CommonArgs { pub indexer_url: Url, #[clap(long, short = 'v', alias = "vn", default_value = "http://localhost:18200/json_rpc")] pub validator_node_url: Url, + #[clap(long, short = 't', alias = "faucet")] + pub faucet_template: Option, } #[derive(Subcommand, Debug)] diff --git a/utilities/tariswap_test_bench/src/faucet.rs b/utilities/tariswap_test_bench/src/faucet.rs index fdb8a79f0..78fdcf58a 100644 --- a/utilities/tariswap_test_bench/src/faucet.rs +++ b/utilities/tariswap_test_bench/src/faucet.rs @@ -21,7 +21,7 @@ impl Runner { let transaction = Transaction::builder() .fee_transaction_pay_from_component(in_account.address.as_component_address().unwrap(), Amount(1000)) - .call_function(self.faucet_template.address, "mint", args![Amount(1_000_000_000)]) + .call_function(self._faucet_template.address, "mint", args![Amount(1_000_000_000)]) .sign(&key.key) .build(); diff --git a/utilities/tariswap_test_bench/src/runner.rs b/utilities/tariswap_test_bench/src/runner.rs index 7c81e4558..c9301b54c 100644 --- a/utilities/tariswap_test_bench/src/runner.rs +++ b/utilities/tariswap_test_bench/src/runner.rs @@ -19,7 +19,7 @@ type WalletSdk = DanWalletSdk pub struct Runner { pub(crate) sdk: WalletSdk, pub(crate) _cli: CommonArgs, - pub(crate) faucet_template: TemplateMetadata, + pub(crate) _faucet_template: TemplateMetadata, pub(crate) tariswap_template: TemplateMetadata, pub(crate) stats: Stats, } @@ -27,11 +27,11 @@ pub struct Runner { impl Runner { pub async fn init(cli: CommonArgs) -> anyhow::Result { let sdk = initialize_wallet_sdk(&cli.db_path, cli.indexer_url.clone())?; - let (faucet_template, tariswap_template) = get_templates(&cli.validator_node_url).await?; + let (faucet_template, tariswap_template) = get_templates(&cli).await?; Ok(Self { sdk, _cli: cli, - faucet_template, + _faucet_template: faucet_template, tariswap_template, stats: Stats::default(), }) @@ -44,6 +44,7 @@ impl Runner { } pub async fn submit_transaction(&mut self, transaction: Transaction) -> anyhow::Result { + // TODO: remove the filled inputs here and allow consensus to figure out input versions let inputs = transaction .to_referenced_substates()? .into_iter() diff --git a/utilities/tariswap_test_bench/src/templates.rs b/utilities/tariswap_test_bench/src/templates.rs index 2f0596b3f..26287cfd9 100644 --- a/utilities/tariswap_test_bench/src/templates.rs +++ b/utilities/tariswap_test_bench/src/templates.rs @@ -2,24 +2,33 @@ // SPDX-License-Identifier: BSD-3-Clause use tari_validator_node_client::types::{GetTemplatesRequest, GetTemplatesResponse, TemplateMetadata}; -use url::Url; -pub async fn get_templates(vn_url: &Url) -> anyhow::Result<(TemplateMetadata, TemplateMetadata)> { - let mut client = tari_validator_node_client::ValidatorNodeClient::connect(vn_url.clone())?; +use crate::cli::CommonArgs; + +pub async fn get_templates(cli: &CommonArgs) -> anyhow::Result<(TemplateMetadata, TemplateMetadata)> { + let mut client = tari_validator_node_client::ValidatorNodeClient::connect(cli.validator_node_url.clone())?; let GetTemplatesResponse { templates } = client.get_active_templates(GetTemplatesRequest { limit: 100 }).await?; + let tariswap = if let Some(template_address) = cli.faucet_template { + templates + .iter() + .find(|t| t.address == template_address) + .ok_or(anyhow::anyhow!("Tariswap template not found"))? + .clone() + } else { + templates + .iter() + .find(|t| t.name == "TariSwapPool") + .ok_or(anyhow::anyhow!("Tariswap template not found"))? + .clone() + }; + let faucet = templates .iter() - .find(|t| t.name.to_ascii_lowercase().contains("faucet")) + .find(|t| t.name == "TestFaucet") .ok_or(anyhow::anyhow!("Faucet template not found"))? .clone(); - let tariswap = templates - .iter() - .find(|t| t.name.to_ascii_lowercase().contains("tariswap")) - .ok_or(anyhow::anyhow!("Tariswap template not found"))? - .clone(); - log::info!("Faucet template: {}", faucet.address); log::info!("Tariswap template: {}", tariswap.address); diff --git a/utilities/tariswap_test_bench/templates/faucet/Cargo.toml b/utilities/tariswap_test_bench/templates/faucet/Cargo.toml new file mode 100644 index 000000000..bf974d68d --- /dev/null +++ b/utilities/tariswap_test_bench/templates/faucet/Cargo.toml @@ -0,0 +1,20 @@ +[workspace] +[package] +name = "faucet" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +tari_template_lib = { path = "../../../../dan_layer/template_lib" } + +[profile.release] +opt-level = 's' # Optimize for size. +lto = true # Enable Link Time Optimization. +codegen-units = 1 # Reduce number of codegen units to increase optimizations. +panic = 'abort' # Abort on panic. +strip = "debuginfo" # Strip debug info. + +[lib] +crate-type = ["cdylib", "lib"] \ No newline at end of file diff --git a/utilities/tariswap_test_bench/templates/faucet/src/lib.rs b/utilities/tariswap_test_bench/templates/faucet/src/lib.rs new file mode 100644 index 000000000..da5b4e92f --- /dev/null +++ b/utilities/tariswap_test_bench/templates/faucet/src/lib.rs @@ -0,0 +1,55 @@ +// Copyright 2022. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +use tari_template_lib::prelude::*; + +#[template] +mod template { + use super::*; + + pub struct TestFaucet { + vault: Vault, + } + + impl TestFaucet { + pub fn mint(initial_supply: Amount) -> Component { + let coins = ResourceBuilder::fungible() + .with_token_symbol("FAUCET") + .initial_supply(initial_supply); + + Component::new(Self { + vault: Vault::from_bucket(coins), + }) + .with_access_rules(AccessRules::allow_all()) + .create() + } + + pub fn take_free_coins(&self) -> Bucket { + self.take_amount_of_free_coins(Amount(1000)) + } + + pub fn take_amount_of_free_coins(&self, amount: Amount) -> Bucket { + debug!("Withdrawing {} coins from faucet", amount); + self.vault.withdraw(amount) + } + } +} diff --git a/utilities/transaction_generator/src/transaction_builders/free_coins.rs b/utilities/transaction_generator/src/transaction_builders/free_coins.rs index 764280fec..31cd32a56 100644 --- a/utilities/transaction_generator/src/transaction_builders/free_coins.rs +++ b/utilities/transaction_generator/src/transaction_builders/free_coins.rs @@ -5,8 +5,12 @@ use rand::rngs::OsRng; use tari_crypto::{keys::PublicKey, ristretto::RistrettoPublicKey}; use tari_engine_types::{component::new_component_address_from_public_key, instruction::Instruction}; use tari_template_builtin::ACCOUNT_TEMPLATE_ADDRESS; -use tari_template_lib::{args, models::Amount}; -use tari_transaction::Transaction; +use tari_template_lib::{ + args, + constants::{XTR_FAUCET_COMPONENT_ADDRESS, XTR_FAUCET_VAULT_ADDRESS}, + models::Amount, +}; +use tari_transaction::{SubstateRequirement, Transaction}; pub fn builder(_: u64) -> Transaction { let (signer_secret_key, signer_public_key) = RistrettoPublicKey::random_keypair(&mut OsRng); @@ -16,14 +20,19 @@ pub fn builder(_: u64) -> Transaction { Transaction::builder() .with_fee_instructions_builder(|builder| { builder - .add_instruction(Instruction::CreateFreeTestCoins { - revealed_amount: Amount::new(1000), - output: None, + .add_instruction(Instruction::CallMethod { + component_address: XTR_FAUCET_COMPONENT_ADDRESS, + method: "take".to_string(), + args: args![Amount(5000)], }) .put_last_instruction_output_on_workspace(b"free_coins") .create_account_with_bucket(signer_public_key, "free_coins") .call_method(account_address, "pay_fee", args![Amount(1000)]) }) + .with_inputs([ + SubstateRequirement::unversioned(XTR_FAUCET_COMPONENT_ADDRESS), + SubstateRequirement::unversioned(XTR_FAUCET_VAULT_ADDRESS), + ]) .sign(&signer_secret_key) .build() } diff --git a/utilities/transaction_submitter/Cargo.toml b/utilities/transaction_submitter/Cargo.toml index 129c3dd57..8d06a9a4b 100644 --- a/utilities/transaction_submitter/Cargo.toml +++ b/utilities/transaction_submitter/Cargo.toml @@ -12,6 +12,7 @@ license.workspace = true transaction_generator = { workspace = true } tari_validator_node_client = { workspace = true } tari_transaction = { workspace = true } +tari_dan_common_types = { workspace = true } anyhow = { workspace = true } # if we set clap version 4 in the workspace it would break other crates diff --git a/utilities/transaction_submitter/src/main.rs b/utilities/transaction_submitter/src/main.rs index 39dd4af13..c8a97889f 100644 --- a/utilities/transaction_submitter/src/main.rs +++ b/utilities/transaction_submitter/src/main.rs @@ -4,6 +4,7 @@ use std::{cmp, fs::File, io::Write, time::Duration}; use anyhow::bail; +use tari_dan_common_types::optional::Optional; use tari_transaction::TransactionId; use tari_validator_node_client::{ types::{GetTransactionResultRequest, SubmitTransactionRequest, SubmitTransactionResponse}, @@ -132,6 +133,7 @@ async fn stress_test(args: StressTestArgs) -> anyhow::Result, mut submitted_rx: mpsc::UnboundedReceiver, @@ -183,8 +185,9 @@ async fn fetch_result_summary( match client .get_transaction_result(GetTransactionResultRequest { transaction_id }) .await + .optional() { - Ok(result) => { + Ok(Some(result)) => { if let Some(ref exec_result) = result.result { let result = if let Some(diff) = exec_result.finalize.result.accept() { TxFinalized { @@ -210,6 +213,14 @@ async fn fetch_result_summary( sleep(Duration::from_secs(1)).await; } }, + Ok(None) => { + println!( + "Transaction result not found: {}. This is likely due to a race condition. Retrying \ + later...", + transaction_id + ); + sleep(Duration::from_secs(1)).await; + }, Err(e) => { println!("Failed to get transaction result: {}", e); results_tx