diff --git a/Cargo.lock b/Cargo.lock
index b9565ad40..9981c2869 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1581,7 +1581,7 @@ dependencies = [
  "fern",
  "futures 0.3.31",
  "humantime 2.1.0",
- "itertools 0.11.0",
+ "itertools 0.13.0",
  "log",
  "rand",
  "serde",
@@ -5771,7 +5771,7 @@ dependencies = [
 [[package]]
 name = "minotari_app_grpc"
 version = "1.7.0-pre.3"
-source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252"
+source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#0e72c119054c835481f21d94926163dea0a1f73c"
 dependencies = [
  "argon2",
  "base64 0.13.1",
@@ -5802,7 +5802,7 @@ dependencies = [
 [[package]]
 name = "minotari_app_utilities"
 version = "1.7.0-pre.3"
-source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252"
+source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#0e72c119054c835481f21d94926163dea0a1f73c"
 dependencies = [
  "clap 3.2.25",
  "dialoguer 0.10.4",
@@ -5824,7 +5824,7 @@ dependencies = [
 [[package]]
 name = "minotari_console_wallet"
 version = "1.7.0-pre.3"
-source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252"
+source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#0e72c119054c835481f21d94926163dea0a1f73c"
 dependencies = [
  "blake2",
  "chrono",
@@ -5881,7 +5881,7 @@ dependencies = [
 [[package]]
 name = "minotari_ledger_wallet_common"
 version = "1.7.0-pre.3"
-source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252"
+source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#0e72c119054c835481f21d94926163dea0a1f73c"
 dependencies = [
  "bs58 0.5.1",
 ]
@@ -5889,7 +5889,7 @@ dependencies = [
 [[package]]
 name = "minotari_ledger_wallet_comms"
 version = "1.7.0-pre.3"
-source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252"
+source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#0e72c119054c835481f21d94926163dea0a1f73c"
 dependencies = [
  "borsh",
  "dialoguer 0.11.0",
@@ -5912,7 +5912,7 @@ dependencies = [
 [[package]]
 name = "minotari_node"
 version = "1.7.0-pre.3"
-source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252"
+source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#0e72c119054c835481f21d94926163dea0a1f73c"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -5960,7 +5960,7 @@ dependencies = [
 [[package]]
 name = "minotari_node_grpc_client"
 version = "1.7.0-pre.3"
-source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252"
+source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#0e72c119054c835481f21d94926163dea0a1f73c"
 dependencies = [
  "minotari_app_grpc",
 ]
@@ -5968,7 +5968,7 @@ dependencies = [
 [[package]]
 name = "minotari_wallet"
 version = "1.7.0-pre.3"
-source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252"
+source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#0e72c119054c835481f21d94926163dea0a1f73c"
 dependencies = [
  "argon2",
  "async-trait",
@@ -6019,7 +6019,7 @@ dependencies = [
 [[package]]
 name = "minotari_wallet_grpc_client"
 version = "1.7.0-pre.3"
-source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252"
+source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#0e72c119054c835481f21d94926163dea0a1f73c"
 dependencies = [
  "minotari_app_grpc",
  "tari_common_types",
@@ -9308,7 +9308,7 @@ dependencies = [
 [[package]]
 name = "tari_common"
 version = "1.7.0-pre.3"
-source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252"
+source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#0e72c119054c835481f21d94926163dea0a1f73c"
 dependencies = [
  "anyhow",
  "cargo_toml",
@@ -9334,7 +9334,7 @@ dependencies = [
 [[package]]
 name = "tari_common_sqlite"
 version = "1.7.0-pre.3"
-source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252"
+source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#0e72c119054c835481f21d94926163dea0a1f73c"
 dependencies = [
  "diesel",
  "diesel_migrations",
@@ -9348,7 +9348,7 @@ dependencies = [
 [[package]]
 name = "tari_common_types"
 version = "1.7.0-pre.3"
-source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252"
+source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#0e72c119054c835481f21d94926163dea0a1f73c"
 dependencies = [
  "base64 0.21.7",
  "bitflags 2.6.0",
@@ -9374,7 +9374,7 @@ dependencies = [
 [[package]]
 name = "tari_comms"
 version = "1.7.0-pre.3"
-source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252"
+source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#0e72c119054c835481f21d94926163dea0a1f73c"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -9418,7 +9418,7 @@ dependencies = [
 [[package]]
 name = "tari_comms_dht"
 version = "1.7.0-pre.3"
-source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252"
+source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#0e72c119054c835481f21d94926163dea0a1f73c"
 dependencies = [
  "anyhow",
  "bitflags 2.6.0",
@@ -9453,7 +9453,7 @@ dependencies = [
 [[package]]
 name = "tari_comms_rpc_macros"
 version = "1.7.0-pre.3"
-source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252"
+source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#0e72c119054c835481f21d94926163dea0a1f73c"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -9487,7 +9487,7 @@ dependencies = [
 [[package]]
 name = "tari_contacts"
 version = "1.7.0-pre.3"
-source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252"
+source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#0e72c119054c835481f21d94926163dea0a1f73c"
 dependencies = [
  "chrono",
  "diesel",
@@ -9520,7 +9520,7 @@ dependencies = [
 [[package]]
 name = "tari_core"
 version = "1.7.0-pre.3"
-source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252"
+source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#0e72c119054c835481f21d94926163dea0a1f73c"
 dependencies = [
  "async-trait",
  "bincode 1.3.3",
@@ -9735,6 +9735,7 @@ dependencies = [
  "tari_dan_common_types",
  "tari_dan_storage",
  "tari_engine_types",
+ "tari_jellyfish",
  "tari_networking",
  "tari_template_lib",
  "tari_transaction",
@@ -9945,6 +9946,7 @@ version = "0.7.0"
 dependencies = [
  "base64 0.21.7",
  "blake2",
+ "borsh",
  "digest",
  "hex",
  "indexmap 2.6.0",
@@ -9959,7 +9961,6 @@ dependencies = [
  "tari_hashing",
  "tari_template_abi",
  "tari_template_lib",
- "tari_utilities",
  "thiserror 1.0.68",
  "ts-rs",
 ]
@@ -9988,7 +9989,7 @@ dependencies = [
 [[package]]
 name = "tari_features"
 version = "1.7.0-pre.3"
-source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252"
+source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#0e72c119054c835481f21d94926163dea0a1f73c"
 
 [[package]]
 name = "tari_generate"
@@ -10010,7 +10011,7 @@ dependencies = [
 [[package]]
 name = "tari_hashing"
 version = "1.7.0-pre.3"
-source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252"
+source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#0e72c119054c835481f21d94926163dea0a1f73c"
 dependencies = [
  "blake2",
  "borsh",
@@ -10111,10 +10112,24 @@ dependencies = [
  "tokio",
 ]
 
+[[package]]
+name = "tari_jellyfish"
+version = "1.7.0-pre.3"
+source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#0e72c119054c835481f21d94926163dea0a1f73c"
+dependencies = [
+ "borsh",
+ "digest",
+ "indexmap 2.6.0",
+ "serde",
+ "tari_crypto",
+ "tari_hashing",
+ "thiserror 2.0.3",
+]
+
 [[package]]
 name = "tari_key_manager"
 version = "1.7.0-pre.3"
-source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252"
+source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#0e72c119054c835481f21d94926163dea0a1f73c"
 dependencies = [
  "argon2",
  "async-trait",
@@ -10147,7 +10162,7 @@ dependencies = [
 [[package]]
 name = "tari_libtor"
 version = "1.7.0-pre.3"
-source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252"
+source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#0e72c119054c835481f21d94926163dea0a1f73c"
 dependencies = [
  "derivative",
  "libtor",
@@ -10162,7 +10177,7 @@ dependencies = [
 [[package]]
 name = "tari_max_size"
 version = "1.7.0-pre.3"
-source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252"
+source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#0e72c119054c835481f21d94926163dea0a1f73c"
 dependencies = [
  "borsh",
  "serde",
@@ -10173,7 +10188,7 @@ dependencies = [
 [[package]]
 name = "tari_metrics"
 version = "1.7.0-pre.3"
-source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252"
+source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#0e72c119054c835481f21d94926163dea0a1f73c"
 dependencies = [
  "once_cell",
  "prometheus",
@@ -10183,7 +10198,7 @@ dependencies = [
 [[package]]
 name = "tari_mmr"
 version = "1.7.0-pre.3"
-source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252"
+source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#0e72c119054c835481f21d94926163dea0a1f73c"
 dependencies = [
  "borsh",
  "digest",
@@ -10213,7 +10228,7 @@ dependencies = [
 [[package]]
 name = "tari_p2p"
 version = "1.7.0-pre.3"
-source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252"
+source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#0e72c119054c835481f21d94926163dea0a1f73c"
 dependencies = [
  "anyhow",
  "fs2",
@@ -10317,7 +10332,7 @@ dependencies = [
 [[package]]
 name = "tari_script"
 version = "1.7.0-pre.3"
-source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252"
+source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#0e72c119054c835481f21d94926163dea0a1f73c"
 dependencies = [
  "blake2",
  "borsh",
@@ -10335,7 +10350,7 @@ dependencies = [
 [[package]]
 name = "tari_service_framework"
 version = "1.7.0-pre.3"
-source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252"
+source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#0e72c119054c835481f21d94926163dea0a1f73c"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -10350,7 +10365,7 @@ dependencies = [
 [[package]]
 name = "tari_shutdown"
 version = "1.7.0-pre.3"
-source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252"
+source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#0e72c119054c835481f21d94926163dea0a1f73c"
 dependencies = [
  "futures 0.3.31",
 ]
@@ -10358,7 +10373,7 @@ dependencies = [
 [[package]]
 name = "tari_sidechain"
 version = "1.7.0-pre.3"
-source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252"
+source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#0e72c119054c835481f21d94926163dea0a1f73c"
 dependencies = [
  "borsh",
  "log",
@@ -10366,6 +10381,7 @@ dependencies = [
  "tari_common_types",
  "tari_crypto",
  "tari_hashing",
+ "tari_jellyfish",
  "tari_utilities",
  "thiserror 2.0.3",
 ]
@@ -10422,16 +10438,13 @@ dependencies = [
 name = "tari_state_tree"
 version = "0.7.0"
 dependencies = [
- "blake2",
  "indexmap 2.6.0",
- "itertools 0.11.0",
  "log",
  "serde",
- "tari_bor",
  "tari_common_types",
- "tari_crypto",
  "tari_dan_common_types",
  "tari_engine_types",
+ "tari_jellyfish",
  "tari_template_lib",
  "thiserror 1.0.68",
 ]
@@ -10439,7 +10452,7 @@ dependencies = [
 [[package]]
 name = "tari_storage"
 version = "1.7.0-pre.3"
-source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252"
+source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#0e72c119054c835481f21d94926163dea0a1f73c"
 dependencies = [
  "bincode 1.3.3",
  "lmdb-zero",
@@ -10522,6 +10535,7 @@ dependencies = [
 name = "tari_template_lib"
 version = "0.7.0"
 dependencies = [
+ "borsh",
  "newtype-ops",
  "serde",
  "serde_json",
@@ -10569,7 +10583,7 @@ dependencies = [
 [[package]]
 name = "tari_test_utils"
 version = "1.7.0-pre.3"
-source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252"
+source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#0e72c119054c835481f21d94926163dea0a1f73c"
 dependencies = [
  "futures 0.3.31",
  "rand",
diff --git a/Cargo.toml b/Cargo.toml
index 482506f89..8150c0fb6 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -122,6 +122,7 @@ tari_common = { git = "https://github.com/tari-project/tari.git", branch = "feat
 tari_common_types = { git = "https://github.com/tari-project/tari.git", branch = "feature-dan2" }
 tari_hashing = { git = "https://github.com/tari-project/tari.git", branch = "feature-dan2" }
 tari_sidechain = { git = "https://github.com/tari-project/tari.git", branch = "feature-dan2" }
+tari_jellyfish = { git = "https://github.com/tari-project/tari.git", branch = "feature-dan2" }
 
 # avoid including default features so each crate can choose which ones to import
 tari_core = { git = "https://github.com/tari-project/tari.git", branch = "feature-dan2", default-features = false }
@@ -184,7 +185,6 @@ humantime-serde = "1.1.1"
 include_dir = "0.7.2"
 indexmap = "2.6.0"
 indoc = "1.0.6"
-itertools = "0.11.0"
 lazy_static = "1.4.0"
 # Use Tari's libp2p fork that adds support for Schnorr-Ristretto
 libp2p-identity = { git = "https://github.com/tari-project/rust-libp2p.git", rev = "3d918ccbf5ae1cbec0815a2156079b0fba4ba558" }
@@ -279,29 +279,3 @@ overflow-checks = true
 
 #tari_hashing = { git = "https://github.com/account/tari.git", branch = "my-branch" }
 #tari_sidechain = { git = "https://github.com/account/tari.git", branch = "my-branch" }
-
-#[patch."https://github.com/tari-project/tari.git"]
-#minotari_app_grpc = { path = "../tari/applications/minotari_app_grpc" }
-#minotari_wallet_grpc_client = { path = "../tari/clients/rust/wallet_grpc_client" }
-#minotari_node_grpc_client = { path = "../tari/clients/rust/base_node_grpc_client" }
-#tari_common = { path = "../tari/common" }
-#tari_common_types = { path = "../tari/base_layer/common_types" }
-#tari_comms = { path = "../tari/comms/core" }
-#tari_comms_rpc_macros = { path = "../tari/comms/rpc_macros" }
-#tari_core = { path = "../tari/base_layer/core" }
-#tari_key_manager = { path = "../tari/base_layer/key_manager" }
-#tari_mmr = { path = "../tari/base_layer/mmr" }
-#tari_p2p = { path = "../tari/base_layer/p2p" }
-#tari_shutdown = { path = "../tari/infrastructure/shutdown" }
-#tari_storage = { path = "../tari/infrastructure/storage" }
-#tari_script = { path = "../tari/infrastructure/tari_script" }
-#minotari_wallet = { path = "../tari/base_layer/wallet" }
-#minotari_console_wallet = { path = "../tari/applications/minotari_console_wallet" }
-#tari_service_framework = { path = "../tari/base_layer/service_framework" }
-#tari_comms_dht = { path = "../tari/comms/dht" }
-#minotari_app_utilities = { path = "../tari/applications/minotari_app_utilities" }
-#minotari_node = { path = "../tari/applications/minotari_node" }
-#tari_metrics = { path = "../tari/infrastructure/metrics" }
-#tari_libtor = { path = "../tari/infrastructure/libtor" }
-#tari_hashing = { path = "../tari/hashing" }
-#tari_sidechain = { path = "../tari/base_layer/sidechain" }
diff --git a/dan_layer/common_types/src/versioned_substate_id.rs b/dan_layer/common_types/src/versioned_substate_id.rs
index 20fcdd3f3..6c7e526c6 100644
--- a/dan_layer/common_types/src/versioned_substate_id.rs
+++ b/dan_layer/common_types/src/versioned_substate_id.rs
@@ -3,6 +3,7 @@
 
 use std::{borrow::Borrow, fmt::Display, str::FromStr};
 
+use borsh::BorshSerialize;
 use serde::{Deserialize, Serialize};
 use tari_engine_types::{serde_with, substate::SubstateId};
 
@@ -159,7 +160,7 @@ impl std::hash::Hash for SubstateRequirement {
 #[error("Failed to parse substate requirement {0}")]
 pub struct SubstateRequirementParseError(String);
 
-#[derive(Debug, Clone, Hash, PartialEq, Eq, Deserialize, Serialize)]
+#[derive(Debug, Clone, Hash, PartialEq, Eq, Deserialize, Serialize, BorshSerialize)]
 #[cfg_attr(
     feature = "ts",
     derive(ts_rs::TS),
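The `BorshSerialize` derive added above gives `SubstateRequirement` a canonical, deterministic byte layout, which is what the new jellyfish-tree hashing consumes. A minimal, self-contained sketch of that property (hypothetical struct, borsh 1.x API; not code from the PR):

```rust
use borsh::BorshSerialize;

// Hypothetical stand-in mirroring the shape of a substate requirement:
// a 32-byte id plus an optional version.
#[derive(BorshSerialize)]
struct Requirement {
    substate_id: [u8; 32],
    version: Option<u32>,
}

fn main() {
    let req = Requirement { substate_id: [0u8; 32], version: Some(1) };
    let bytes = borsh::to_vec(&req).unwrap();
    // Fixed arrays are written raw, Option adds a 1-byte tag, u32 is 4 bytes LE.
    assert_eq!(bytes.len(), 32 + 1 + 4);
}
```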
diff --git a/dan_layer/consensus/src/hotstuff/common.rs b/dan_layer/consensus/src/hotstuff/common.rs
index cc9053c44..84ba9f363 100644
--- a/dan_layer/consensus/src/hotstuff/common.rs
+++ b/dan_layer/consensus/src/hotstuff/common.rs
@@ -36,7 +36,7 @@ use tari_dan_storage::{
     StorageError,
 };
 use tari_engine_types::substate::SubstateDiff;
-use tari_state_tree::{Hash, JellyfishMerkleTree, StateTreeError};
+use tari_state_tree::{JellyfishMerkleTree, StateTreeError};
 
 use crate::{
     hotstuff::{
@@ -234,7 +234,7 @@ pub fn calculate_state_merkle_root<'a, TTx: StateStoreReadTransaction, I: IntoIt
     shard_group: ShardGroup,
     pending_tree_diffs: HashMap<Shard, Vec<PendingShardStateTreeDiff>>,
     changes: I,
-) -> Result<(Hash, IndexMap<Shard, VersionedStateHashTreeDiff>), StateTreeError> {
+) -> Result<(FixedHash, IndexMap<Shard, VersionedStateHashTreeDiff>), StateTreeError> {
    let mut change_map = IndexMap::new();
 
     changes.into_iter().for_each(|ch| {
@@ -244,7 +244,10 @@ pub fn calculate_state_merkle_root<'a, TTx: StateStoreReadTransaction, I: IntoIt
     let mut sharded_tree = ShardedStateTree::new(tx).with_pending_diffs(pending_tree_diffs);
     let root_hash = sharded_tree.put_substate_tree_changes(shard_group, change_map)?;
 
-    Ok((root_hash, sharded_tree.into_shard_tree_diffs()))
+    Ok((
+        FixedHash::new(root_hash.into_array()),
+        sharded_tree.into_shard_tree_diffs(),
+    ))
 }
 
 pub(crate) fn create_epoch_checkpoint(
diff --git a/dan_layer/consensus/src/hotstuff/eviction_proof.rs b/dan_layer/consensus/src/hotstuff/eviction_proof.rs
index 629c298ce..d5c384250 100644
--- a/dan_layer/consensus/src/hotstuff/eviction_proof.rs
+++ b/dan_layer/consensus/src/hotstuff/eviction_proof.rs
@@ -28,7 +28,7 @@ pub fn generate_eviction_proofs(
 ) -> Result<Vec<EvictionProof>, HotStuffError> {
     let num_evictions = committed_blocks_with_evictions
         .iter()
-        .map(|b| b.all_evict_nodes().count())
+        .map(|b| b.all_node_evictions().count())
         .sum();
 
     let mut proofs = Vec::with_capacity(num_evictions);
@@ -36,11 +36,14 @@ pub fn generate_eviction_proofs(
         // First generate a commit proof for the block which is shared by all EvictionProofs
         let block_commit_proof = generate_block_commit_proof(tx, tip_qc, block)?;
 
-        for atom in block.all_evict_nodes() {
+        for (idx, command) in block.commands().iter().enumerate() {
+            let Some(atom) = command.evict_node() else {
+                continue;
+            };
             info!(target: LOG_TARGET, "🦶 Generating eviction proof for validator: {atom}");
-            // TODO: command inclusion proof
+            let inclusion_proof = block.compute_command_inclusion_proof(idx)?;
             let atom = EvictNodeAtom::new(atom.public_key.clone());
-            let commit_command_proof = CommandCommitProof::new(atom, block_commit_proof.clone());
+            let commit_command_proof = CommandCommitProof::new(atom, block_commit_proof.clone(), inclusion_proof);
             let proof = EvictionProof::new(commit_command_proof);
             proofs.push(proof);
         }
diff --git a/dan_layer/consensus/src/hotstuff/on_ready_to_vote_on_local_block.rs b/dan_layer/consensus/src/hotstuff/on_ready_to_vote_on_local_block.rs
index dc57ca8ab..7d326ce1c 100644
--- a/dan_layer/consensus/src/hotstuff/on_ready_to_vote_on_local_block.rs
+++ b/dan_layer/consensus/src/hotstuff/on_ready_to_vote_on_local_block.rs
@@ -160,7 +160,7 @@ where TConsensusSpec: ConsensusSpec
                 if commit_block.is_epoch_end() {
                     end_of_epoch = Some(commit_block.epoch());
                 }
-                if commit_block.all_evict_nodes().next().is_some() {
+                if commit_block.all_node_evictions().next().is_some() {
                     committed_blocks_with_evictions.push(commit_block);
                 }
                 if !committed.is_empty() {
@@ -1751,7 +1751,7 @@ where TConsensusSpec: ConsensusSpec
             atom.delete(tx)?;
         }
 
-        for atom in block.all_evict_nodes() {
+        for atom in block.all_node_evictions() {
             atom.mark_as_committed_in_epoch(tx, block.epoch())?;
         }
 
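In the `calculate_state_merkle_root` hunk above, conversion now happens at the consensus boundary: the state tree reports a `tari_jellyfish` hash while block headers keep using `tari_common_types::types::FixedHash`. Both wrap a 32-byte array, so the conversion is a plain copy; a self-contained sketch of the shape (stub types, not the real ones):

```rust
// Stub types standing in for tari_jellyfish::TreeHash and FixedHash;
// both are 32-byte newtypes in the real crates.
struct TreeHash([u8; 32]);
struct FixedHash([u8; 32]);

impl TreeHash {
    fn into_array(self) -> [u8; 32] {
        self.0
    }
}

// Mirrors FixedHash::new(root_hash.into_array()) in the hunk above.
fn to_consensus_root(root: TreeHash) -> FixedHash {
    FixedHash(root.into_array())
}

fn main() {
    let root = to_consensus_root(TreeHash([7u8; 32]));
    assert_eq!(root.0, [7u8; 32]);
}
```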
diff --git a/dan_layer/consensus/src/hotstuff/substate_store/sharded_state_tree.rs b/dan_layer/consensus/src/hotstuff/substate_store/sharded_state_tree.rs
index 42c7cb74f..276324506 100644
--- a/dan_layer/consensus/src/hotstuff/substate_store/sharded_state_tree.rs
+++ b/dan_layer/consensus/src/hotstuff/substate_store/sharded_state_tree.rs
@@ -13,7 +13,6 @@ use tari_dan_storage::{
 };
 use tari_state_tree::{
     memory_store::MemoryTreeStore,
-    Hash,
     JmtStorageError,
     RootStateTree,
     SpreadPrefixStateTree,
@@ -21,6 +20,7 @@ use tari_state_tree::{
     StateHashTreeDiff,
     StateTreeError,
     SubstateTreeChange,
+    TreeHash,
     TreeStoreWriter,
     Version,
     SPARSE_MERKLE_PLACEHOLDER_HASH,
@@ -84,7 +84,7 @@ impl<TTx: StateStoreReadTransaction> ShardedStateTree<&TTx> {
         &mut self,
         shard_group: ShardGroup,
         changes: IndexMap<Shard, Vec<SubstateTreeChange>>,
-    ) -> Result<Hash, StateTreeError> {
+    ) -> Result<TreeHash, StateTreeError> {
         let mut shard_state_roots = HashMap::with_capacity(changes.len());
         for (shard, changes) in changes {
             let current_version = self.get_current_version(shard)?;
@@ -113,7 +113,7 @@ impl<TTx: StateStoreReadTransaction> ShardedStateTree<&TTx> {
             let mut state_tree = SpreadPrefixStateTree::new(&mut store);
             debug!(target: LOG_TARGET, "v{next_version} contains {} tree change(s) for shard {shard}", changes.len());
             let shard_state_hash = state_tree.put_substate_changes(current_version, next_version, changes)?;
-            shard_state_roots.insert(shard, shard_state_hash);
+            shard_state_roots.insert(shard, TreeHash::from(shard_state_hash.into_array()));
             self.shard_tree_diffs
                 .insert(shard, VersionedStateHashTreeDiff::new(next_version, store.into_diff()));
         }
@@ -125,8 +125,8 @@ impl<TTx: StateStoreReadTransaction> ShardedStateTree<&TTx> {
     fn get_shard_group_root(
         &self,
         shard_group: ShardGroup,
-        mut shard_state_roots: HashMap<Shard, Hash>,
-    ) -> Result<Hash, StateTreeError> {
+        mut shard_state_roots: HashMap<Shard, TreeHash>,
+    ) -> Result<TreeHash, StateTreeError> {
         let mut mem_store = MemoryTreeStore::new();
         let mut root_tree = RootStateTree::new(&mut mem_store);
         let mut hashes = Vec::with_capacity(shard_group.len());
@@ -143,7 +143,7 @@ impl<TTx: StateStoreReadTransaction> ShardedStateTree<&TTx> {
         Ok(hash)
     }
 
-    fn get_state_root_for_shard(&self, shard: Shard) -> Result<Hash, StateTreeError> {
+    fn get_state_root_for_shard(&self, shard: Shard) -> Result<TreeHash, StateTreeError> {
         let Some(version) = self.get_current_version(shard)? else {
             // At v0 there have been no state changes
             return Ok(SPARSE_MERKLE_PLACEHOLDER_HASH);
diff --git a/dan_layer/consensus/src/hotstuff/worker.rs b/dan_layer/consensus/src/hotstuff/worker.rs
index 9614223f5..5e83650cf 100644
--- a/dan_layer/consensus/src/hotstuff/worker.rs
+++ b/dan_layer/consensus/src/hotstuff/worker.rs
@@ -7,6 +7,7 @@ use std::{
 };
 
 use log::*;
+use tari_common_types::types::FixedHash;
 use tari_dan_common_types::{
     committee::{Committee, CommitteeInfo},
     optional::Optional,
@@ -30,6 +31,7 @@ use tari_dan_storage::{
 };
 use tari_epoch_manager::{EpochManagerEvent, EpochManagerReader};
 use tari_shutdown::ShutdownSignal;
+use tari_state_tree::SPARSE_MERKLE_PLACEHOLDER_HASH;
 use tari_transaction::{Transaction, TransactionId};
 use tokio::sync::{broadcast, mpsc};
 
@@ -940,7 +942,7 @@ impl<TConsensusSpec: ConsensusSpec> HotstuffWorker<TConsensusSpec> {
             let state_merkle_root = checkpoint
                 .map(|cp| cp.compute_state_merkle_root())
                 .transpose()?
-                .unwrap_or_default();
+                .unwrap_or(SPARSE_MERKLE_PLACEHOLDER_HASH);
             // The parent for genesis blocks refer to this zero block
             let mut zero_block = Block::zero_block(self.config.network, self.config.consensus_constants.num_preshards);
             if !zero_block.exists(&**tx)? {
@@ -955,7 +957,7 @@ impl<TConsensusSpec: ConsensusSpec> HotstuffWorker<TConsensusSpec> {
                 self.config.network,
                 epoch,
                 shard_group,
-                state_merkle_root,
+                FixedHash::from(state_merkle_root.into_array()),
                 self.config.sidechain_id.clone(),
            );
             if !genesis.exists(&**tx)? {
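The `worker.rs` hunk replaces `unwrap_or_default()` (an all-zero `FixedHash`) with the tree's own `SPARSE_MERKLE_PLACEHOLDER_HASH`, so a genesis block without a checkpoint records the same root an empty jellyfish tree would actually report. Roughly (stub constant; the real value is whatever `tari_jellyfish` defines for the empty tree):

```rust
// Arbitrary stand-in for tari_jellyfish's placeholder root; only the
// fallback logic is being illustrated here.
const SPARSE_MERKLE_PLACEHOLDER_HASH: [u8; 32] = [0xAA; 32];

// Mirrors the fallback in HotstuffWorker: use the checkpoint's root if one
// exists, otherwise the canonical empty-tree root.
fn genesis_state_root(checkpoint_root: Option<[u8; 32]>) -> [u8; 32] {
    checkpoint_root.unwrap_or(SPARSE_MERKLE_PLACEHOLDER_HASH)
}

fn main() {
    assert_eq!(genesis_state_root(None), SPARSE_MERKLE_PLACEHOLDER_HASH);
    assert_eq!(genesis_state_root(Some([1; 32])), [1; 32]);
}
```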
diff --git a/dan_layer/consensus_tests/Cargo.toml b/dan_layer/consensus_tests/Cargo.toml
index f44564edb..a260ae958 100644
--- a/dan_layer/consensus_tests/Cargo.toml
+++ b/dan_layer/consensus_tests/Cargo.toml
@@ -36,5 +36,5 @@ rand = { workspace = true }
 futures = { workspace = true }
 fern = { workspace = true }
 humantime = { workspace = true }
-itertools = { workspace = true }
+itertools = "0.13.0"
 serde_json = { workspace = true }
diff --git a/dan_layer/engine_types/Cargo.toml b/dan_layer/engine_types/Cargo.toml
index 3917f3632..040f2edd5 100644
--- a/dan_layer/engine_types/Cargo.toml
+++ b/dan_layer/engine_types/Cargo.toml
@@ -14,12 +14,12 @@ tari_common_types = { workspace = true }
 tari_crypto = { workspace = true, features = ["borsh"] }
 tari_hashing = { workspace = true }
 tari_template_abi = { workspace = true, features = ["std"] }
-tari_template_lib = { workspace = true }
-tari_utilities = { workspace = true }
+tari_template_lib = { workspace = true, features = ["borsh"] }
 
 # if we set this version in the workspace it would break other crates
 base64 = "0.21.0"
 blake2 = { workspace = true }
+borsh = { workspace = true }
 rand = { workspace = true }
 digest = { workspace = true }
 hex = { workspace = true, features = ["serde"] }
@@ -32,7 +32,7 @@ ts-rs = { workspace = true, optional = true }
 
 [features]
 default = []
-ts = ["ts-rs"]
+ts = ["ts-rs", "tari_template_lib/ts", "tari_template_abi/ts"]
 # This feature is used to temporarily fix the issue with the ts-rs crate. Because when we run cargo test --all-feature
 # it will trigger the ts files generation. But there are some problems that are fixed during the npm run build. But
 # not on cargo test.
diff --git a/dan_layer/engine_types/src/confidential/elgamal.rs b/dan_layer/engine_types/src/confidential/elgamal.rs
index f739a1e0a..a2bc1dd7d 100644
--- a/dan_layer/engine_types/src/confidential/elgamal.rs
+++ b/dan_layer/engine_types/src/confidential/elgamal.rs
@@ -5,8 +5,7 @@ use std::convert;
 
 use tari_bor::{Deserialize, Serialize};
 use tari_common_types::types::{PrivateKey, PublicKey};
-use tari_crypto::keys::PublicKey as _;
-use tari_utilities::ByteArray;
+use tari_crypto::{keys::PublicKey as _, tari_utilities::ByteArray};
 
 use crate::confidential::value_lookup_table::ValueLookupTable;
diff --git a/dan_layer/engine_types/src/fee_claim.rs b/dan_layer/engine_types/src/fee_claim.rs
index 79649cfe2..429350834 100644
--- a/dan_layer/engine_types/src/fee_claim.rs
+++ b/dan_layer/engine_types/src/fee_claim.rs
@@ -66,6 +66,12 @@ impl FromStr for FeeClaimAddress {
     }
 }
 
+impl borsh::BorshSerialize for FeeClaimAddress {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        borsh::BorshSerialize::serialize(self.as_object_key().array(), writer)
+    }
+}
+
 #[derive(Debug, Clone, Serialize, Deserialize)]
 #[cfg_attr(feature = "ts", derive(TS), ts(export, export_to = "../../bindings/src/types/"))]
 pub struct FeeClaim {
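The manual `BorshSerialize` impl above (and the matching one for `TransactionReceiptAddress` further down) serializes only the underlying 32-byte object key, so the Borsh encoding of an address is exactly its key bytes rather than a derived struct layout. The pattern, with stub types:

```rust
use std::io::{Result as IoResult, Write};

struct ObjectKey([u8; 32]);

// Stub mirroring FeeClaimAddress: a newtype over an object key.
struct Address(ObjectKey);

impl Address {
    fn as_object_key(&self) -> &ObjectKey {
        &self.0
    }
}

impl borsh::BorshSerialize for Address {
    fn serialize<W: Write>(&self, writer: &mut W) -> IoResult<()> {
        // Delegate to the [u8; 32] impl, as the diff does via .array().
        borsh::BorshSerialize::serialize(&self.as_object_key().0, writer)
    }
}

fn main() {
    let bytes = borsh::to_vec(&Address(ObjectKey([3u8; 32]))).unwrap();
    assert_eq!(bytes, vec![3u8; 32]); // no tag, no length prefix
}
```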
diff --git a/dan_layer/engine_types/src/instruction.rs b/dan_layer/engine_types/src/instruction.rs
index 1af7ebc6c..e8ed1f79d 100644
--- a/dan_layer/engine_types/src/instruction.rs
+++ b/dan_layer/engine_types/src/instruction.rs
@@ -5,7 +5,6 @@ use std::fmt::{Display, Formatter};
 
 use serde::{Deserialize, Serialize};
 use tari_common_types::types::PublicKey;
-use tari_crypto::tari_utilities::hex::Hex;
 use tari_template_lib::{
     args::{Arg, LogLevel},
     auth::OwnerRule,
@@ -122,9 +121,9 @@ impl Display for Instruction {
                     f,
                     "ClaimBurn {{ commitment_address: {}, proof_of_knowledge: nonce({}), u({}) v({}) }}",
                     claim.output_address,
-                    claim.proof_of_knowledge.public_nonce().to_hex(),
-                    claim.proof_of_knowledge.u().to_hex(),
-                    claim.proof_of_knowledge.v().to_hex()
+                    claim.proof_of_knowledge.public_nonce().as_public_key(),
+                    claim.proof_of_knowledge.u().reveal(),
+                    claim.proof_of_knowledge.v().reveal(),
                 )
             },
             Self::ClaimValidatorFees {
diff --git a/dan_layer/engine_types/src/serde_with/hex.rs b/dan_layer/engine_types/src/serde_with/hex.rs
index b14272647..6e4c3da2b 100644
--- a/dan_layer/engine_types/src/serde_with/hex.rs
+++ b/dan_layer/engine_types/src/serde_with/hex.rs
@@ -21,11 +21,10 @@
 // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 use serde::{Deserialize, Deserializer, Serializer};
-use tari_utilities::hex::{from_hex, to_hex};
 
 pub fn serialize<S: Serializer, T: AsRef<[u8]>>(v: &T, s: S) -> Result<S::Ok, S::Error> {
     if s.is_human_readable() {
-        let st = to_hex(v.as_ref());
+        let st = hex::encode(v.as_ref());
         s.serialize_str(&st)
     } else {
         s.serialize_bytes(v.as_ref())
@@ -40,7 +39,7 @@ where
 {
     let bytes = if d.is_human_readable() {
         let hex = <String as Deserialize>::deserialize(d)?;
-        from_hex(&hex).map_err(serde::de::Error::custom)?
+        hex::decode(&hex).map_err(serde::de::Error::custom)?
     } else {
         <Vec<u8> as Deserialize>::deserialize(d)?
     };
@@ -58,7 +57,7 @@ pub mod vec {
         if s.is_human_readable() {
             let mut seq = s.serialize_seq(Some(v.len()))?;
             for item in v {
-                seq.serialize_element(&to_hex(item.as_ref()))?;
+                seq.serialize_element(&hex::encode(item.as_ref()))?;
             }
             seq.end()
         } else {
@@ -78,7 +77,7 @@ pub mod vec {
         let vec = if d.is_human_readable() {
             let strs = <Vec<String> as Deserialize>::deserialize(d)?;
             strs.iter()
-                .map(|s| from_hex(s).map_err(serde::de::Error::custom))
+                .map(|s| hex::decode(s).map_err(serde::de::Error::custom))
                 .collect::<Result<Vec<_>, _>>()?
         } else {
             <Vec<Vec<u8>> as Deserialize>::deserialize(d)?
@@ -99,7 +98,7 @@ pub mod option {
         if s.is_human_readable() {
             match v {
                 Some(v) => {
-                    let st = to_hex(v.as_ref());
+                    let st = hex::encode(v.as_ref());
                     s.serialize_some(&st)
                 },
                 None => s.serialize_none(),
@@ -120,7 +119,7 @@ pub mod option {
         let bytes = if d.is_human_readable() {
             let hex = <Option<String> as Deserialize>::deserialize(d)?;
             hex.as_ref()
-                .map(|s| from_hex(s))
+                .map(hex::decode)
                 .transpose()
                 .map_err(serde::de::Error::custom)?
         } else {
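`serde_with/hex.rs` swaps `tari_utilities::hex::{from_hex, to_hex}` for the `hex` crate; both emit lowercase hex, so the serialized form should be unchanged. A quick round-trip check:

```rust
fn main() {
    let bytes = [0xde, 0xad, 0xbe, 0xef];
    let encoded = hex::encode(bytes); // lowercase, like tari_utilities' to_hex
    assert_eq!(encoded, "deadbeef");
    let decoded = hex::decode(&encoded).unwrap();
    assert_eq!(decoded, bytes);
}
```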
diff --git a/dan_layer/engine_types/src/substate.rs b/dan_layer/engine_types/src/substate.rs
index 040e1cb21..038c0ed5f 100644
--- a/dan_layer/engine_types/src/substate.rs
+++ b/dan_layer/engine_types/src/substate.rs
@@ -25,6 +25,7 @@ use std::{
     str::FromStr,
 };
 
+use borsh::BorshSerialize;
 use serde::{Deserialize, Serialize};
 use tari_bor::{decode, decode_exact, encode, BorError};
 use tari_common_types::types::FixedHash;
@@ -41,8 +42,6 @@ use tari_template_lib::{
     prelude::PUBLIC_IDENTITY_RESOURCE_ADDRESS,
     Hash,
 };
-#[cfg(feature = "ts")]
-use ts_rs::TS;
 
 use crate::{
     component::ComponentHeader,
@@ -59,7 +58,11 @@ use crate::{
 };
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
-#[cfg_attr(feature = "ts", derive(TS), ts(export, export_to = "../../bindings/src/types/"))]
+#[cfg_attr(
+    feature = "ts",
+    derive(ts_rs::TS),
+    ts(export, export_to = "../../bindings/src/types/")
+)]
 pub struct Substate {
     substate: SubstateValue,
     version: u32,
@@ -108,8 +111,12 @@ pub fn hash_substate(substate: &SubstateValue, version: u32) -> FixedHash {
 }
 
 /// Base object address, version tuples
-#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
-#[cfg_attr(feature = "ts", derive(TS), ts(export, export_to = "../../bindings/src/types/"))]
+#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, BorshSerialize)]
+#[cfg_attr(
+    feature = "ts",
+    derive(ts_rs::TS),
+    ts(export, export_to = "../../bindings/src/types/")
+)]
 pub enum SubstateId {
     Component(#[serde(with = "serde_with::string")] ComponentAddress),
     Resource(#[serde(with = "serde_with::string")] ResourceAddress),
@@ -442,7 +449,11 @@ impl_partial_eq!(FeeClaimAddress, FeeClaim);
 impl_partial_eq!(PublishedTemplateAddress, Template);
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
-#[cfg_attr(feature = "ts", derive(TS), ts(export, export_to = "../../bindings/src/types/"))]
+#[cfg_attr(
+    feature = "ts",
+    derive(ts_rs::TS),
+    ts(export, export_to = "../../bindings/src/types/")
+)]
 pub enum SubstateValue {
     Component(ComponentHeader),
     Resource(Resource),
@@ -698,7 +709,11 @@ impl Display for SubstateValue {
 }
 
 #[derive(Debug, Clone, Default, Serialize, Deserialize)]
-#[cfg_attr(feature = "ts", derive(TS), ts(export, export_to = "../../bindings/src/types/"))]
+#[cfg_attr(
+    feature = "ts",
+    derive(ts_rs::TS),
+    ts(export, export_to = "../../bindings/src/types/")
+)]
 pub struct SubstateDiff {
     up_substates: Vec<(SubstateId, Substate)>,
     down_substates: Vec<(SubstateId, u32)>,
diff --git a/dan_layer/engine_types/src/transaction_receipt.rs b/dan_layer/engine_types/src/transaction_receipt.rs
index 23d8b3d12..b12df887c 100644
--- a/dan_layer/engine_types/src/transaction_receipt.rs
+++ b/dan_layer/engine_types/src/transaction_receipt.rs
@@ -60,6 +60,12 @@ impl FromStr for TransactionReceiptAddress {
     }
 }
 
+impl borsh::BorshSerialize for TransactionReceiptAddress {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        borsh::BorshSerialize::serialize(self.as_object_key().array(), writer)
+    }
+}
+
 #[derive(Debug, Clone, Serialize, Deserialize)]
 #[cfg_attr(feature = "ts", derive(TS), ts(export, export_to = "../../bindings/src/types/"))]
 pub struct TransactionReceipt {
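`SubstateId` now derives `BorshSerialize` as well. Borsh encodes an enum as a one-byte variant index followed by the variant's fields, so variant order becomes part of the hashed encoding; a small sketch with a stub enum:

```rust
use borsh::BorshSerialize;

// Stub with the shape of SubstateId's first two variants.
#[derive(BorshSerialize)]
enum Id {
    Component([u8; 32]),
    Resource([u8; 32]),
}

fn main() {
    let bytes = borsh::to_vec(&Id::Resource([7u8; 32])).unwrap();
    assert_eq!(bytes[0], 1); // variant index of Resource
    assert_eq!(bytes.len(), 1 + 32);
}
```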
diff --git a/dan_layer/p2p/Cargo.toml b/dan_layer/p2p/Cargo.toml
index 766dc215a..690ade552 100644
--- a/dan_layer/p2p/Cargo.toml
+++ b/dan_layer/p2p/Cargo.toml
@@ -18,6 +18,7 @@ tari_engine_types = { workspace = true }
 tari_networking = { workspace = true }
 tari_template_lib = { workspace = true }
 tari_transaction = { workspace = true }
+tari_jellyfish = { workspace = true }
 
 anyhow = { workspace = true }
 serde = { workspace = true, default-features = true }
diff --git a/dan_layer/p2p/src/conversions/rpc.rs b/dan_layer/p2p/src/conversions/rpc.rs
index aee2f0405..097b1494b 100644
--- a/dan_layer/p2p/src/conversions/rpc.rs
+++ b/dan_layer/p2p/src/conversions/rpc.rs
@@ -4,7 +4,6 @@ use std::convert::{TryFrom, TryInto};
 
 use anyhow::anyhow;
-use tari_common_types::types::FixedHash;
 use tari_dan_common_types::{shard::Shard, Epoch};
 use tari_dan_storage::consensus_models::{
     EpochCheckpoint,
@@ -16,6 +15,7 @@ use tari_dan_storage::consensus_models::{
     SubstateUpdate,
 };
 use tari_engine_types::substate::{SubstateId, SubstateValue};
+use tari_jellyfish::TreeHash;
 
 use crate::proto;
 
@@ -183,7 +183,7 @@ impl TryFrom<proto::rpc::EpochCheckpoint> for EpochCheckpoint {
         let shard_roots = value
             .shard_roots
             .into_iter()
-            .map(|(k, v)| FixedHash::try_from(v).map(|h| (Shard::from(k), h)))
+            .map(|(k, v)| TreeHash::try_from_bytes(&v).map(|h| (Shard::from(k), h)))
             .collect::<Result<_, _>>()?;
 
         Ok(Self::new(
diff --git a/dan_layer/rpc_state_sync/src/error.rs b/dan_layer/rpc_state_sync/src/error.rs
index 27173b24f..7c69efb40 100644
--- a/dan_layer/rpc_state_sync/src/error.rs
+++ b/dan_layer/rpc_state_sync/src/error.rs
@@ -8,7 +8,7 @@ use tari_dan_storage::{
 };
 use tari_epoch_manager::EpochManagerError;
 use tari_rpc_framework::{RpcError, RpcStatus};
-use tari_state_tree::{Hash, JmtStorageError};
+use tari_state_tree::{JmtStorageError, TreeHash};
 use tari_validator_node_rpc::ValidatorNodeRpcClientError;
 
 #[derive(Debug, thiserror::Error)]
@@ -34,7 +34,7 @@ pub enum CommsRpcConsensusSyncError {
     #[error("State tree error: {0}")]
     StateTreeError(#[from] tari_state_tree::StateTreeError),
     #[error("State root mismatch. Expected: {expected}, actual: {actual}")]
-    StateRootMismatch { expected: Hash, actual: Hash },
+    StateRootMismatch { expected: TreeHash, actual: TreeHash },
 }
 
 impl CommsRpcConsensusSyncError {
diff --git a/dan_layer/rpc_state_sync/src/manager.rs b/dan_layer/rpc_state_sync/src/manager.rs
index 21e16773d..bf94441cd 100644
--- a/dan_layer/rpc_state_sync/src/manager.rs
+++ b/dan_layer/rpc_state_sync/src/manager.rs
@@ -42,7 +42,7 @@ use tari_dan_storage::{
 use tari_engine_types::substate::hash_substate;
 use tari_epoch_manager::EpochManagerReader;
 use tari_rpc_framework::RpcError;
-use tari_state_tree::{Hash, SpreadPrefixStateTree, SubstateTreeChange, Version, SPARSE_MERKLE_PLACEHOLDER_HASH};
+use tari_state_tree::{SpreadPrefixStateTree, SubstateTreeChange, TreeHash, Version, SPARSE_MERKLE_PLACEHOLDER_HASH};
 use tari_validator_node_rpc::{
     client::{TariValidatorNodeRpcClientFactory, ValidatorNodeClientFactory},
     rpc_service::ValidatorNodeRpcClient,
@@ -249,7 +249,7 @@ where TConsensusSpec: ConsensusSpec
         &self,
         shard: Shard,
         version: Option<Version>,
-    ) -> Result<Hash, CommsRpcConsensusSyncError> {
+    ) -> Result<TreeHash, CommsRpcConsensusSyncError> {
         let Some(version) = version else {
             return Ok(SPARSE_MERKLE_PLACEHOLDER_HASH);
         };
@@ -457,7 +457,7 @@ where TConsensusSpec: ConsensusSpec + Send + Sync + 'static
                     actual = state_root,
                 );
                 last_error = Some(CommsRpcConsensusSyncError::StateRootMismatch {
-                    expected: *checkpoint.block().state_merkle_root(),
+                    expected: TreeHash::from(checkpoint.block().state_merkle_root().into_array()),
                    actual: state_root,
                 });
                 // TODO: rollback state
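The proto conversion above now checks shard roots into a `TreeHash` via `try_from_bytes`, since bytes off the wire are length-unchecked. The shape of that fallible conversion (stub type and error, not the real API):

```rust
struct TreeHash([u8; 32]);

impl TreeHash {
    // Stub sketch of a fallible bytes -> hash constructor.
    fn try_from_bytes(bytes: &[u8]) -> Result<Self, String> {
        let arr: [u8; 32] = bytes
            .try_into()
            .map_err(|_| format!("expected 32 bytes, got {}", bytes.len()))?;
        Ok(Self(arr))
    }
}

fn main() {
    assert!(TreeHash::try_from_bytes(&[0u8; 32]).is_ok());
    assert!(TreeHash::try_from_bytes(&[0u8; 31]).is_err());
}
```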
diff --git a/dan_layer/state_tree/Cargo.toml b/dan_layer/state_tree/Cargo.toml
index 5423c461f..f2e44f379 100644
--- a/dan_layer/state_tree/Cargo.toml
+++ b/dan_layer/state_tree/Cargo.toml
@@ -7,19 +7,15 @@ repository.workspace = true
 license.workspace = true
 
 [dependencies]
+tari_common_types = { workspace = true }
 tari_dan_common_types = { workspace = true }
 tari_engine_types = { workspace = true }
+tari_jellyfish = { workspace = true }
 tari_template_lib = { workspace = true }
-tari_common_types = { workspace = true }
-tari_crypto = { workspace = true }
-tari_bor = { workspace = true }
-blake2 = { workspace = true }
 thiserror = { workspace = true }
 serde = { workspace = true, features = ["derive"] }
 log = { workspace = true }
-indexmap = { workspace = true, features = ["serde"] }
 
 [dev-dependencies]
-indexmap = { workspace = true }
-itertools = { workspace = true }
\ No newline at end of file
+indexmap = { workspace = true }
\ No newline at end of file
diff --git a/dan_layer/state_tree/src/bit_iter.rs b/dan_layer/state_tree/src/bit_iter.rs
deleted file mode 100644
index bd77b2b89..000000000
--- a/dan_layer/state_tree/src/bit_iter.rs
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2024 The Tari Project
-// SPDX-License-Identifier: BSD-3-Clause
-
-use std::ops::Range;
-
-/// An iterator over a hash value that generates one bit for each iteration.
-pub struct BitIterator<'a> {
-    /// The reference to the bytes that represent the `HashValue`.
-    bytes: &'a [u8],
-    pos: Range<usize>,
-    // invariant hash_bytes.len() == HashValue::LENGTH;
-    // invariant pos.end == hash_bytes.len() * 8;
-}
-
-impl<'a> BitIterator<'a> {
-    /// Constructs a new `BitIterator` using given `HashValue`.
-    pub fn new(bytes: &'a [u8]) -> Self {
-        BitIterator {
-            bytes,
-            pos: 0..bytes.len() * 8,
-        }
-    }
-
-    /// Returns the `index`-th bit in the bytes.
-    fn get_bit(&self, index: usize) -> bool {
-        // MIRAI annotations - important?
-        // assume!(index < self.pos.end); // assumed precondition
-        // assume!(self.hash_bytes.len() == 32); // invariant
-        // assume!(self.pos.end == self.hash_bytes.len() * 8); // invariant
-        let pos = index / 8;
-        let bit = 7 - index % 8;
-        (self.bytes[pos] >> bit) & 1 != 0
-    }
-}
-
-impl<'a> Iterator for BitIterator<'a> {
-    type Item = bool;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        self.pos.next().map(|x| self.get_bit(x))
-    }
-
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        self.pos.size_hint()
-    }
-}
-
-impl<'a> DoubleEndedIterator for BitIterator<'a> {
-    fn next_back(&mut self) -> Option<Self::Item> {
-        self.pos.next_back().map(|x| self.get_bit(x))
-    }
-}
-
-impl<'a> ExactSizeIterator for BitIterator<'a> {}
diff --git a/dan_layer/state_tree/src/error.rs b/dan_layer/state_tree/src/error.rs
index e911e256c..f04f8bcde 100644
--- a/dan_layer/state_tree/src/error.rs
+++ b/dan_layer/state_tree/src/error.rs
@@ -2,8 +2,7 @@
 // SPDX-License-Identifier: BSD-3-Clause
 
 use tari_dan_common_types::optional::IsNotFoundError;
-
-use crate::jellyfish::JmtStorageError;
+use tari_jellyfish::JmtStorageError;
 
 #[derive(Debug, thiserror::Error)]
 pub enum StateTreeError {
@@ -13,9 +12,6 @@ pub enum StateTreeError {
 
 impl IsNotFoundError for StateTreeError {
     fn is_not_found_error(&self) -> bool {
-        #[allow(clippy::single_match)]
-        match self {
-            StateTreeError::JmtStorageError(err) => err.is_not_found_error(),
-        }
+        matches!(self, StateTreeError::JmtStorageError(JmtStorageError::NotFound(_)))
     }
 }
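The rewritten `is_not_found_error` above matches the storage error's `NotFound` variant directly instead of delegating through the inner error's trait impl; with a single-variant wrapper this collapses to one `matches!`. An equivalent stub (variant names hypothetical except `NotFound`):

```rust
enum JmtStorageError {
    NotFound(String),
    Other(String),
}

enum StateTreeError {
    JmtStorageError(JmtStorageError),
}

fn is_not_found_error(e: &StateTreeError) -> bool {
    matches!(e, StateTreeError::JmtStorageError(JmtStorageError::NotFound(_)))
}

fn main() {
    let not_found = StateTreeError::JmtStorageError(JmtStorageError::NotFound("key".into()));
    let other = StateTreeError::JmtStorageError(JmtStorageError::Other("io".into()));
    assert!(is_not_found_error(&not_found));
    assert!(!is_not_found_error(&other));
}
```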
Merkle Tree proof has more than 256 ({num_siblings}) siblings.")] - TooManySiblings { num_siblings: usize }, - #[error("Keys do not match. Key in proof: {actual_key}. Expected key: {expected_key}.")] - KeyMismatch { actual_key: LeafKey, expected_key: LeafKey }, - #[error("Value hashes do not match. Value hash in proof: {actual}. Expected value hash: {expected}.")] - ValueMismatch { actual: Hash, expected: Hash }, - #[error("Expected inclusion proof. Found non-inclusion proof.")] - ExpectedInclusionProof, - #[error("Expected non-inclusion proof, but key exists in proof.")] - ExpectedNonInclusionProof, - #[error( - "Key would not have ended up in the subtree where the provided key in proof is the only existing key, if it \ - existed. So this is not a valid non-inclusion proof." - )] - InvalidNonInclusionProof, - #[error( - "Root hashes do not match. Actual root hash: {actual_root_hash}. Expected root hash: {expected_root_hash}." - )] - RootHashMismatch { - actual_root_hash: Hash, - expected_root_hash: Hash, - }, -} diff --git a/dan_layer/state_tree/src/jellyfish/hash.rs b/dan_layer/state_tree/src/jellyfish/hash.rs deleted file mode 100644 index 823dbb6e5..000000000 --- a/dan_layer/state_tree/src/jellyfish/hash.rs +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2024 The Tari Project -// SPDX-License-Identifier: BSD-3-Clause - -// Copyright 2022. The Tari Project -// -// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the -// following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following -// disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the -// following disclaimer in the documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote -// products derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -use std::{ - convert::TryFrom, - fmt::{Display, Formatter}, - ops::{Deref, DerefMut}, -}; - -const ZERO_HASH: [u8; TreeHash::byte_size()] = [0u8; TreeHash::byte_size()]; - -#[derive(thiserror::Error, Debug)] -#[error("Invalid size")] -pub struct TreeHashSizeError; - -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug, Default, Hash)] -pub struct TreeHash([u8; TreeHash::byte_size()]); - -impl TreeHash { - pub const fn new(hash: [u8; TreeHash::byte_size()]) -> Self { - Self(hash) - } - - pub const fn byte_size() -> usize { - 32 - } - - pub const fn zero() -> Self { - Self(ZERO_HASH) - } - - pub fn as_slice(&self) -> &[u8] { - &self.0 - } -} - -impl From<[u8; TreeHash::byte_size()]> for TreeHash { - fn from(hash: [u8; TreeHash::byte_size()]) -> Self { - Self(hash) - } -} - -impl TryFrom> for TreeHash { - type Error = TreeHashSizeError; - - fn try_from(value: Vec) -> Result { - TryFrom::try_from(value.as_slice()) - } -} - -impl TryFrom<&[u8]> for TreeHash { - type Error = TreeHashSizeError; - - fn try_from(bytes: &[u8]) -> Result { - if bytes.len() != TreeHash::byte_size() { - return Err(TreeHashSizeError); - } - - let mut buf = [0u8; TreeHash::byte_size()]; - buf.copy_from_slice(bytes); - Ok(Self(buf)) - } -} - -impl PartialEq<[u8]> for TreeHash { - fn eq(&self, other: &[u8]) -> bool { - self.0[..].eq(other) - } -} - -impl PartialEq for [u8] { - fn eq(&self, other: &TreeHash) -> bool { - self[..].eq(&other.0) - } -} - -impl PartialEq> for TreeHash { - fn eq(&self, other: &Vec) -> bool { - self == other.as_slice() - } -} -impl PartialEq for Vec { - fn eq(&self, other: &TreeHash) -> bool { - self == other.as_slice() - } -} - -impl AsRef<[u8]> for TreeHash { - fn as_ref(&self) -> &[u8] { - self.as_slice() - } -} - -impl Deref for TreeHash { - type Target = [u8; TreeHash::byte_size()]; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl DerefMut for TreeHash { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - -impl Display for TreeHash { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - hex::encode(&self.0).fmt(f) - } -} diff --git a/dan_layer/state_tree/src/jellyfish/mod.rs b/dan_layer/state_tree/src/jellyfish/mod.rs deleted file mode 100644 index 54b5754c4..000000000 --- a/dan_layer/state_tree/src/jellyfish/mod.rs +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2024 The Tari Project -// SPDX-License-Identifier: BSD-3-Clause - -// mod hash; -// pub use hash::*; - -mod tree; -pub use tree::*; - -mod types; -pub use types::*; - -mod error; -mod store; - -pub use store::*; diff --git a/dan_layer/state_tree/src/jellyfish/store.rs b/dan_layer/state_tree/src/jellyfish/store.rs deleted file mode 100644 index 7fa23663c..000000000 --- a/dan_layer/state_tree/src/jellyfish/store.rs +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2024 The Tari Project -// SPDX-License-Identifier: BSD-3-Clause - -use serde::{Deserialize, Serialize}; - -use crate::jellyfish::{JmtStorageError, Node, NodeKey}; - -/// Implementers are able to read nodes from a tree store. -pub trait TreeStoreReader

{ - /// Gets node by key, if it exists. - fn get_node(&self, key: &NodeKey) -> Result, JmtStorageError>; -} - -/// Implementers are able to insert nodes to a tree store. -pub trait TreeStoreWriter

{ - /// Inserts the node under a new, unique key (i.e. never an update). - fn insert_node(&mut self, key: NodeKey, node: Node

) -> Result<(), JmtStorageError>; - - /// Marks the given tree part for a (potential) future removal by an arbitrary external pruning - /// process. - fn record_stale_tree_node(&mut self, part: StaleTreeNode) -> Result<(), JmtStorageError>; -} - -/// Implementers are able to read and write nodes to a tree store. -pub trait TreeStore

: TreeStoreReader

+ TreeStoreWriter

{} -impl + TreeStoreWriter

> TreeStore

for S {} - -/// A part of a tree that may become stale (i.e. need eventual pruning). -#[derive(Clone, PartialEq, Eq, Hash, Debug, Serialize, Deserialize)] -pub enum StaleTreeNode { - /// A single node to be removed. - Node(NodeKey), - /// An entire subtree of descendants of a specific node (including itself). - Subtree(NodeKey), -} - -impl StaleTreeNode { - pub fn into_node_key(self) -> NodeKey { - match self { - Self::Node(key) | Self::Subtree(key) => key, - } - } - - pub fn as_node_key(&self) -> &NodeKey { - match self { - Self::Node(key) | Self::Subtree(key) => key, - } - } -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub enum TreeNode

{ - V1(Node

), -} - -impl

TreeNode

{ - pub fn new_latest(node: Node

) -> Self { - Self::new_v1(node) - } - - pub fn new_v1(node: Node

) -> Self { - Self::V1(node) - } - - pub fn as_node(&self) -> &Node

{ - match self { - Self::V1(node) => node, - } - } - - pub fn into_node(self) -> Node

{ - match self { - Self::V1(node) => node, - } - } -} diff --git a/dan_layer/state_tree/src/jellyfish/tree.rs b/dan_layer/state_tree/src/jellyfish/tree.rs deleted file mode 100644 index 2636d32e5..000000000 --- a/dan_layer/state_tree/src/jellyfish/tree.rs +++ /dev/null @@ -1,790 +0,0 @@ -// Copyright 2024 The Tari Project -// SPDX-License-Identifier: BSD-3-Clause - -// Copyright 2021 Radix Publishing Ltd incorporated in Jersey (Channel Islands). -// -// Licensed under the Radix License, Version 1.0 (the "License"); you may not use this -// file except in compliance with the License. You may obtain a copy of the License at: -// -// radixfoundation.org/licenses/LICENSE-v1 -// -// The Licensor hereby grants permission for the Canonical version of the Work to be -// published, distributed and used under or by reference to the Licensor's trademark -// Radix ® and use of any unregistered trade names, logos or get-up. -// -// The Licensor provides the Work (and each Contributor provides its Contributions) on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -// including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, -// MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. -// -// Whilst the Work is capable of being deployed, used and adopted (instantiated) to create -// a distributed ledger it is your responsibility to test and validate the code, together -// with all logic and performance of that code under all foreseeable scenarios. -// -// The Licensor does not make or purport to make and hereby excludes liability for all -// and any representation, warranty or undertaking in any form whatsoever, whether express -// or implied, to any entity or person, including any representation, warranty or -// undertaking, as to the functionality security use, value or other characteristics of -// any distributed ledger nor in respect the functioning or value of any tokens which may -// be created stored or transferred using the Work. The Licensor does not warrant that the -// Work or any use of the Work complies with any law or regulation in any territory where -// it may be implemented or used or that it will be appropriate for any specific purpose. -// -// Neither the licensor nor any current or former employees, officers, directors, partners, -// trustees, representatives, agents, advisors, contractors, or volunteers of the Licensor -// shall be liable for any direct or indirect, special, incidental, consequential or other -// losses of any kind, in tort, contract or otherwise (including but not limited to loss -// of revenue, income or profits, or loss of use or data, or loss of reputation, or loss -// of any economic or other opportunity of whatsoever nature or howsoever arising), arising -// out of or in connection with (without limitation of any use, misuse, of any ledger system -// or use made or its functionality or any performance or operation of any code or protocol -// caused by bugs or programming or logic errors or otherwise); -// -// A. any offer, purchase, holding, use, sale, exchange or transmission of any -// cryptographic keys, tokens or assets created, exchanged, stored or arising from any -// interaction with the Work; -// -// B. any failure in a transmission or loss of any token or assets keys or other digital -// artefacts due to errors in transmission; -// -// C. bugs, hacks, logic errors or faults in the Work or any communication; -// -// D. 
system software or apparatus including but not limited to losses caused by errors -// in holding or transmitting tokens by any third-party; -// -// E. breaches or failure of security including hacker attacks, loss or disclosure of -// password, loss of private key, unauthorised use or misuse of such passwords or keys; -// -// F. any losses including loss of anticipated savings or other benefits resulting from -// use of the Work or any changes to the Work (however implemented). -// -// You are solely responsible for; testing, validating and evaluation of all operation -// logic, functionality, security and appropriateness of using the Work for any commercial -// or non-commercial purpose and for any reproduction or redistribution by You of the -// Work. You assume all risks associated with Your use of the Work and the exercise of -// permissions under this License. - -// This file contains code sourced from https://github.com/aptos-labs/aptos-core/tree/1.0.4 -// This original source is licensed under https://github.com/aptos-labs/aptos-core/blob/1.0.4/LICENSE -// -// The code in this file has been implemented by Radix® pursuant to an Apache 2 licence and has -// been modified by Radix® and is now licensed pursuant to the Radix® Open-Source Licence. -// -// Each sourced code fragment includes an inline attribution to the original source file in a -// comment starting "SOURCE: ..." -// -// Modifications from the original source are captured in two places: -// * Initial changes to get the code functional/integrated are marked by inline "INITIAL-MODIFICATION: ..." comments -// * Subsequent changes to the code are captured in the git commit history -// -// The following notice is retained from the original source -// Copyright (c) Aptos -// SPDX-License-Identifier: Apache-2.0 - -use std::{ - collections::{BTreeMap, HashMap}, - marker::PhantomData, -}; - -use super::{ - store::TreeStoreReader, - types::{ - Child, - InternalNode, - IteratedLeafKey, - JmtStorageError, - LeafKey, - LeafNode, - Nibble, - NibblePath, - Node, - NodeKey, - SparseMerkleProof, - SparseMerkleProofExt, - SparseMerkleRangeProof, - Version, - SPARSE_MERKLE_PLACEHOLDER_HASH, - }, - Hash, - LeafKeyRef, -}; - -// INITIAL-MODIFICATION: the original used a known key size (32) as a limit -const SANITY_NIBBLE_LIMIT: usize = 1000; - -pub type ProofValue

= (Hash, P, Version); - -// SOURCE: https://github.com/radixdlt/radixdlt-scrypto/blob/ca8e553c31a956c0851c1855291efe4a47fb5c97/radix-engine-stores/src/hash_tree/jellyfish.rs -// SOURCE: https://github.com/aptos-labs/aptos-core/blob/1.0.4/storage/jellyfish-merkle/src/lib.rs#L329 -/// The Jellyfish Merkle tree data structure. See [`crate`] for description. -pub struct JellyfishMerkleTree<'a, R, P> { - reader: &'a R, - _payload: PhantomData

, -} - -impl<'a, R: 'a + TreeStoreReader

, P: Clone> JellyfishMerkleTree<'a, R, P> { - /// Creates a `JellyfishMerkleTree` backed by the given [`TreeReader`](trait.TreeReader.html). - pub fn new(reader: &'a R) -> Self { - Self { - reader, - _payload: PhantomData, - } - } - - /// Get the node hash from the cache if cache is provided, otherwise (for test only) compute it. - fn get_hash(node_key: &NodeKey, node: &Node

, hash_cache: Option<&HashMap>) -> Hash { - if let Some(cache) = hash_cache { - match cache.get(node_key.nibble_path()) { - Some(hash) => *hash, - None => unreachable!("{:?} can not be found in hash cache", node_key), - } - } else { - node.hash() - } - } - - /// For each value set: - /// Returns the new nodes and values in a batch after applying `value_set`. For - /// example, if after transaction `T_i` the committed state of tree in the persistent storage - /// looks like the following structure: - /// - /// ```text - /// S_i - /// / \ - /// . . - /// . . - /// / \ - /// o x - /// / \ - /// A B - /// storage (disk) - /// ``` - /// - /// where `A` and `B` denote the states of two adjacent accounts, and `x` is a sibling subtree - /// of the path from root to A and B in the tree. Then a `value_set` produced by the next - /// transaction `T_{i+1}` modifies other accounts `C` and `D` exist in the subtree under `x`, a - /// new partial tree will be constructed in memory and the structure will be: - /// - /// ```text - /// S_i | S_{i+1} - /// / \ | / \ - /// . . | . . - /// . . | . . - /// / \ | / \ - /// / x | / x' - /// o<-------------+- / \ - /// / \ | C D - /// A B | - /// storage (disk) | cache (memory) - /// ``` - /// - /// With this design, we are able to query the global state in persistent storage and - /// generate the proposed tree delta based on a specific root hash and `value_set`. For - /// example, if we want to execute another transaction `T_{i+1}'`, we can use the tree `S_i` in - /// storage and apply the `value_set` of transaction `T_{i+1}`. Then if the storage commits - /// the returned batch, the state `S_{i+1}` is ready to be read from the tree by calling - /// [`get_with_proof`](struct.JellyfishMerkleTree.html#method.get_with_proof). Anything inside - /// the batch is not reachable from public interfaces before being committed. - pub fn batch_put_value_set)>>( - &self, - value_set: I, - node_hashes: Option<&HashMap>, - persisted_version: Option, - version: Version, - ) -> Result<(Hash, TreeUpdateBatch
<P>
), JmtStorageError> { - let value_set = value_set.into_iter().collect::>(); - let deduped_and_sorted_kvs = value_set.iter().map(|(k, v)| (k, v.as_ref())).collect::>(); - - let mut batch = TreeUpdateBatch::new(); - let root_node_opt = if let Some(persisted_version) = persisted_version { - self.batch_insert_at( - &NodeKey::new_empty_path(persisted_version), - version, - deduped_and_sorted_kvs.as_slice(), - 0, - node_hashes, - &mut batch, - )? - } else { - Self::batch_update_subtree( - &NodeKey::new_empty_path(version), - version, - deduped_and_sorted_kvs.as_slice(), - 0, - node_hashes, - &mut batch, - )? - }; - - let node_key = NodeKey::new_empty_path(version); - let root_hash = if let Some(root_node) = root_node_opt { - let hash = root_node.hash(); - batch.put_node(node_key, root_node); - hash - } else { - batch.put_node(node_key, Node::Null); - SPARSE_MERKLE_PLACEHOLDER_HASH - }; - - Ok((root_hash, batch)) - } - - fn batch_insert_at( - &self, - node_key: &NodeKey, - version: Version, - kvs: &[(&LeafKey, Option<&(Hash, P)>)], - depth: usize, - hash_cache: Option<&HashMap>, - batch: &mut TreeUpdateBatch
<P>
, - ) -> Result>, JmtStorageError> { - let node = self.reader.get_node(node_key)?; - batch.put_stale_node(node_key.clone(), version, &node); - - match node { - Node::Internal(internal_node) => { - // There is a small possibility that the old internal node is intact. - // Traverse all the path touched by `kvs` from this internal node. - let range_iter = NibbleRangeIterator::new(kvs, depth); - // INITIAL-MODIFICATION: there was a par_iter (conditionally) used here - let new_children = range_iter - .map(|(left, right)| { - self.insert_at_child( - node_key, - &internal_node, - version, - kvs, - left, - right, - depth, - hash_cache, - batch, - ) - }) - .collect::, JmtStorageError>>()?; - - // Reuse the current `InternalNode` in memory to create a new internal node. - let mut old_children = internal_node.into_children(); - let mut new_created_children: Vec<(Nibble, Node
<P>
)> = Vec::new(); - for (child_nibble, child_option) in new_children { - if let Some(child) = child_option { - new_created_children.push((child_nibble, child)); - } else { - old_children.swap_remove(&child_nibble); - } - } - - if old_children.is_empty() && new_created_children.is_empty() { - return Ok(None); - } - if old_children.len() <= 1 && new_created_children.len() <= 1 { - if let Some((new_nibble, new_child)) = new_created_children.first() { - if let Some((old_nibble, _old_child)) = old_children.iter().next() { - if old_nibble == new_nibble && new_child.is_leaf() { - return Ok(Some(new_child.clone())); - } - } else if new_child.is_leaf() { - return Ok(Some(new_child.clone())); - } else { - // Nothing to do - } - } else { - let (old_child_nibble, old_child) = old_children.iter().next().expect("must exist"); - if old_child.is_leaf() { - let old_child_node_key = node_key.gen_child_node_key(old_child.version, *old_child_nibble); - let old_child_node = self.reader.get_node(&old_child_node_key)?; - batch.put_stale_node(old_child_node_key, version, &old_child_node); - return Ok(Some(old_child_node)); - } - } - } - - let mut new_children = old_children; - for (child_index, new_child_node) in new_created_children { - let new_child_node_key = node_key.gen_child_node_key(version, child_index); - new_children.insert( - child_index, - Child::new( - Self::get_hash(&new_child_node_key, &new_child_node, hash_cache), - version, - new_child_node.node_type(), - ), - ); - batch.put_node(new_child_node_key, new_child_node); - } - let new_internal_node = InternalNode::new(new_children); - Ok(Some(new_internal_node.into())) - }, - Node::Leaf(leaf_node) => Self::batch_update_subtree_with_existing_leaf( - node_key, version, leaf_node, kvs, depth, hash_cache, batch, - ), - Node::Null => { - assert_eq!(depth, 0, "Null node can only exist at depth 0"); - Self::batch_update_subtree(node_key, version, kvs, 0, hash_cache, batch) - }, - } - } - - fn insert_at_child( - &self, - node_key: &NodeKey, - internal_node: &InternalNode, - version: Version, - kvs: &[(&LeafKey, Option<&(Hash, P)>)], - left: usize, - right: usize, - depth: usize, - hash_cache: Option<&HashMap>, - batch: &mut TreeUpdateBatch
<P>
, - ) -> Result<(Nibble, Option>), JmtStorageError> { - let child_index = kvs[left].0.get_nibble(depth); - let child = internal_node.child(child_index); - - let new_child_node_option = match child { - Some(child) => self.batch_insert_at( - &node_key.gen_child_node_key(child.version, child_index), - version, - &kvs[left..=right], - depth + 1, - hash_cache, - batch, - )?, - None => Self::batch_update_subtree( - &node_key.gen_child_node_key(version, child_index), - version, - &kvs[left..=right], - depth + 1, - hash_cache, - batch, - )?, - }; - - Ok((child_index, new_child_node_option)) - } - - fn batch_update_subtree_with_existing_leaf( - node_key: &NodeKey, - version: Version, - existing_leaf_node: LeafNode
<P>
, - kvs: &[(&LeafKey, Option<&(Hash, P)>)], - depth: usize, - hash_cache: Option<&HashMap>, - batch: &mut TreeUpdateBatch
<P>
, - ) -> Result>, JmtStorageError> { - let existing_leaf_key = existing_leaf_node.leaf_key(); - - if kvs.len() == 1 && kvs[0].0 == existing_leaf_key { - if let (key, Some((value_hash, payload))) = kvs[0] { - let new_leaf_node = Node::new_leaf(*key, *value_hash, payload.clone(), version); - Ok(Some(new_leaf_node)) - } else { - Ok(None) - } - } else { - let existing_leaf_bucket = existing_leaf_key.get_nibble(depth); - let mut isolated_existing_leaf = true; - let mut children = vec![]; - for (left, right) in NibbleRangeIterator::new(kvs, depth) { - let child_index = kvs[left].0.get_nibble(depth); - let child_node_key = node_key.gen_child_node_key(version, child_index); - if let Some(new_child_node) = if existing_leaf_bucket == child_index { - isolated_existing_leaf = false; - Self::batch_update_subtree_with_existing_leaf( - &child_node_key, - version, - existing_leaf_node.clone(), - &kvs[left..=right], - depth + 1, - hash_cache, - batch, - )? - } else { - Self::batch_update_subtree( - &child_node_key, - version, - &kvs[left..=right], - depth + 1, - hash_cache, - batch, - )? - } { - children.push((child_index, new_child_node)); - } - } - if isolated_existing_leaf { - children.push((existing_leaf_bucket, existing_leaf_node.into())); - } - - if children.is_empty() { - Ok(None) - } else if children.len() == 1 && children[0].1.is_leaf() { - let (_, child) = children.pop().expect("Must exist"); - Ok(Some(child)) - } else { - let new_internal_node = InternalNode::new( - children - .into_iter() - .map(|(child_index, new_child_node)| { - let new_child_node_key = node_key.gen_child_node_key(version, child_index); - let result = ( - child_index, - Child::new( - Self::get_hash(&new_child_node_key, &new_child_node, hash_cache), - version, - new_child_node.node_type(), - ), - ); - batch.put_node(new_child_node_key, new_child_node); - result - }) - .collect(), - ); - Ok(Some(new_internal_node.into())) - } - } - } - - fn batch_update_subtree( - node_key: &NodeKey, - version: Version, - kvs: &[(&LeafKey, Option<&(Hash, P)>)], - depth: usize, - hash_cache: Option<&HashMap>, - batch: &mut TreeUpdateBatch
<P>
, - ) -> Result>, JmtStorageError> { - if kvs.len() == 1 { - if let (key, Some((value_hash, payload))) = kvs[0] { - let new_leaf_node = Node::new_leaf(*key, *value_hash, payload.clone(), version); - Ok(Some(new_leaf_node)) - } else { - Ok(None) - } - } else { - let mut children = vec![]; - for (left, right) in NibbleRangeIterator::new(kvs, depth) { - let child_index = kvs[left].0.get_nibble(depth); - let child_node_key = node_key.gen_child_node_key(version, child_index); - if let Some(new_child_node) = Self::batch_update_subtree( - &child_node_key, - version, - &kvs[left..=right], - depth + 1, - hash_cache, - batch, - )? { - children.push((child_index, new_child_node)) - } - } - if children.is_empty() { - Ok(None) - } else if children.len() == 1 && children[0].1.is_leaf() { - let (_, child) = children.pop().expect("Must exist"); - Ok(Some(child)) - } else { - let new_internal_node = InternalNode::new( - children - .into_iter() - .map(|(child_index, new_child_node)| { - let new_child_node_key = node_key.gen_child_node_key(version, child_index); - let result = ( - child_index, - Child::new( - Self::get_hash(&new_child_node_key, &new_child_node, hash_cache), - version, - new_child_node.node_type(), - ), - ); - batch.put_node(new_child_node_key, new_child_node); - result - }) - .collect(), - ); - Ok(Some(new_internal_node.into())) - } - } - } - - /// Returns the value (if applicable) and the corresponding merkle proof. - pub fn get_with_proof( - &self, - key: LeafKeyRef<'_>, - version: Version, - ) -> Result<(Option>, SparseMerkleProof), JmtStorageError> { - self.get_with_proof_ext(key, version) - .map(|(value, proof_ext)| (value, proof_ext.into())) - } - - pub fn get_with_proof_ext( - &self, - key: LeafKeyRef<'_>, - version: Version, - ) -> Result<(Option>, SparseMerkleProofExt), JmtStorageError> { - // Empty tree just returns proof with no sibling hash. - let mut next_node_key = NodeKey::new_empty_path(version); - let mut siblings = vec![]; - let nibble_path = NibblePath::new_even(key.bytes.to_vec()); - let mut nibble_iter = nibble_path.nibbles(); - - for _nibble_depth in 0..SANITY_NIBBLE_LIMIT { - let next_node = self.reader.get_node(&next_node_key)?; - match next_node { - Node::Internal(internal_node) => { - let queried_child_index = nibble_iter.next().ok_or(JmtStorageError::InconsistentState)?; - let (child_node_key, mut siblings_in_internal) = internal_node.get_child_with_siblings( - &next_node_key, - queried_child_index, - Some(self.reader), - )?; - siblings.append(&mut siblings_in_internal); - next_node_key = match child_node_key { - Some(node_key) => node_key, - None => { - return Ok(( - None, - SparseMerkleProofExt::new(None, { - siblings.reverse(); - siblings - }), - )) - }, - }; - }, - Node::Leaf(leaf_node) => { - return Ok(( - if leaf_node.leaf_key().as_ref() == key { - Some((leaf_node.value_hash(), leaf_node.payload().clone(), leaf_node.version())) - } else { - None - }, - SparseMerkleProofExt::new(Some(leaf_node.into()), { - siblings.reverse(); - siblings - }), - )); - }, - Node::Null => { - return Ok((None, SparseMerkleProofExt::new(None, vec![]))); - }, - } - } - Err(JmtStorageError::InconsistentState) - } - - /// Gets the proof that shows a list of keys up to `rightmost_key_to_prove` exist at `version`. 
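(Editor's aside, not part of the diff.) `get_range_proof` below keeps only the siblings lying to the right of the path to `rightmost_key_to_prove`: everything to the left of that path is the range being proven, so only right-hand siblings are needed to recompute the root. A minimal self-contained sketch of that filtering step, with stub `u8` "hashes" and an illustrative function name:

```rust
/// Keeps only the siblings lying to the RIGHT of the path described by `key_bits_top_down`.
/// A sibling is on the right exactly when the path went left (key bit == 0) at that level.
/// `siblings_bottom_up` is ordered leaf-to-root, as in `SparseMerkleProof::siblings()`.
fn right_siblings_only(siblings_bottom_up: &[u8], key_bits_top_down: &[bool]) -> Vec<u8> {
    siblings_bottom_up
        .iter()
        .rev() // flip to root-to-leaf order so we can zip with the key bits
        .zip(key_bits_top_down)
        .filter_map(|(sibling, &bit)| if bit { None } else { Some(*sibling) })
        .rev() // restore leaf-to-root order
        .collect()
}

fn main() {
    // Path from the root: left (0), right (1), left (0).
    let kept = right_siblings_only(&[0xA1, 0xB2, 0xC3], &[false, true, false]);
    assert_eq!(kept, vec![0xA1, 0xC3]); // the sibling where the path went right is dropped
}
```

The real code is the same `rev`/`zip`/`filter_map` chain, only over `Hash` siblings and the key's `iter_bits()`.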
- pub fn get_range_proof( - &self, - rightmost_key_to_prove: LeafKeyRef<'_>, - version: Version, - ) -> Result { - let (leaf, proof) = self.get_with_proof(rightmost_key_to_prove, version)?; - assert!(leaf.is_some(), "rightmost_key_to_prove must exist."); - - let siblings = proof - .siblings() - .iter() - .rev() - .zip(rightmost_key_to_prove.iter_bits()) - .filter_map(|(sibling, bit)| { - // We only need to keep the siblings on the right. - if bit { - None - } else { - Some(*sibling) - } - }) - .rev() - .collect(); - Ok(SparseMerkleRangeProof::new(siblings)) - } - - fn get_root_node(&self, version: Version) -> Result, JmtStorageError> { - let root_node_key = NodeKey::new_empty_path(version); - self.reader.get_node(&root_node_key) - } - - pub fn get_root_hash(&self, version: Version) -> Result { - self.get_root_node(version).map(|n| n.hash()) - } - - pub fn get_leaf_count(&self, version: Version) -> Result { - self.get_root_node(version).map(|n| n.leaf_count()) - } - - pub fn get_all_nodes_referenced(&self, key: NodeKey) -> Result, JmtStorageError> { - let mut out_keys = vec![]; - self.get_all_nodes_referenced_impl(key, &mut out_keys)?; - Ok(out_keys) - } - - fn get_all_nodes_referenced_impl(&self, key: NodeKey, out_keys: &mut Vec) -> Result<(), JmtStorageError> { - match self.reader.get_node(&key)? { - Node::Internal(internal_node) => { - for (child_nibble, child) in internal_node.children_sorted() { - self.get_all_nodes_referenced_impl(key.gen_child_node_key(child.version, *child_nibble), out_keys)?; - } - }, - Node::Leaf(_) | Node::Null => {}, - }; - - out_keys.push(key); - Ok(()) - } -} - -/// An iterator that iterates the index range (inclusive) of each different nibble at given -/// `nibble_idx` of all the keys in a sorted key-value pairs which have the identical Hash -/// prefix (up to nibble_idx). -struct NibbleRangeIterator<'a, P> { - sorted_kvs: &'a [(&'a LeafKey, P)], - nibble_idx: usize, - pos: usize, -} - -impl<'a, P> NibbleRangeIterator<'a, P> { - fn new(sorted_kvs: &'a [(&'a LeafKey, P)], nibble_idx: usize) -> Self { - NibbleRangeIterator { - sorted_kvs, - nibble_idx, - pos: 0, - } - } -} - -impl<'a, P> Iterator for NibbleRangeIterator<'a, P> { - type Item = (usize, usize); - - fn next(&mut self) -> Option { - let left = self.pos; - if self.pos < self.sorted_kvs.len() { - let cur_nibble = self.sorted_kvs[left].0.get_nibble(self.nibble_idx); - let (mut i, mut j) = (left, self.sorted_kvs.len() - 1); - // Find the last index of the cur_nibble. - while i < j { - let mid = j - (j - i) / 2; - if self.sorted_kvs[mid].0.get_nibble(self.nibble_idx) > cur_nibble { - j = mid - 1; - } else { - i = mid; - } - } - self.pos = i + 1; - Some((left, i)) - } else { - None - } - } -} - -#[derive(Clone, Debug, Default, Eq, PartialEq)] -pub struct TreeUpdateBatch
<P>
{ - pub node_batch: Vec<(NodeKey, Node
<P>
)>, - pub stale_node_index_batch: Vec, - pub num_new_leaves: usize, - pub num_stale_leaves: usize, -} - -impl TreeUpdateBatch
<P>
{ - pub fn new() -> Self { - Self { - node_batch: vec![], - stale_node_index_batch: vec![], - num_new_leaves: 0, - num_stale_leaves: 0, - } - } - - fn inc_num_new_leaves(&mut self) { - self.num_new_leaves += 1; - } - - fn inc_num_stale_leaves(&mut self) { - self.num_stale_leaves += 1; - } - - pub fn put_node(&mut self, node_key: NodeKey, node: Node
<P>
) { - if node.is_leaf() { - self.inc_num_new_leaves(); - } - self.node_batch.push((node_key, node)) - } - - pub fn put_stale_node(&mut self, node_key: NodeKey, stale_since_version: Version, node: &Node
<P>
) { - if node.is_leaf() { - self.inc_num_stale_leaves(); - } - self.stale_node_index_batch.push(StaleNodeIndex { - node_key, - stale_since_version, - }); - } -} - -/// Indicates a node becomes stale since `stale_since_version`. -#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -pub struct StaleNodeIndex { - /// The version since when the node is overwritten and becomes stale. - pub stale_since_version: Version, - /// The [`NodeKey`](node_type/struct.NodeKey.html) identifying the node associated with this - /// record. - pub node_key: NodeKey, -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{jmt_node_hash, memory_store::MemoryTreeStore, StaleTreeNode, TreeStoreWriter}; - - fn leaf_key(seed: u64) -> LeafKey { - LeafKey::new(jmt_node_hash(&seed)) - } - - #[test] - fn check_merkle_proof() { - // Evaluating the functionality of the JMT Merkle proof. - let mut mem = MemoryTreeStore::new(); - let jmt = JellyfishMerkleTree::new(&mem); - - let values = [ - (leaf_key(1), Some((jmt_node_hash(&10), Some(1u64)))), - (leaf_key(2), Some((jmt_node_hash(&11), Some(2)))), - (leaf_key(3), Some((jmt_node_hash(&12), Some(3)))), - ]; - let (_, diff) = jmt.batch_put_value_set(values, None, None, 1).unwrap(); - for (k, v) in diff.node_batch { - mem.insert_node(k, v).unwrap(); - } - - for a in diff.stale_node_index_batch { - mem.record_stale_tree_node(StaleTreeNode::Node(a.node_key)).unwrap(); - } - mem.clear_stale_nodes(); - - let jmt = JellyfishMerkleTree::new(&mem); - - // This causes get_with_proof to fail with node NotFound. - let values = [ - (leaf_key(4), Some((jmt_node_hash(&13), Some(4u64)))), - (leaf_key(5), Some((jmt_node_hash(&14), Some(5)))), - (leaf_key(6), Some((jmt_node_hash(&15), Some(6)))), - ]; - let (_mr, diff) = jmt.batch_put_value_set(values, None, Some(1), 2).unwrap(); - - for (k, v) in diff.node_batch { - mem.insert_node(k, v).unwrap(); - } - for a in diff.stale_node_index_batch { - mem.record_stale_tree_node(StaleTreeNode::Node(a.node_key)).unwrap(); - } - mem.clear_stale_nodes(); - let jmt = JellyfishMerkleTree::new(&mem); - - let k = leaf_key(3); - let (_value, sparse) = jmt.get_with_proof(k.as_ref(), 2).unwrap(); - - let leaf = sparse.leaf().unwrap(); - assert_eq!(*leaf.key(), k); - assert_eq!(*leaf.value_hash(), jmt_node_hash(&12)); - // Unanswered: How do we verify the proof root matches a Merkle root? - // assert!(sparse.siblings().iter().any(|h| *h == mr)); - } -} diff --git a/dan_layer/state_tree/src/jellyfish/types.rs b/dan_layer/state_tree/src/jellyfish/types.rs deleted file mode 100644 index 07cc644b6..000000000 --- a/dan_layer/state_tree/src/jellyfish/types.rs +++ /dev/null @@ -1,1405 +0,0 @@ -// Copyright 2024 The Tari Project -// SPDX-License-Identifier: BSD-3-Clause - -// Copyright 2021 Radix Publishing Ltd incorporated in Jersey (Channel Islands). -// -// Licensed under the Radix License, Version 1.0 (the "License"); you may not use this -// file except in compliance with the License. You may obtain a copy of the License at: -// -// radixfoundation.org/licenses/LICENSE-v1 -// -// The Licensor hereby grants permission for the Canonical version of the Work to be -// published, distributed and used under or by reference to the Licensor's trademark -// Radix ® and use of any unregistered trade names, logos or get-up. 
-// -// The Licensor provides the Work (and each Contributor provides its Contributions) on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -// including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, -// MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. -// -// Whilst the Work is capable of being deployed, used and adopted (instantiated) to create -// a distributed ledger it is your responsibility to test and validate the code, together -// with all logic and performance of that code under all foreseeable scenarios. -// -// The Licensor does not make or purport to make and hereby excludes liability for all -// and any representation, warranty or undertaking in any form whatsoever, whether express -// or implied, to any entity or person, including any representation, warranty or -// undertaking, as to the functionality security use, value or other characteristics of -// any distributed ledger nor in respect the functioning or value of any tokens which may -// be created stored or transferred using the Work. The Licensor does not warrant that the -// Work or any use of the Work complies with any law or regulation in any territory where -// it may be implemented or used or that it will be appropriate for any specific purpose. -// -// Neither the licensor nor any current or former employees, officers, directors, partners, -// trustees, representatives, agents, advisors, contractors, or volunteers of the Licensor -// shall be liable for any direct or indirect, special, incidental, consequential or other -// losses of any kind, in tort, contract or otherwise (including but not limited to loss -// of revenue, income or profits, or loss of use or data, or loss of reputation, or loss -// of any economic or other opportunity of whatsoever nature or howsoever arising), arising -// out of or in connection with (without limitation of any use, misuse, of any ledger system -// or use made or its functionality or any performance or operation of any code or protocol -// caused by bugs or programming or logic errors or otherwise); -// -// A. any offer, purchase, holding, use, sale, exchange or transmission of any -// cryptographic keys, tokens or assets created, exchanged, stored or arising from any -// interaction with the Work; -// -// B. any failure in a transmission or loss of any token or assets keys or other digital -// artefacts due to errors in transmission; -// -// C. bugs, hacks, logic errors or faults in the Work or any communication; -// -// D. system software or apparatus including but not limited to losses caused by errors -// in holding or transmitting tokens by any third-party; -// -// E. breaches or failure of security including hacker attacks, loss or disclosure of -// password, loss of private key, unauthorised use or misuse of such passwords or keys; -// -// F. any losses including loss of anticipated savings or other benefits resulting from -// use of the Work or any changes to the Work (however implemented). -// -// You are solely responsible for; testing, validating and evaluation of all operation -// logic, functionality, security and appropriateness of using the Work for any commercial -// or non-commercial purpose and for any reproduction or redistribution by You of the -// Work. You assume all risks associated with Your use of the Work and the exercise of -// permissions under this License. 
- -// This file contains code sourced from https://github.com/aptos-labs/aptos-core/tree/1.0.4 -// This original source is licensed under https://github.com/aptos-labs/aptos-core/blob/1.0.4/LICENSE -// -// The code in this file has been implemented by Radix® pursuant to an Apache 2 licence and has -// been modified by Radix® and is now licensed pursuant to the Radix® Open-Source Licence. -// -// Each sourced code fragment includes an inline attribution to the original source file in a -// comment starting "SOURCE: ..." -// -// Modifications from the original source are captured in two places: -// * Initial changes to get the code functional/integrated are marked by inline "INITIAL-MODIFICATION: ..." comments -// * Subsequent changes to the code are captured in the git commit history -// -// The following notice is retained from the original source -// Copyright (c) Aptos -// SPDX-License-Identifier: Apache-2.0 - -use std::{fmt, fmt::Display, io, ops::Range}; - -use blake2::{digest::consts::U32, Blake2b}; -use indexmap::IndexMap; -use serde::{Deserialize, Serialize}; -use tari_crypto::{ - hash_domain, - hashing::{AsFixedBytes, DomainSeparatedHasher}, - tari_utilities::ByteArray, -}; -use tari_dan_common_types::optional::IsNotFoundError; -use tari_engine_types::serde_with; - -use crate::{ - bit_iter::BitIterator, - jellyfish::{error::JmtProofVerifyError, store::TreeStoreReader}, -}; - -pub type Hash = tari_common_types::types::FixedHash; - -hash_domain!(ValidatorJmtHashDomain, "com.tari.jmt", 0); - -pub type JmtHasher = DomainSeparatedHasher, ValidatorJmtHashDomain>; - -fn jmt_node_hasher() -> JmtHasher { - JmtHasher::new_with_label("Node") -} - -struct HashWriter<'a>(&'a mut JmtHasher); - -impl io::Write for HashWriter<'_> { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.0.update(buf); - Ok(buf.len()) - } - - fn flush(&mut self) -> io::Result<()> { - Ok(()) - } -} - -pub fn jmt_node_hash(data: &T) -> Hash { - let mut hasher = jmt_node_hasher(); - let mut hash_writer = HashWriter(&mut hasher); - tari_bor::encode_into_std_writer(data, &mut hash_writer).expect("encoding failed"); - let bytes: [u8; 32] = hasher.finalize().as_fixed_bytes().expect("hash is 32 bytes"); - bytes.into() -} - -pub fn jmt_node_hash2(d1: &[u8], d2: &[u8]) -> Hash { - let hasher = jmt_node_hasher().chain(d1).chain(d2); - let bytes: [u8; 32] = hasher.finalize().as_fixed_bytes().expect("hash is 32 bytes"); - bytes.into() -} - -// SOURCE: https://github.com/aptos-labs/aptos-core/blob/1.0.4/types/src/proof/definition.rs#L182 -/// A more detailed version of `SparseMerkleProof` with the only difference that all the leaf -/// siblings are explicitly set as `SparseMerkleLeafNode` instead of its hash value. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct SparseMerkleProofExt { - leaf: Option, - /// All siblings in this proof, including the default ones. Siblings are ordered from the bottom - /// level to the root level. - siblings: Vec, -} - -impl SparseMerkleProofExt { - /// Constructs a new `SparseMerkleProofExt` using leaf and a list of sibling nodes. - pub(crate) fn new(leaf: Option, siblings: Vec) -> Self { - Self { leaf, siblings } - } - - /// Returns the leaf node in this proof. - pub fn leaf(&self) -> Option { - self.leaf.clone() - } - - /// Returns the list of siblings in this proof. 
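(Editor's sketch, not part of the diff.) The node hashes above come from a domain-separated Blake2b-256: `jmt_node_hash2` chains two byte slices under the "com.tari.jmt" domain with a "Node" label. A rough, self-contained approximation using the `blake2` crate directly; the plain byte prefix here is an assumption standing in for tari_crypto's `DomainSeparatedHasher` framing, so it will not reproduce the real digests:

```rust
use blake2::{digest::consts::U32, Blake2b, Digest};

/// Simplified two-input node hash. The real `jmt_node_hash2` uses tari_crypto's
/// `DomainSeparatedHasher` with the "com.tari.jmt" domain and a "Node" label;
/// prefixing raw bytes is only an illustration of that framing.
fn node_hash2(d1: &[u8], d2: &[u8]) -> [u8; 32] {
    let mut hasher = Blake2b::<U32>::new();
    hasher.update(b"com.tari.jmt.Node"); // illustrative domain prefix, not the real encoding
    hasher.update(d1);
    hasher.update(d2);
    hasher.finalize().into()
}

fn main() {
    // Order matters: an internal node hash must distinguish (left, right) from (right, left).
    assert_ne!(node_hash2(b"left", b"right"), node_hash2(b"right", b"left"));
}
```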
- pub fn siblings(&self) -> &[NodeInProof] { - &self.siblings - } - - /// Verifies an element whose key is `element_key` and value is `element_value` exists in the Sparse Merkle Tree - /// using the provided proof - pub fn verify_inclusion( - &self, - expected_root_hash: &Hash, - element_key: &LeafKey, - element_value_hash: &Hash, - ) -> Result<(), JmtProofVerifyError> { - self.verify(expected_root_hash, element_key, Some(element_value_hash)) - } - - /// Verifies the proof is a valid non-inclusion proof that shows this key doesn't exist in the tree. - pub fn verify_exclusion( - &self, - expected_root_hash: &Hash, - element_key: &LeafKey, - ) -> Result<(), JmtProofVerifyError> { - self.verify(expected_root_hash, element_key, None) - } - - /// If `element_value` is present, verifies an element whose key is `element_key` and value is - /// `element_value` exists in the Sparse Merkle Tree using the provided proof. Otherwise, - /// verifies the proof is a valid non-inclusion proof that shows this key doesn't exist in the - /// tree. - fn verify( - &self, - expected_root_hash: &Hash, - element_key: &LeafKey, - element_value: Option<&Hash>, - ) -> Result<(), JmtProofVerifyError> { - if self.siblings.len() > 256 { - return Err(JmtProofVerifyError::TooManySiblings { - num_siblings: self.siblings.len(), - }); - } - - match (element_value, &self.leaf) { - (Some(value_hash), Some(leaf)) => { - // This is an inclusion proof, so the key and value hash provided in the proof - // should match element_key and element_value_hash. `siblings` should prove the - // route from the leaf node to the root. - if element_key != leaf.key() { - return Err(JmtProofVerifyError::KeyMismatch { - actual_key: *leaf.key(), - expected_key: *element_key, - }); - } - if *value_hash != leaf.value_hash { - return Err(JmtProofVerifyError::ValueMismatch { - actual: leaf.value_hash, - expected: *value_hash, - }); - } - }, - (Some(_), None) => return Err(JmtProofVerifyError::ExpectedInclusionProof), - (None, Some(leaf)) => { - // This is a non-inclusion proof. The proof intends to show that if a leaf node - // representing `element_key` is inserted, it will break a currently existing leaf - // node represented by `proof_key` into a branch. `siblings` should prove the - // route from that leaf node to the root. - if element_key == leaf.key() { - return Err(JmtProofVerifyError::ExpectedNonInclusionProof); - } - if element_key.common_prefix_bits_len(leaf.key()) < self.siblings.len() { - return Err(JmtProofVerifyError::InvalidNonInclusionProof); - } - }, - (None, None) => { - // This is a non-inclusion proof. The proof intends to show that if a leaf node - // representing `element_key` is inserted, it will show up at a currently empty - // position. `sibling` should prove the route from this empty position to the root. 
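(Editor's aside.) The match above only checks that the supplied leaf is consistent with the inclusion or non-inclusion claim; the authentication itself is the fold right after it, which rebuilds the root from the leaf hash and the bottom-up siblings. A toy rendition, with an order-sensitive `combine` standing in for `SparseMerkleInternalNode::hash()` so the example runs without the crate:

```rust
/// Order-sensitive stand-in for an internal-node hash (NOT the real hash function).
fn combine(left: u64, right: u64) -> u64 {
    left.rotate_left(1) ^ right
}

/// Recomputes the root from a leaf hash, siblings ordered leaf-to-root, and the
/// key bits for the same levels (also leaf-to-root, as in `verify`).
fn fold_root(leaf_hash: u64, siblings_bottom_up: &[u64], key_bits_bottom_up: &[bool]) -> u64 {
    siblings_bottom_up
        .iter()
        .zip(key_bits_bottom_up)
        .fold(leaf_hash, |hash, (&sibling, &bit)| {
            if bit {
                combine(sibling, hash) // key bit 1: our node is the right child
            } else {
                combine(hash, sibling) // key bit 0: our node is the left child
            }
        })
}

fn main() {
    let root = fold_root(0xDEAD, &[0x1111, 0x2222], &[true, false]);
    assert_eq!(root, combine(combine(0x1111, 0xDEAD), 0x2222));
}
```

This also appears to answer the question left open in the previous file's test module: `verify_inclusion(&expected_root_hash, &key, &value_hash)` performs exactly this reconstruction and compares the result against the expected root.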
- }, - } - - let current_hash = self - .leaf - .clone() - .map_or(SPARSE_MERKLE_PLACEHOLDER_HASH, |leaf| leaf.hash()); - let actual_root_hash = self - .siblings - .iter() - .zip(element_key.iter_bits().rev().skip(256 - self.siblings.len())) - .fold(current_hash, |hash, (sibling_node, bit)| { - if bit { - SparseMerkleInternalNode::new(sibling_node.hash(), hash).hash() - } else { - SparseMerkleInternalNode::new(hash, sibling_node.hash()).hash() - } - }); - - if actual_root_hash != *expected_root_hash { - return Err(JmtProofVerifyError::RootHashMismatch { - actual_root_hash, - expected_root_hash: *expected_root_hash, - }); - } - - Ok(()) - } -} - -impl From for SparseMerkleProof { - fn from(proof_ext: SparseMerkleProofExt) -> Self { - Self::new( - proof_ext.leaf, - proof_ext.siblings.into_iter().map(|node| node.hash()).collect(), - ) - } -} - -// SOURCE: https://github.com/aptos-labs/aptos-core/blob/1.0.4/types/src/proof/definition.rs#L135 -impl SparseMerkleProof { - /// Constructs a new `SparseMerkleProof` using leaf and a list of siblings. - pub fn new(leaf: Option, siblings: Vec) -> Self { - SparseMerkleProof { leaf, siblings } - } - - /// Returns the leaf node in this proof. - pub fn leaf(&self) -> Option { - self.leaf.clone() - } - - /// Returns the list of siblings in this proof. - pub fn siblings(&self) -> &[Hash] { - &self.siblings - } -} - -/// A proof that can be used to authenticate an element in a Sparse Merkle Tree given trusted root -/// hash. For example, `TransactionInfoToAccountProof` can be constructed on top of this structure. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct SparseMerkleProof { - /// This proof can be used to authenticate whether a given leaf exists in the tree or not. - /// - If this is `Some(leaf_node)` - /// - If `leaf_node.key` equals requested key, this is an inclusion proof and `leaf_node.value_hash` equals - /// the hash of the corresponding account blob. - /// - Otherwise this is a non-inclusion proof. `leaf_node.key` is the only key that exists in the subtree - /// and `leaf_node.value_hash` equals the hash of the corresponding account blob. - /// - If this is `None`, this is also a non-inclusion proof which indicates the subtree is empty. - leaf: Option, - - /// All siblings in this proof, including the default ones. Siblings are ordered from the bottom - /// level to the root level. - siblings: Vec, -} - -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum NodeInProof { - Leaf(SparseMerkleLeafNode), - Other(Hash), -} - -impl From for NodeInProof { - fn from(hash: Hash) -> Self { - Self::Other(hash) - } -} - -impl From for NodeInProof { - fn from(leaf: SparseMerkleLeafNode) -> Self { - Self::Leaf(leaf) - } -} - -impl NodeInProof { - pub fn hash(&self) -> Hash { - match self { - Self::Leaf(leaf) => leaf.hash(), - Self::Other(hash) => *hash, - } - } -} - -// SOURCE: https://github.com/aptos-labs/aptos-core/blob/1.0.4/types/src/proof/definition.rs#L681 -/// Note: this is not a range proof in the sense that a range of nodes is verified! -/// Instead, it verifies the entire left part of the tree up to a known rightmost node. -/// See the description below. -/// -/// A proof that can be used to authenticate a range of consecutive leaves, from the leftmost leaf to -/// the rightmost known one, in a sparse Merkle tree. 
For example, given the following sparse Merkle tree: -/// -/// ```text -/// root -/// / \ -/// / \ -/// / \ -/// o o -/// / \ / \ -/// a o o h -/// / \ / \ -/// o d e X -/// / \ / \ -/// b c f g -/// ``` -/// -/// if the proof wants show that `[a, b, c, d, e]` exists in the tree, it would need the siblings -/// `X` and `h` on the right. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct SparseMerkleRangeProof { - /// The vector of siblings on the right of the path from root to last leaf. The ones near the - /// bottom are at the beginning of the vector. In the above example, it's `[X, h]`. - right_siblings: Vec, -} - -impl SparseMerkleRangeProof { - /// Constructs a new `SparseMerkleRangeProof`. - pub fn new(right_siblings: Vec) -> Self { - Self { right_siblings } - } - - /// Returns the right siblings. - pub fn right_siblings(&self) -> &[Hash] { - &self.right_siblings - } -} - -// SOURCE: https://github.com/aptos-labs/aptos-core/blob/1.0.4/types/src/proof/mod.rs#L97 -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct SparseMerkleLeafNode { - key: LeafKey, - value_hash: Hash, -} - -impl SparseMerkleLeafNode { - pub fn new(key: LeafKey, value_hash: Hash) -> Self { - SparseMerkleLeafNode { key, value_hash } - } - - pub fn key(&self) -> &LeafKey { - &self.key - } - - pub fn value_hash(&self) -> &Hash { - &self.value_hash - } - - pub fn hash(&self) -> Hash { - jmt_node_hash2(self.key.bytes.as_slice(), self.value_hash.as_slice()) - } -} - -pub struct SparseMerkleInternalNode { - left_child: Hash, - right_child: Hash, -} - -impl SparseMerkleInternalNode { - pub fn new(left_child: Hash, right_child: Hash) -> Self { - Self { - left_child, - right_child, - } - } - - fn hash(&self) -> Hash { - jmt_node_hash2(self.left_child.as_bytes(), self.right_child.as_bytes()) - } -} - -// INITIAL-MODIFICATION: we propagate usage of our own `Hash` (instead of Aptos' `HashValue`) to avoid -// sourcing the entire https://github.com/aptos-labs/aptos-core/blob/1.0.4/crates/aptos-crypto/src/hash.rs -pub const SPARSE_MERKLE_PLACEHOLDER_HASH: Hash = Hash::new([0u8; Hash::byte_size()]); - -// CSOURCE: https://github.com/aptos-labs/aptos-core/blob/1.0.4/crates/aptos-crypto/src/hash.rs#L422 -/// An iterator over `LeafKey` that generates one bit for each iteration. -pub struct LeafKeyBitIterator<'a> { - /// The reference to the bytes that represent the `LeafKey`. - leaf_key_bytes: &'a [u8], - pos: Range, - // invariant pos.end == leaf_key_bytes.len() * 8; -} - -impl<'a> DoubleEndedIterator for LeafKeyBitIterator<'a> { - fn next_back(&mut self) -> Option { - self.pos.next_back().map(|x| self.get_bit(x)) - } -} - -impl<'a> ExactSizeIterator for LeafKeyBitIterator<'a> {} - -impl<'a> LeafKeyBitIterator<'a> { - /// Constructs a new `LeafKeyBitIterator` using given `leaf_key_bytes`. - fn new(leaf_key: LeafKeyRef<'a>) -> Self { - LeafKeyBitIterator { - leaf_key_bytes: leaf_key.bytes, - pos: (0..leaf_key.bytes.len() * 8), - } - } - - /// Returns the `index`-th bit in the bytes. 
- fn get_bit(&self, index: usize) -> bool { - let pos = index / 8; - let bit = 7 - index % 8; - (self.leaf_key_bytes[pos] >> bit) & 1 != 0 - } -} - -impl<'a> Iterator for LeafKeyBitIterator<'a> { - type Item = bool; - - fn next(&mut self) -> Option { - self.pos.next().map(|x| self.get_bit(x)) - } - - fn size_hint(&self) -> (usize, Option) { - self.pos.size_hint() - } -} - -// INITIAL-MODIFICATION: since we use our own `LeafKey` here, we need it to implement these for it -pub trait IteratedLeafKey { - fn iter_bits(&self) -> LeafKeyBitIterator<'_>; - - fn get_nibble(&self, index: usize) -> Nibble; -} - -impl IteratedLeafKey for LeafKey { - fn iter_bits(&self) -> LeafKeyBitIterator<'_> { - LeafKeyBitIterator::new(self.as_ref()) - } - - fn get_nibble(&self, index: usize) -> Nibble { - Nibble::from(if index % 2 == 0 { - self.bytes[index / 2] >> 4 - } else { - self.bytes[index / 2] & 0x0F - }) - } -} - -impl IteratedLeafKey for LeafKeyRef<'_> { - fn iter_bits(&self) -> LeafKeyBitIterator<'_> { - LeafKeyBitIterator::new(*self) - } - - fn get_nibble(&self, index: usize) -> Nibble { - Nibble::from(if index % 2 == 0 { - self.bytes[index / 2] >> 4 - } else { - self.bytes[index / 2] & 0x0F - }) - } -} - -// SOURCE: https://github.com/aptos-labs/aptos-core/blob/1.0.4/types/src/transaction/mod.rs#L57 -pub type Version = u64; - -// SOURCE: https://github.com/aptos-labs/aptos-core/blob/1.0.4/types/src/nibble/mod.rs#L20 -#[derive(Clone, Copy, Debug, Hash, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize)] -#[serde(transparent)] -pub struct Nibble(u8); - -impl From for Nibble { - fn from(nibble: u8) -> Self { - assert!(nibble < 16, "Nibble out of range: {}", nibble); - Self(nibble) - } -} - -impl From for u8 { - fn from(nibble: Nibble) -> Self { - nibble.0 - } -} - -impl fmt::LowerHex for Nibble { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:x}", self.0) - } -} - -// SOURCE: https://github.com/aptos-labs/aptos-core/blob/1.0.4/types/src/nibble/nibble_path/mod.rs#L22 -/// NibblePath defines a path in Merkle tree in the unit of nibble (4 bits). -#[derive(Clone, Hash, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize)] -pub struct NibblePath { - /// Indicates the total number of nibbles in bytes. Either `bytes.len() * 2 - 1` or - /// `bytes.len() * 2`. - // Guarantees intended ordering based on the top-to-bottom declaration order of the struct's - // members. - num_nibbles: usize, - /// The underlying bytes that stores the path, 2 nibbles per byte. If the number of nibbles is - /// odd, the second half of the last byte must be 0. - bytes: Vec, -} - -/// Supports debug format by concatenating nibbles literally. For example, [0x12, 0xa0] with 3 -/// nibbles will be printed as "12a". -impl fmt::Debug for NibblePath { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.nibbles().try_for_each(|x| write!(f, "{:x}", x)) - } -} - -// INITIAL-MODIFICATION: just to show it in errors -impl fmt::Display for NibblePath { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let hex_chars = self - .bytes - .iter() - .flat_map(|b| [b >> 4, b & 15]) - .map(|b| char::from_digit(u32::from(b), 16).unwrap()) - .take(self.num_nibbles); - - for ch in hex_chars { - write!(f, "{}", ch)?; - } - Ok(()) - } -} - -/// Convert a vector of bytes into `NibblePath` using the lower 4 bits of each byte as nibble. 
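(Editor's illustration.) The indexing conventions above are easy to get backwards, so here is the same arithmetic as `get_bit` and `get_nibble`, restated over a plain byte slice: bits are numbered from the most significant bit of byte 0, and nibbles alternate between the high and low halves of each byte:

```rust
/// Bit `index`, counting from the MSB of byte 0 (same convention as `LeafKeyBitIterator`).
fn get_bit(bytes: &[u8], index: usize) -> bool {
    (bytes[index / 8] >> (7 - index % 8)) & 1 != 0
}

/// Nibble `index`: even indices take the high half of the byte, odd the low half.
fn get_nibble(bytes: &[u8], index: usize) -> u8 {
    if index % 2 == 0 {
        bytes[index / 2] >> 4
    } else {
        bytes[index / 2] & 0x0F
    }
}

fn main() {
    let bytes = [0xAB, 0xCD];
    assert_eq!(get_nibble(&bytes, 0), 0xA);
    assert_eq!(get_nibble(&bytes, 3), 0xD);
    assert!(get_bit(&bytes, 0)); // 0xA = 0b1010, so the leading bit is 1
    assert!(!get_bit(&bytes, 1)); // ...and the second bit is 0
}
```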
-impl FromIterator for NibblePath { - fn from_iter>(iter: I) -> Self { - let mut nibble_path = NibblePath::new_even(vec![]); - for nibble in iter { - nibble_path.push(nibble); - } - nibble_path - } -} - -impl NibblePath { - /// Creates a new `NibblePath` from a vector of bytes assuming each byte has 2 nibbles. - pub fn new_even(bytes: Vec) -> Self { - let num_nibbles = bytes.len() * 2; - NibblePath { num_nibbles, bytes } - } - - /// Similar to `new()` but asserts that the bytes have one less nibble. - pub fn new_odd(bytes: Vec) -> Self { - assert_eq!( - bytes.last().expect("Should have odd number of nibbles.") & 0x0F, - 0, - "Last nibble must be 0." - ); - let num_nibbles = bytes.len() * 2 - 1; - NibblePath { num_nibbles, bytes } - } - - /// Adds a nibble to the end of the nibble path. - pub fn push(&mut self, nibble: Nibble) { - if self.num_nibbles % 2 == 0 { - self.bytes.push(u8::from(nibble) << 4); - } else { - self.bytes[self.num_nibbles / 2] |= u8::from(nibble); - } - self.num_nibbles += 1; - } - - /// Pops a nibble from the end of the nibble path. - pub fn pop(&mut self) -> Option { - let poped_nibble = if self.num_nibbles % 2 == 0 { - self.bytes.last_mut().map(|last_byte| { - let nibble = *last_byte & 0x0F; - *last_byte &= 0xF0; - Nibble::from(nibble) - }) - } else { - self.bytes.pop().map(|byte| Nibble::from(byte >> 4)) - }; - if poped_nibble.is_some() { - self.num_nibbles -= 1; - } - poped_nibble - } - - /// Returns the last nibble. - pub fn last(&self) -> Option { - let last_byte_option = self.bytes.last(); - if self.num_nibbles % 2 == 0 { - last_byte_option.map(|last_byte| Nibble::from(*last_byte & 0x0F)) - } else { - let last_byte = last_byte_option.expect("Last byte must exist if num_nibbles is odd."); - Some(Nibble::from(*last_byte >> 4)) - } - } - - /// Get the i-th bit. - fn get_bit(&self, i: usize) -> bool { - assert!(i < self.num_nibbles * 4); - let pos = i / 8; - let bit = 7 - i % 8; - ((self.bytes[pos] >> bit) & 1) != 0 - } - - /// Get the i-th nibble. - pub fn get_nibble(&self, i: usize) -> Nibble { - assert!(i < self.num_nibbles); - Nibble::from((self.bytes[i / 2] >> (if i % 2 == 1 { 0 } else { 4 })) & 0xF) - } - - /// Get a bit iterator iterates over the whole nibble path. - pub fn bits(&self) -> NibbleBitIterator { - NibbleBitIterator { - nibble_path: self, - pos: (0..self.num_nibbles * 4), - } - } - - /// Get a nibble iterator iterates over the whole nibble path. - pub fn nibbles(&self) -> NibbleIterator { - NibbleIterator::new(self, 0, self.num_nibbles) - } - - /// Get the total number of nibbles stored. - pub fn num_nibbles(&self) -> usize { - self.num_nibbles - } - - /// Returns `true` if the nibbles contains no elements. - pub fn is_empty(&self) -> bool { - self.num_nibbles() == 0 - } - - /// Get the underlying bytes storing nibbles. - pub fn bytes(&self) -> &[u8] { - &self.bytes - } - - pub fn into_bytes(self) -> Vec { - self.bytes - } - - pub fn truncate(&mut self, len: usize) { - assert!(len <= self.num_nibbles); - self.num_nibbles = len; - self.bytes.truncate((len + 1) / 2); - if len % 2 != 0 { - *self.bytes.last_mut().expect("must exist.") &= 0xF0; - } - } -} - -pub trait Peekable: Iterator { - /// Returns the `next()` value without advancing the iterator. - fn peek(&self) -> Option; -} - -/// BitIterator iterates a nibble path by bit. -pub struct NibbleBitIterator<'a> { - nibble_path: &'a NibblePath, - pos: Range, -} - -impl<'a> Peekable for NibbleBitIterator<'a> { - /// Returns the `next()` value without advancing the iterator. 
- fn peek(&self) -> Option { - if self.pos.start < self.pos.end { - Some(self.nibble_path.get_bit(self.pos.start)) - } else { - None - } - } -} - -/// BitIterator spits out a boolean each time. True/false denotes 1/0. -impl<'a> Iterator for NibbleBitIterator<'a> { - type Item = bool; - - fn next(&mut self) -> Option { - self.pos.next().map(|i| self.nibble_path.get_bit(i)) - } -} - -/// Support iterating bits in reversed order. -impl<'a> DoubleEndedIterator for NibbleBitIterator<'a> { - fn next_back(&mut self) -> Option { - self.pos.next_back().map(|i| self.nibble_path.get_bit(i)) - } -} - -/// NibbleIterator iterates a nibble path by nibble. -#[derive(Debug)] -pub struct NibbleIterator<'a> { - /// The underlying nibble path that stores the nibbles - nibble_path: &'a NibblePath, - - /// The current index, `pos.start`, will bump by 1 after calling `next()` until `pos.start == - /// pos.end`. - pos: Range, - - /// The start index of the iterator. At the beginning, `pos.start == start`. [start, pos.end) - /// defines the range of `nibble_path` this iterator iterates over. `nibble_path` refers to - /// the entire underlying buffer but the range may only be partial. - start: usize, - // invariant self.start <= self.pos.start; - // invariant self.pos.start <= self.pos.end; -} - -/// NibbleIterator spits out a byte each time. Each byte must be in range [0, 16). -impl<'a> Iterator for NibbleIterator<'a> { - type Item = Nibble; - - fn next(&mut self) -> Option { - self.pos.next().map(|i| self.nibble_path.get_nibble(i)) - } -} - -impl<'a> Peekable for NibbleIterator<'a> { - /// Returns the `next()` value without advancing the iterator. - fn peek(&self) -> Option { - if self.pos.start < self.pos.end { - Some(self.nibble_path.get_nibble(self.pos.start)) - } else { - None - } - } -} - -impl<'a> NibbleIterator<'a> { - fn new(nibble_path: &'a NibblePath, start: usize, end: usize) -> Self { - assert!(start <= end); - Self { - nibble_path, - pos: (start..end), - start, - } - } - - /// Returns a nibble iterator that iterates all visited nibbles. - pub fn visited_nibbles(&self) -> NibbleIterator<'a> { - Self::new(self.nibble_path, self.start, self.pos.start) - } - - /// Returns a nibble iterator that iterates all remaining nibbles. - pub fn remaining_nibbles(&self) -> NibbleIterator<'a> { - Self::new(self.nibble_path, self.pos.start, self.pos.end) - } - - /// Turn it into a `BitIterator`. - pub fn bits(&self) -> NibbleBitIterator<'a> { - NibbleBitIterator { - nibble_path: self.nibble_path, - pos: (self.pos.start * 4..self.pos.end * 4), - } - } - - /// Cut and return the range of the underlying `nibble_path` that this iterator is iterating - /// over as a new `NibblePath` - pub fn get_nibble_path(&self) -> NibblePath { - self.visited_nibbles().chain(self.remaining_nibbles()).collect() - } - - /// Get the number of nibbles that this iterator covers. - pub fn num_nibbles(&self) -> usize { - assert!(self.start <= self.pos.end); // invariant - self.pos.end - self.start - } - - /// Return `true` if the iteration is over. - pub fn is_finished(&self) -> bool { - self.peek().is_none() - } -} - -// INITIAL-MODIFICATION: We will use this type (instead of `Hash`) to allow for arbitrary key length -/// A leaf key (i.e. a complete nibble path). -#[derive(Clone, Debug, Copy, Hash, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize)] -pub struct LeafKey { - /// The underlying bytes. - /// All leaf keys of the same tree must be of the same length - otherwise the tree's behavior - /// becomes unspecified. 
- /// All leaf keys must be evenly distributed across their space - otherwise the tree's - /// performance degrades. - /// TARI: always a hash, so replaced heap-allocated Vec with a Hash - #[serde(with = "serde_with::hex")] - pub bytes: Hash, -} - -impl LeafKey { - pub fn new(bytes: Hash) -> Self { - Self { bytes } - } - - pub fn as_ref(&self) -> LeafKeyRef<'_> { - LeafKeyRef::new(self.bytes.as_slice()) - } - - pub fn iter_bits(&self) -> BitIterator<'_> { - BitIterator::new(self.bytes.as_slice()) - } - - pub fn common_prefix_bits_len(&self, other: &LeafKey) -> usize { - self.iter_bits() - .zip(other.iter_bits()) - .take_while(|(x, y)| x == y) - .count() - } -} - -impl Display for LeafKey { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.bytes.fmt(f) - } -} - -// INITIAL-MODIFICATION: We will use this type (instead of `Hash`) to allow for arbitrary key length -/// A leaf key (i.e. a complete nibble path). -#[derive(Clone, Copy, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)] -pub struct LeafKeyRef<'a> { - /// The underlying bytes. - /// All leaf keys of the same tree must be of the same length - otherwise the tree's behavior - /// becomes unspecified. - /// All leaf keys must be evenly distributed across their space - otherwise the tree's - /// performance degrades. - pub bytes: &'a [u8], -} - -impl<'a> LeafKeyRef<'a> { - pub fn new(bytes: &'a [u8]) -> Self { - Self { bytes } - } -} - -impl PartialEq for LeafKeyRef<'_> { - fn eq(&self, other: &LeafKey) -> bool { - self.bytes == other.bytes.as_slice() - } -} - -// SOURCE: https://github.com/aptos-labs/aptos-core/blob/1.0.4/storage/jellyfish-merkle/src/node_type/mod.rs#L48 -/// The unique key of each node. -#[derive(Clone, Debug, Hash, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize)] -pub struct NodeKey { - /// The version at which the node is created. - version: Version, - /// The nibble path this node represents in the tree. - nibble_path: NibblePath, -} - -impl NodeKey { - /// Creates a new `NodeKey`. - pub fn new(version: Version, nibble_path: NibblePath) -> Self { - Self { version, nibble_path } - } - - /// A shortcut to generate a node key consisting of a version and an empty nibble path. - pub fn new_empty_path(version: Version) -> Self { - Self::new(version, NibblePath::new_even(vec![])) - } - - /// Gets the version. - pub fn version(&self) -> Version { - self.version - } - - /// Gets the nibble path. - pub fn nibble_path(&self) -> &NibblePath { - &self.nibble_path - } - - /// Generates a child node key based on this node key. - pub fn gen_child_node_key(&self, version: Version, n: Nibble) -> Self { - let mut node_nibble_path = self.nibble_path().clone(); - node_nibble_path.push(n); - Self::new(version, node_nibble_path) - } - - /// Generates parent node key at the same version based on this node key. - pub fn gen_parent_node_key(&self) -> Self { - let mut node_nibble_path = self.nibble_path().clone(); - assert!(node_nibble_path.pop().is_some(), "Current node key is root.",); - Self::new(self.version, node_nibble_path) - } -} - -// INITIAL-MODIFICATION: just to show it in errors -impl fmt::Display for NodeKey { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "v{}:{}", self.version, self.nibble_path) - } -} - -#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] -pub enum NodeType { - Leaf, - Null, - /// A internal node that haven't been finished the leaf count migration, i.e. None or not all - /// of the children leaf counts are known. 
- Internal { - leaf_count: usize, - }, -} - -/// Each child of [`InternalNode`] encapsulates a nibble forking at this node. -#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] -pub struct Child { - /// The hash value of this child node. - #[serde(with = "serde_with::hex")] - pub hash: Hash, - /// `version`, the `nibble_path` of the [`NodeKey`] of this [`InternalNode`] the child belongs - /// to and the child's index constitute the [`NodeKey`] to uniquely identify this child node - /// from the storage. Used by `[`NodeKey::gen_child_node_key`]. - pub version: Version, - /// Indicates if the child is a leaf, or if it's an internal node, the total number of leaves - /// under it (though it can be unknown during migration). - pub node_type: NodeType, -} - -impl Child { - pub fn new(hash: Hash, version: Version, node_type: NodeType) -> Self { - Self { - hash, - version, - node_type, - } - } - - pub fn is_leaf(&self) -> bool { - matches!(self.node_type, NodeType::Leaf) - } - - pub fn leaf_count(&self) -> usize { - match self.node_type { - NodeType::Leaf => 1, - NodeType::Internal { leaf_count } => leaf_count, - NodeType::Null => unreachable!("Child cannot be Null"), - } - } -} - -/// [`Children`] is just a collection of children belonging to a [`InternalNode`], indexed from 0 to -/// 15, inclusive. -pub(crate) type Children = IndexMap; - -/// Represents a 4-level subtree with 16 children at the bottom level. Theoretically, this reduces -/// IOPS to query a tree by 4x since we compress 4 levels in a standard Merkle tree into 1 node. -/// Though we choose the same internal node structure as that of Patricia Merkle tree, the root hash -/// computation logic is similar to a 4-level sparse Merkle tree except for some customizations. -#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] -pub struct InternalNode { - /// Up to 16 children. - children: Children, - /// Total number of leaves under this internal node - leaf_count: usize, -} - -impl InternalNode { - /// Creates a new Internal node. - pub fn new(mut children: Children) -> Self { - children.sort_keys(); - let leaf_count = children.values().map(Child::leaf_count).sum(); - Self { children, leaf_count } - } - - pub fn leaf_count(&self) -> usize { - self.leaf_count - } - - pub fn node_type(&self) -> NodeType { - NodeType::Internal { - leaf_count: self.leaf_count, - } - } - - pub fn hash(&self) -> Hash { - self.merkle_hash( - 0, // start index - 16, // the number of leaves in the subtree of which we want the hash of root - self.generate_bitmaps(), - ) - } - - pub fn children_sorted(&self) -> impl Iterator { - // let mut tmp = self.children.iter().collect::>(); - // tmp.sort_by_key(|(nibble, _)| **nibble); - // tmp.into_iter() - self.children.iter() - } - - pub fn into_children(self) -> Children { - self.children - } - - /// Gets the `n`-th child. - pub fn child(&self, n: Nibble) -> Option<&Child> { - self.children.get(&n) - } - - /// Generates `existence_bitmap` and `leaf_bitmap` as a pair of `u16`s: child at index `i` - /// exists if `existence_bitmap[i]` is set; child at index `i` is leaf node if - /// `leaf_bitmap[i]` is set. - pub fn generate_bitmaps(&self) -> (u16, u16) { - let mut existence_bitmap = 0; - let mut leaf_bitmap = 0; - for (nibble, child) in &self.children { - let i = u8::from(*nibble); - existence_bitmap |= 1u16 << i; - if child.is_leaf() { - leaf_bitmap |= 1u16 << i; - } - } - // `leaf_bitmap` must be a subset of `existence_bitmap`. 
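(Editor's illustration.) A standalone rendition of `generate_bitmaps` above, with children given as hypothetical `(nibble, is_leaf)` pairs; it ends by checking the subset invariant asserted immediately below in the diff:

```rust
/// One bit per nibble slot (0..16): set in `existence` if a child is present,
/// and additionally in `leaf` if that child is a leaf.
fn bitmaps(children: &[(u8, bool)]) -> (u16, u16) {
    let mut existence = 0u16;
    let mut leaf = 0u16;
    for &(nibble, is_leaf) in children {
        existence |= 1 << nibble;
        if is_leaf {
            leaf |= 1 << nibble;
        }
    }
    (existence, leaf)
}

fn main() {
    // A leaf child at nibble 3 and an internal child at nibble 10.
    let (existence, leaf) = bitmaps(&[(3, true), (10, false)]);
    assert_eq!(existence, 0b0000_0100_0000_1000);
    assert_eq!(leaf, 0b0000_0000_0000_1000);
    // `leaf_bitmap` must be a subset of `existence_bitmap`:
    assert_eq!(existence | leaf, existence);
}
```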
- assert_eq!(existence_bitmap | leaf_bitmap, existence_bitmap); - (existence_bitmap, leaf_bitmap) - } - - /// Given a range [start, start + width), returns the sub-bitmap of that range. - fn range_bitmaps(start: u8, width: u8, bitmaps: (u16, u16)) -> (u16, u16) { - assert!(start < 16 && width.count_ones() == 1 && start % width == 0); - assert!(width <= 16 && (start + width) <= 16); - // A range with `start == 8` and `width == 4` will generate a mask 0b0000111100000000. - // use as converting to smaller integer types when 'width == 16' - let mask = (((1u32 << width) - 1) << start) as u16; - (bitmaps.0 & mask, bitmaps.1 & mask) - } - - fn merkle_hash(&self, start: u8, width: u8, (existence_bitmap, leaf_bitmap): (u16, u16)) -> Hash { - // Given a bit [start, 1 << nibble_height], return the value of that range. - let (range_existence_bitmap, range_leaf_bitmap) = - Self::range_bitmaps(start, width, (existence_bitmap, leaf_bitmap)); - if range_existence_bitmap == 0 { - // No child under this subtree - SPARSE_MERKLE_PLACEHOLDER_HASH - } else if width == 1 || (range_existence_bitmap.count_ones() == 1 && range_leaf_bitmap != 0) { - // Only 1 leaf child under this subtree or reach the lowest level - let only_child_index = Nibble::from(range_existence_bitmap.trailing_zeros() as u8); - self.child(only_child_index) - .expect("Corrupted internal node: existence_bitmap inconsistent") - .hash - } else { - let left_child = self.merkle_hash(start, width / 2, (range_existence_bitmap, range_leaf_bitmap)); - let right_child = self.merkle_hash( - start + width / 2, - width / 2, - (range_existence_bitmap, range_leaf_bitmap), - ); - SparseMerkleInternalNode::new(left_child, right_child).hash() - } - } - - fn gen_node_in_proof>( - &self, - start: u8, - width: u8, - (existence_bitmap, leaf_bitmap): (u16, u16), - (tree_reader, node_key): (&R, &NodeKey), - ) -> Result { - // Given a bit [start, 1 << nibble_height], return the value of that range. - let (range_existence_bitmap, range_leaf_bitmap) = - Self::range_bitmaps(start, width, (existence_bitmap, leaf_bitmap)); - Ok(if range_existence_bitmap == 0 { - // No child under this subtree - NodeInProof::Other(SPARSE_MERKLE_PLACEHOLDER_HASH) - } else if width == 1 || (range_existence_bitmap.count_ones() == 1 && range_leaf_bitmap != 0) { - // Only 1 leaf child under this subtree or reach the lowest level - let only_child_index = Nibble::from(range_existence_bitmap.trailing_zeros() as u8); - let only_child = self - .child(only_child_index) - .expect("Corrupted internal node: existence_bitmap inconsistent"); - if matches!(only_child.node_type, NodeType::Leaf) { - let only_child_node_key = node_key.gen_child_node_key(only_child.version, only_child_index); - match tree_reader.get_node(&only_child_node_key)? { - Node::Internal(_) => { - unreachable!("Corrupted internal node: in-memory leaf child is internal node on disk") - }, - Node::Leaf(leaf_node) => NodeInProof::Leaf(SparseMerkleLeafNode::from(leaf_node)), - Node::Null => unreachable!("Child cannot be Null"), - } - } else { - NodeInProof::Other(only_child.hash) - } - } else { - let left_child = self.merkle_hash(start, width / 2, (range_existence_bitmap, range_leaf_bitmap)); - let right_child = self.merkle_hash( - start + width / 2, - width / 2, - (range_existence_bitmap, range_leaf_bitmap), - ); - NodeInProof::Other(SparseMerkleInternalNode::new(left_child, right_child).hash()) - }) - } - - /// Gets the child and its corresponding siblings that are necessary to generate the proof for - /// the `n`-th child. 
If it is an existence proof, the returned child must be the `n`-th - /// child; otherwise, the returned child may be another child. See inline explanation for - /// details. When calling this function with n = 11 (node `b` in the following graph), the - /// range at each level is illustrated as a pair of square brackets: - /// - /// ```text - /// 4 [f e d c b a 9 8 7 6 5 4 3 2 1 0] -> root level - /// --------------------------------------------------------------- - /// 3 [f e d c b a 9 8] [7 6 5 4 3 2 1 0] width = 8 - /// chs <--┘ shs <--┘ - /// 2 [f e d c] [b a 9 8] [7 6 5 4] [3 2 1 0] width = 4 - /// shs <--┘ └--> chs - /// 1 [f e] [d c] [b a] [9 8] [7 6] [5 4] [3 2] [1 0] width = 2 - /// chs <--┘ └--> shs - /// 0 [f] [e] [d] [c] [b] [a] [9] [8] [7] [6] [5] [4] [3] [2] [1] [0] width = 1 - /// ^ chs <--┘ └--> shs - /// | MSB|<---------------------- uint 16 ---------------------------->|LSB - /// height chs: `child_half_start` shs: `sibling_half_start` - /// ``` - pub fn get_child_with_siblings>( - &self, - node_key: &NodeKey, - n: Nibble, - reader: Option<&R>, - ) -> Result<(Option, Vec), JmtStorageError> { - let mut siblings = vec![]; - let (existence_bitmap, leaf_bitmap) = self.generate_bitmaps(); - - // Nibble height from 3 to 0. - for h in (0..4).rev() { - // Get the number of children of the internal node that each subtree at this height - // covers. - let width = 1 << h; - let (child_half_start, sibling_half_start) = get_child_and_sibling_half_start(n, h); - // Compute the root hash of the subtree rooted at the sibling of `r`. - if let Some(reader) = reader { - siblings.push(self.gen_node_in_proof( - sibling_half_start, - width, - (existence_bitmap, leaf_bitmap), - (reader, node_key), - )?); - } else { - siblings.push( - self.merkle_hash(sibling_half_start, width, (existence_bitmap, leaf_bitmap)) - .into(), - ); - } - - let (range_existence_bitmap, range_leaf_bitmap) = - Self::range_bitmaps(child_half_start, width, (existence_bitmap, leaf_bitmap)); - - if range_existence_bitmap == 0 { - // No child in this range. - return Ok((None, siblings)); - } - - if width == 1 || (range_existence_bitmap.count_ones() == 1 && range_leaf_bitmap != 0) { - // Return the only 1 leaf child under this subtree or reach the lowest level - // Even this leaf child is not the n-th child, it should be returned instead of - // `None` because it's existence indirectly proves the n-th child doesn't exist. - // Please read proof format for details. - let only_child_index = Nibble::from(range_existence_bitmap.trailing_zeros() as u8); - return Ok(( - { - let only_child_version = self - .child(only_child_index) - // Should be guaranteed by the self invariants, but these are not easy to express at the moment - .expect("Corrupted internal node: child_bitmap inconsistent") - .version; - Some(node_key.gen_child_node_key(only_child_version, only_child_index)) - }, - siblings, - )); - } - } - unreachable!("Impossible to get here without returning even at the lowest level.") - } -} - -/// Given a nibble, computes the start position of its `child_half_start` and `sibling_half_start` -/// at `height` level. -pub(crate) fn get_child_and_sibling_half_start(n: Nibble, height: u8) -> (u8, u8) { - // Get the index of the first child belonging to the same subtree whose root, let's say `r` is - // at `height` that the n-th child belongs to. - // Note: `child_half_start` will be always equal to `n` at height 0. 
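(Editor's worked example.) The two bit tricks implemented just below, matching the `n = 11` walk-through in the doc comment: mask off the low `height` bits of `n` to get the start of the enclosing width `1 << height` range, then flip bit `height` to jump to the sibling range:

```rust
/// Same computation as `get_child_and_sibling_half_start` in the diff.
fn half_starts(n: u8, height: u8) -> (u8, u8) {
    let child_half_start = (0xFFu8 << height) & n;
    let sibling_half_start = child_half_start ^ (1 << height);
    (child_half_start, sibling_half_start)
}

fn main() {
    // n = 11 (node `b` in the diagram), height = 2: the enclosing width-4 range
    // is [b a 9 8] (start 8) and its sibling range is [f e d c] (start 12).
    assert_eq!(half_starts(11, 2), (8, 12));
    // At height 0, `child_half_start` is always `n` itself.
    assert_eq!(half_starts(11, 0), (11, 10));
}
```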
-
-/// Given a nibble, computes the start position of its `child_half_start` and `sibling_half_start`
-/// at the `height` level.
-pub(crate) fn get_child_and_sibling_half_start(n: Nibble, height: u8) -> (u8, u8) {
-    // Get the index of the first child belonging to the same subtree whose root, let's say `r`, is
-    // at `height` and to which the n-th child belongs.
-    // Note: `child_half_start` will always equal `n` at height 0.
-    let child_half_start = (0xFF << height) & u8::from(n);
-
-    // Get the index of the first child belonging to the subtree whose root is the sibling of `r`
-    // at `height`.
-    let sibling_half_start = child_half_start ^ (1 << height);
-
-    (child_half_start, sibling_half_start)
-}
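A quick standalone check of this formula against the `n = 11` diagram above (plain `u8` stands in for `Nibble`; not part of the changeset):

```rust
// child_half_start / sibling_half_start for n = 11 (0b1011) at heights 3..=0,
// matching the bracketed ranges in the doc diagram.
fn halves(n: u8, height: u8) -> (u8, u8) {
    let child_half_start = (0xFF << height) & n;
    let sibling_half_start = child_half_start ^ (1 << height);
    (child_half_start, sibling_half_start)
}

fn main() {
    assert_eq!(halves(11, 3), (8, 0)); // [f..8] vs [7..0], width 8
    assert_eq!(halves(11, 2), (8, 12)); // [b..8] vs [f..c], width 4
    assert_eq!(halves(11, 1), (10, 8)); // [b a] vs [9 8], width 2
    assert_eq!(halves(11, 0), (11, 10)); // [b]    vs [a],   width 1
}
```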

-/// Leaf node, capturing the value hash and carrying an arbitrary payload.
-#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
-pub struct LeafNode<P> {
-    // The key of this leaf node (i.e. its full nibble path).
-    leaf_key: LeafKey,
-    // The hash of an externally-stored value.
-    // Note: do not confuse that value with the `payload`.
-    #[serde(with = "serde_with::hex")]
-    value_hash: Hash,
-    // The client payload.
-    // This is not the "value" whose changes are tracked by the tree (in fact, these values are
-    // supposed to be stored externally, and the tree only cares about their hashes - see
-    // `value_hash`).
-    // Rather, the payload is an arbitrary piece of data that the client wishes to store within the
-    // tree, in order to facilitate some related processing:
-    // - Many clients do not need it and will simply use a no-cost `()`.
-    // - A use-case designed by the original authors was to store a non-hashed element key as a payload (while the
-    //   `leaf_key` contains that key's hash, to ensure the nibble paths are distributed over their space, for
-    //   performance).
-    // - Our current use-case (specific to a "two layers" tree) is to store the nested tree's root metadata.
-    payload: P,
-    // The version at which this leaf was created.
-    version: Version,
-}
-
-impl<P> LeafNode<P> {
-    /// Creates a new leaf node.
-    pub fn new(leaf_key: LeafKey, value_hash: Hash, payload: P, version: Version) -> Self {
-        Self {
-            leaf_key,
-            value_hash,
-            payload,
-            version,
-        }
-    }
-
-    /// Gets the key.
-    pub fn leaf_key(&self) -> &LeafKey {
-        &self.leaf_key
-    }
-
-    /// Gets the associated value hash.
-    pub fn value_hash(&self) -> Hash {
-        self.value_hash
-    }
-
-    /// Gets the payload.
-    pub fn payload(&self) -> &P {
-        &self.payload
-    }
-
-    /// Gets the version.
-    pub fn version(&self) -> Version {
-        self.version
-    }
-
-    /// Gets the leaf's hash (not to be confused with the `value_hash()`).
-    /// This hash incorporates the node's key and the value's hash, in order to capture certain
-    /// changes within a sparse merkle tree (consider 2 trees, both containing a single element with
-    /// the same value, but stored under different keys - we want their root hashes to differ).
-    pub fn leaf_hash(&self) -> Hash {
-        jmt_node_hash2(self.leaf_key.bytes.as_slice(), self.value_hash.as_slice())
-    }
-}
-
-impl<P> From<LeafNode<P>> for SparseMerkleLeafNode {
-    fn from(leaf_node: LeafNode<P>) -> Self {
-        Self::new(leaf_node.leaf_key, leaf_node.value_hash)
-    }
-}
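The point of the `leaf_hash` doc comment above (the same value stored under different keys must yield different hashes) can be demonstrated with any keyed hash; the sketch below uses `std`'s `DefaultHasher` purely as a stand-in for the real `jmt_node_hash2`:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Stand-in for `jmt_node_hash2`: hash the leaf key together with the value hash.
fn leaf_hash(leaf_key: &[u8], value_hash: &[u8]) -> u64 {
    let mut hasher = DefaultHasher::new();
    leaf_key.hash(&mut hasher);
    value_hash.hash(&mut hasher);
    hasher.finish()
}

fn main() {
    let value_hash = [7u8; 32];
    // Two trees, each holding a single element with the same value but under
    // different keys, end up with different leaf hashes and thus different roots.
    assert_ne!(leaf_hash(b"key-a", &value_hash), leaf_hash(b"key-b", &value_hash));
}
```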

-/// The concrete node type of [`JellyfishMerkleTree`].
-#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
-pub enum Node<P> {
-    /// A wrapper of [`InternalNode`].
-    Internal(InternalNode),
-    /// A wrapper of [`LeafNode`].
-    Leaf(LeafNode<P>),
-    /// Represents an empty tree only.
-    Null,
-}
-
-impl<P> From<InternalNode> for Node<P> {
-    fn from(node: InternalNode) -> Self {
-        Node::Internal(node)
-    }
-}
-
-impl<P> From<LeafNode<P>> for Node<P> {
-    fn from(node: LeafNode<P>) -> Self {
-        Node::Leaf(node)
-    }
-}

-impl<P> Node<P> {
-    // /// Creates the [`Internal`](Node::Internal) variant.
-    // #[cfg(any(test, feature = "fuzzing"))]
-    // pub fn new_internal(children: Children) -> Self {
-    //     Node::Internal(InternalNode::new(children))
-    // }
-
-    /// Creates the [`Leaf`](Node::Leaf) variant.
-    pub fn new_leaf(leaf_key: LeafKey, value_hash: Hash, payload: P, version: Version) -> Self {
-        Node::Leaf(LeafNode::new(leaf_key, value_hash, payload, version))
-    }
-
-    /// Returns `true` if the node is a leaf node.
-    pub fn is_leaf(&self) -> bool {
-        matches!(self, Node::Leaf(_))
-    }
-
-    pub fn leaf(&self) -> Option<&LeafNode<P>> {
-        match self {
-            Node::Leaf(leaf) => Some(leaf),
-            _ => None,
-        }
-    }
-
-    /// Returns the `NodeType`.
-    pub fn node_type(&self) -> NodeType {
-        match self {
-            // The returned value will be used to construct a `Child` of an internal node; an
-            // internal node will never have a child of Node::Null.
-            Self::Leaf(_) => NodeType::Leaf,
-            Self::Internal(n) => n.node_type(),
-            Self::Null => NodeType::Null,
-        }
-    }
-
-    /// Returns the leaf count, if known.
-    pub fn leaf_count(&self) -> usize {
-        match self {
-            Node::Leaf(_) => 1,
-            Node::Internal(internal_node) => internal_node.leaf_count,
-            Node::Null => 0,
-        }
-    }
-
-    /// Computes the hash of this node.
-    pub fn hash(&self) -> Hash {
-        match self {
-            Node::Internal(internal_node) => internal_node.hash(),
-            Node::Leaf(leaf_node) => leaf_node.leaf_hash(),
-            Node::Null => SPARSE_MERKLE_PLACEHOLDER_HASH,
-        }
-    }
-}
-
-// INITIAL-MODIFICATION: we propagate usage of our own error enum (instead of `std::io::ErrorKind`
-// used by Aptos) to allow for a no-std build.
-/// Error originating from underlying storage failure / inconsistency.
-#[derive(Debug, thiserror::Error)]
-pub enum JmtStorageError {
-    #[error("A node {0} expected to exist (according to JMT logic) was not found in the storage")]
-    NotFound(NodeKey),
-
-    #[error("Nodes read from the storage are violating some JMT property (e.g. form a cycle).")]
-    InconsistentState,
-
-    #[error("Unexpected error: {0}")]
-    UnexpectedError(String),
-
-    #[error("Attempted to insert node {0} that already exists")]
-    Conflict(NodeKey),
-}
-
-impl IsNotFoundError for JmtStorageError {
-    fn is_not_found_error(&self) -> bool {
-        matches!(self, JmtStorageError::NotFound(_))
-    }
-}
diff --git a/dan_layer/state_tree/src/key_mapper.rs b/dan_layer/state_tree/src/key_mapper.rs
index 71af284e5..d6ce1ee2c 100644
--- a/dan_layer/state_tree/src/key_mapper.rs
+++ b/dan_layer/state_tree/src/key_mapper.rs
@@ -2,8 +2,7 @@
 // SPDX-License-Identifier: BSD-3-Clause
 
 use tari_dan_common_types::VersionedSubstateId;
-
-use crate::{jellyfish::LeafKey, Hash};
+use tari_jellyfish::{jmt_node_hash, LeafKey, TreeHash};
 
 pub trait DbKeyMapper<T> {
     fn map_to_leaf_key(id: &T) -> LeafKey;
 }
@@ -13,15 +12,15 @@ pub struct SpreadPrefixKeyMapper;
 
 impl DbKeyMapper<VersionedSubstateId> for SpreadPrefixKeyMapper {
     fn map_to_leaf_key(id: &VersionedSubstateId) -> LeafKey {
-        let hash = crate::jellyfish::jmt_node_hash(id);
+        let hash = jmt_node_hash(id);
         LeafKey::new(hash)
     }
 }
 
 pub struct HashIdentityKeyMapper;
 
-impl DbKeyMapper<Hash> for HashIdentityKeyMapper {
-    fn map_to_leaf_key(hash: &Hash) -> LeafKey {
+impl DbKeyMapper<TreeHash> for HashIdentityKeyMapper {
+    fn map_to_leaf_key(hash: &TreeHash) -> LeafKey {
         LeafKey::new(*hash)
     }
 }
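`DbKeyMapper` is the extension point that decides how an ID lands in the JMT key space. A hypothetical third mapper over `u64` IDs could look like the sketch below (it assumes `jmt_node_hash` accepts any serializable value, as its use with `VersionedSubstateId` suggests):

```rust
use tari_jellyfish::{jmt_node_hash, LeafKey};
use tari_state_tree::key_mapper::DbKeyMapper;

// Hypothetical mapper: hash the ID so that leaf keys spread evenly over the
// nibble space, mirroring what SpreadPrefixKeyMapper does for VersionedSubstateId.
pub struct U64KeyMapper;

impl DbKeyMapper<u64> for U64KeyMapper {
    fn map_to_leaf_key(id: &u64) -> LeafKey {
        LeafKey::new(jmt_node_hash(id))
    }
}
```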
diff --git a/dan_layer/state_tree/src/lib.rs b/dan_layer/state_tree/src/lib.rs
index ce3157a2c..a6339a624 100644
--- a/dan_layer/state_tree/src/lib.rs
+++ b/dan_layer/state_tree/src/lib.rs
@@ -1,18 +1,16 @@
 // Copyright 2024 The Tari Project
 // SPDX-License-Identifier: BSD-3-Clause
 
+pub use tari_jellyfish::*;
+
 mod error;
 pub use error::*;
 
-mod jellyfish;
-pub use jellyfish::*;
 pub mod key_mapper;
 pub mod memory_store;
 
 mod staged_store;
 pub use staged_store::*;
 
-mod bit_iter;
 mod tree;
-
 pub use tree::*;
diff --git a/dan_layer/state_tree/src/memory_store.rs b/dan_layer/state_tree/src/memory_store.rs
index 367cd3a08..6c451feac 100644
--- a/dan_layer/state_tree/src/memory_store.rs
+++ b/dan_layer/state_tree/src/memory_store.rs
@@ -3,7 +3,7 @@
 
 use std::{collections::HashMap, fmt, fmt::Debug};
 
-use crate::jellyfish::{JmtStorageError, Node, NodeKey, StaleTreeNode, TreeNode, TreeStoreReader, TreeStoreWriter};
+use tari_jellyfish::{JmtStorageError, Node, NodeKey, StaleTreeNode, TreeNode, TreeStoreReader, TreeStoreWriter};
 
 #[derive(Debug, Default)]
 pub struct MemoryTreeStore<P> {
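`MemoryTreeStore` is the in-memory `TreeStore` backing the test helpers and the proof helpers further down this diff; standing one up is a one-liner (the `()` payload type is chosen here for illustration):

```rust
use tari_state_tree::memory_store::MemoryTreeStore;

fn main() {
    // An empty in-memory node store with unit payloads, as used by
    // compute_proof_for_hashes below.
    let store: MemoryTreeStore<()> = MemoryTreeStore::new();
    let _ = store;
}
```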
diff --git a/dan_layer/state_tree/src/staged_store.rs b/dan_layer/state_tree/src/staged_store.rs
index addcfd4c6..4829994c8 100644
--- a/dan_layer/state_tree/src/staged_store.rs
+++ b/dan_layer/state_tree/src/staged_store.rs
@@ -5,8 +5,9 @@ use std::collections::{HashMap, VecDeque};
 
 use log::debug;
 use tari_dan_common_types::option::DisplayContainer;
+use tari_jellyfish::{JmtStorageError, Node, NodeKey, StaleTreeNode, TreeStoreReader, TreeStoreWriter};
 
-use crate::{JmtStorageError, Node, NodeKey, StaleTreeNode, StateHashTreeDiff, TreeStoreReader, TreeStoreWriter};
+use crate::StateHashTreeDiff;
 
 const LOG_TARGET: &str = "tari::dan::consensus::sharded_state_tree";
diff --git a/dan_layer/state_tree/src/tree.rs b/dan_layer/state_tree/src/tree.rs
index 92421b087..fb3250d7e 100644
--- a/dan_layer/state_tree/src/tree.rs
+++ b/dan_layer/state_tree/src/tree.rs
@@ -4,20 +4,27 @@
 use std::{iter::Peekable, marker::PhantomData};
 
 use serde::{Deserialize, Serialize};
+use tari_common_types::types::FixedHash;
 use tari_dan_common_types::VersionedSubstateId;
-
-use crate::{
-    error::StateTreeError,
-    jellyfish::{Hash, JellyfishMerkleTree, SparseMerkleProofExt, TreeStore, Version},
-    key_mapper::{DbKeyMapper, HashIdentityKeyMapper, SpreadPrefixKeyMapper},
-    memory_store::MemoryTreeStore,
+use tari_jellyfish::{
+    JellyfishMerkleTree,
+    LeafKey,
+    Node,
+    NodeKey,
+    ProofValue,
+    SparseMerkleProofExt,
+    StaleTreeNode,
+    TreeHash,
+    TreeStore,
+    TreeStoreReader,
+    TreeUpdateBatch,
+    Version,
+};
+
+use crate::{
+    error::StateTreeError,
+    key_mapper::{DbKeyMapper, HashIdentityKeyMapper, SpreadPrefixKeyMapper},
+    memory_store::MemoryTreeStore,
     SPARSE_MERKLE_PLACEHOLDER_HASH,
 };
@@ -44,15 +51,15 @@ impl<'a, S: TreeStoreReader<Version>, M: DbKeyMapper<VersionedSubstateId>> State
         version: Version,
         key: &VersionedSubstateId,
     ) -> Result<(LeafKey, Option<ProofValue<Version>>, SparseMerkleProofExt), StateTreeError> {
-        let smt = JellyfishMerkleTree::new(self.store);
+        let jmt = JellyfishMerkleTree::new(self.store);
         let key = M::map_to_leaf_key(key);
-        let (maybe_value, proof) = smt.get_with_proof_ext(key.as_ref(), version)?;
+        let (maybe_value, proof) = jmt.get_with_proof_ext(key.as_ref(), version)?;
         Ok((key, maybe_value, proof))
     }
 
-    pub fn get_root_hash(&self, version: Version) -> Result<Hash, StateTreeError> {
-        let smt = JellyfishMerkleTree::new(self.store);
-        let root_hash = smt.get_root_hash(version)?;
+    pub fn get_root_hash(&self, version: Version) -> Result<TreeHash, StateTreeError> {
+        let jmt = JellyfishMerkleTree::new(self.store);
+        let root_hash = jmt.get_root_hash(version)?;
         Ok(root_hash)
     }
 }
@@ -63,7 +70,7 @@ impl<'a, S: TreeStore<Version>, M: DbKeyMapper<VersionedSubstateId>> StateTree<'
         current_version: Option<Version>,
         next_version: Version,
         changes: I,
-    ) -> Result<(Hash, StateHashTreeDiff), StateTreeError> {
+    ) -> Result<(TreeHash, StateHashTreeDiff), StateTreeError> {
         let (root_hash, update_batch) =
             calculate_substate_changes::<_, M, _>(self.store, current_version, next_version, changes)?;
 
         Ok((root_hash, update_batch.into()))
@@ -75,7 +82,7 @@ impl<'a, S: TreeStore<Version>, M: DbKeyMapper<VersionedSubstateId>> StateTree<'
         current_version: Option<Version>,
         next_version: Version,
         changes: I,
-    ) -> Result<Hash, StateTreeError> {
+    ) -> Result<TreeHash, StateTreeError> {
         let (root_hash, update_batch) = self.calculate_substate_changes(current_version, next_version, changes)?;
         self.commit_diff(update_batch)?;
         Ok(root_hash)
@@ -96,13 +103,13 @@ impl<'a, S: TreeStore<Version>, M: DbKeyMapper<VersionedSubstateId>> StateTree<'
     }
 }
 
-impl<'a, S: TreeStore<()>, M: DbKeyMapper<Hash>> StateTree<'a, S, M> {
-    pub fn put_changes<I: IntoIterator<Item = Hash>>(
+impl<'a, S: TreeStore<()>, M: DbKeyMapper<TreeHash>> StateTree<'a, S, M> {
+    pub fn put_changes<I: IntoIterator<Item = TreeHash>>(
         &mut self,
         current_version: Option<Version>,
         next_version: Version,
         changes: I,
-    ) -> Result<Hash, StateTreeError> {
+    ) -> Result<TreeHash, StateTreeError> {
         let (root_hash, update_result) = self.compute_update_batch(current_version, next_version, changes)?;
 
         for (k, node) in update_result.node_batch {
@@ -117,12 +124,12 @@ impl<'a, S: TreeStore<()>, M: DbKeyMapper<Hash>> StateTree<'a, S, M> {
         Ok(root_hash)
     }
 
-    pub fn compute_update_batch<I: IntoIterator<Item = Hash>>(
+    pub fn compute_update_batch<I: IntoIterator<Item = TreeHash>>(
        &mut self,
         current_version: Option<Version>,
         next_version: Version,
         changes: I,
-    ) -> Result<(Hash, TreeUpdateBatch<()>), StateTreeError> {
+    ) -> Result<(TreeHash, TreeUpdateBatch<()>), StateTreeError> {
         let jmt = JellyfishMerkleTree::<_, ()>::new(self.store);
 
         let changes = changes
@@ -144,11 +151,14 @@ fn calculate_substate_changes<
     current_version: Option<Version>,
     next_version: Version,
     changes: I,
-) -> Result<(Hash, TreeUpdateBatch<Version>), StateTreeError> {
+) -> Result<(TreeHash, TreeUpdateBatch<Version>), StateTreeError> {
     let jmt = JellyfishMerkleTree::new(store);
 
     let changes = changes.into_iter().map(|ch| match ch {
-        SubstateTreeChange::Up { id, value_hash } => (M::map_to_leaf_key(&id), Some((value_hash, next_version))),
+        SubstateTreeChange::Up { id, value_hash } => (
+            M::map_to_leaf_key(&id),
+            Some((TreeHash::new(value_hash.into_array()), next_version)),
+        ),
         SubstateTreeChange::Down { id } => (M::map_to_leaf_key(&id), None),
     });
 
@@ -158,8 +168,13 @@ fn calculate_substate_changes<
 }
 
 pub enum SubstateTreeChange {
-    Up { id: VersionedSubstateId, value_hash: Hash },
-    Down { id: VersionedSubstateId },
+    Up {
+        id: VersionedSubstateId,
+        value_hash: FixedHash,
+    },
+    Down {
+        id: VersionedSubstateId,
+    },
 }
 
 impl SubstateTreeChange {
@@ -199,9 +214,9 @@ impl<P> From<TreeUpdateBatch<P>> for StateHashTreeDiff<P> {
     }
 }
 
-pub fn compute_merkle_root_for_hashes<I: Iterator<Item = Hash>>(
+pub fn compute_merkle_root_for_hashes<I: Iterator<Item = TreeHash>>(
     mut hashes: Peekable<I>,
-) -> Result<Hash, StateTreeError> {
+) -> Result<TreeHash, StateTreeError> {
     if hashes.peek().is_none() {
         return Ok(SPARSE_MERKLE_PLACEHOLDER_HASH);
     }
@@ -210,3 +225,18 @@ pub fn compute_merkle_root_for_hashes<I: Iterator<Item = TreeHash>>(
     let (hash, _) = root_tree.compute_update_batch(None, 1, hashes)?;
     Ok(hash)
 }
+
+/// Computes a Merkle proof that the given hash is included in the provided hashes, or a proof of absence.
+/// Returns the value (if it exists) and the Merkle proof.
+pub fn compute_proof_for_hashes<I: IntoIterator<Item = TreeHash>>(
+    hashes: I,
+    hash_to_prove: TreeHash,
+) -> Result<(Option<ProofValue<()>>, SparseMerkleProofExt), StateTreeError> {
+    let mut mem_store = MemoryTreeStore::new();
+    let mut root_tree = RootStateTree::new(&mut mem_store);
+    root_tree.put_changes(None, 1, hashes)?;
+    let jmt = JellyfishMerkleTree::new(&mem_store);
+    let key = HashIdentityKeyMapper::map_to_leaf_key(&hash_to_prove);
+    let proof_tuple = jmt.get_with_proof_ext(key.as_ref(), 1)?;
+    Ok(proof_tuple)
+}
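A usage sketch for the new `compute_proof_for_hashes` helper (the `IntoIterator<Item = TreeHash>` bound is inferred from `compute_merkle_root_for_hashes`, and `TreeHash` is assumed to be `Copy`):

```rust
use tari_jellyfish::TreeHash;
use tari_state_tree::{compute_proof_for_hashes, StateTreeError};

// Build a single-version tree over `hashes` and prove membership of the first one.
fn prove_first(hashes: Vec<TreeHash>) -> Result<(), StateTreeError> {
    let target = hashes[0];
    let (value, proof) = compute_proof_for_hashes(hashes, target)?;
    // `value` is Some(..) because `target` was inserted; an absent hash would
    // instead yield None together with a proof of absence.
    assert!(value.is_some());
    // `proof` can be checked against the root produced by
    // compute_merkle_root_for_hashes over the same hashes.
    let _ = proof;
    Ok(())
}
```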
diff --git a/dan_layer/state_tree/tests/support.rs b/dan_layer/state_tree/tests/support.rs
index 2fae20320..83df18b13 100644
--- a/dan_layer/state_tree/tests/support.rs
+++ b/dan_layer/state_tree/tests/support.rs
@@ -1,18 +1,11 @@
 // Copyright 2024 The Tari Project
 // SPDX-License-Identifier: BSD-3-Clause
 
+use tari_common_types::types::FixedHash;
 use tari_dan_common_types::VersionedSubstateId;
 use tari_engine_types::{hashing::substate_value_hasher32, substate::SubstateId};
-use tari_state_tree::{
-    key_mapper::DbKeyMapper,
-    memory_store::MemoryTreeStore,
-    Hash,
-    LeafKey,
-    StateTree,
-    SubstateTreeChange,
-    TreeStore,
-    Version,
-};
+use tari_jellyfish::{LeafKey, TreeHash, TreeStore, Version};
+use tari_state_tree::{key_mapper::DbKeyMapper, memory_store::MemoryTreeStore, StateTree, SubstateTreeChange};
 use tari_template_lib::models::{ComponentAddress, ObjectKey};
 
 pub fn make_value(seed: u8) -> VersionedSubstateId {
@@ -26,11 +19,11 @@ pub fn change(substate_id_seed: u8, value_seed: Option<u8>) -> SubstateTreeChang
     change_exact(make_value(substate_id_seed), value_seed.map(from_seed))
 }
 
-fn hash_value(value: &[u8]) -> Hash {
+fn hash_value(value: &[u8]) -> TreeHash {
     substate_value_hasher32().chain(value).result().into_array().into()
 }
 
-pub fn hash_value_from_seed(seed: u8) -> Hash {
+pub fn hash_value_from_seed(seed: u8) -> TreeHash {
     hash_value(&from_seed(seed))
 }
 
@@ -38,7 +31,7 @@ pub fn change_exact(substate_id: VersionedSubstateId, value: Option<Vec<u8>>) ->
     value
         .map(|value| SubstateTreeChange::Up {
             id: substate_id.clone(),
-            value_hash: hash_value(&value),
+            value_hash: FixedHash::new(hash_value(&value).into_array()),
         })
         .unwrap_or_else(|| SubstateTreeChange::Down { id: substate_id })
 }
@@ -60,11 +53,11 @@ impl<S: TreeStore<Version>> HashTreeTester<S> {
         }
     }
 
-    pub fn put_substate_changes(&mut self, changes: impl IntoIterator<Item = SubstateTreeChange>) -> Hash {
+    pub fn put_substate_changes(&mut self, changes: impl IntoIterator<Item = SubstateTreeChange>) -> TreeHash {
         self.apply_database_updates(changes)
     }
 
-    fn apply_database_updates(&mut self, changes: impl IntoIterator<Item = SubstateTreeChange>) -> Hash {
+    fn apply_database_updates(&mut self, changes: impl IntoIterator<Item = SubstateTreeChange>) -> TreeHash {
         let next_version = self.current_version.unwrap_or(0) + 1;
         let current_version = self.current_version.replace(next_version);
         self.put_changes_at_version(current_version, next_version, changes)
@@ -79,7 +72,7 @@ impl<S: TreeStore<Version>> HashTreeTester<S> {
         current_version: Option<Version>,
         next_version: Version,
         changes: impl IntoIterator<Item = SubstateTreeChange>,
-    ) -> Hash {
+    ) -> TreeHash {
         self.create_state_tree()
             .put_substate_changes(current_version, next_version, changes)
             .unwrap()
diff --git a/dan_layer/state_tree/tests/test.rs b/dan_layer/state_tree/tests/test.rs
index 4e6f1881a..59b9f06b1 100644
--- a/dan_layer/state_tree/tests/test.rs
+++ b/dan_layer/state_tree/tests/test.rs
@@ -2,10 +2,10 @@
 // SPDX-License-Identifier: BSD-3-Clause
 
 // Adapted from https://github.com/radixdlt/radixdlt-scrypto/blob/868ba44ec3b806992864af27c706968c797eb961/radix-engine-stores/src/hash_tree/test.rs
-use std::collections::HashSet;
+use std::collections::{BTreeSet, HashSet};
 
-use itertools::Itertools;
-use tari_state_tree::{memory_store::MemoryTreeStore, StaleTreeNode, Version, SPARSE_MERKLE_PLACEHOLDER_HASH};
+use tari_jellyfish::{StaleTreeNode, Version, SPARSE_MERKLE_PLACEHOLDER_HASH};
+use tari_state_tree::memory_store::MemoryTreeStore;
 
 use crate::support::{change, hash_value_from_seed, make_value, HashTreeTester};
 mod support;
@@ -160,10 +160,8 @@ fn records_stale_tree_node_keys() {
             };
             key.version()
         })
-        .unique()
-        .sorted()
-        .collect::<Vec<_>>();
-    assert_eq!(stale_versions, vec![1, 2]);
+        .collect::<BTreeSet<_>>();
+    assert_eq!(stale_versions.into_iter().collect::<Vec<_>>(), vec![1, 2]);
 }
 
 #[test]
diff --git a/dan_layer/storage/src/consensus_models/block.rs b/dan_layer/storage/src/consensus_models/block.rs
index 55ed7a4b5..5e3164f06 100644
--- a/dan_layer/storage/src/consensus_models/block.rs
+++ b/dan_layer/storage/src/consensus_models/block.rs
@@ -28,7 +28,7 @@ use tari_dan_common_types::{
     ShardGroup,
     SubstateAddress,
 };
-use tari_state_tree::StateTreeError;
+use tari_state_tree::{compute_proof_for_hashes, SparseMerkleProofExt, StateTreeError, TreeHash};
 use tari_transaction::TransactionId;
 use time::PrimitiveDateTime;
 #[cfg(feature = "ts")]
@@ -78,6 +78,8 @@ const LOG_TARGET: &str = "tari::dan::storage::consensus_models::block";
 pub enum BlockError {
     #[error("Error computing command merkle hash: {0}")]
     StateTreeError(#[from] StateTreeError),
+    #[error("Merkle proof generation command index out of bounds: {index}/{len}")]
+    MerkleProofGenerationCommandIndexOutOfBounds { index: usize, len: usize },
 }
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
@@ -311,7 +313,7 @@ impl Block {
         self.commands.iter().filter_map(|c| c.foreign_proposal())
     }
 
-    pub fn all_evict_nodes(&self) -> impl Iterator<Item = &EvictNodeAtom> + '_ {
+    pub fn all_node_evictions(&self) -> impl Iterator<Item = &EvictNodeAtom> + '_ {
         self.commands.iter().filter_map(|c| c.evict_node())
     }
@@ -458,6 +460,22 @@ impl Block {
     pub fn extra_data(&self) -> &ExtraData {
         self.header.extra_data()
     }
+
+    pub fn compute_command_inclusion_proof(&self, command_index: usize) -> Result<SparseMerkleProofExt, BlockError> {
+        let hashes = self.commands.iter().map(|cmd| TreeHash::from(cmd.hash().into_array()));
+        let command = self.commands.iter().nth(command_index).ok_or(
+            BlockError::MerkleProofGenerationCommandIndexOutOfBounds {
+                index: command_index,
+                len: self.commands.len(),
+            },
+        )?;
+        let hash = TreeHash::new(command.hash().into_array());
+        let (value, proof) = compute_proof_for_hashes(hashes, hash)?;
+        value.expect(
+            "Value not found in proof. This is a bug because the hash is taken from commands that generate the tree",
+        );
+        Ok(proof)
+    }
 }
 
 impl Block {
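A hypothetical call site for `compute_command_inclusion_proof` (crate path and surrounding types assumed; error handling elided):

```rust
use tari_dan_storage::consensus_models::{Block, BlockError};

// Sketch: prove that command 0 is included in the block's command set. The
// resulting proof is checked against the command merkle root committed in the
// block header.
fn prove_command(block: &Block) -> Result<(), BlockError> {
    let proof = block.compute_command_inclusion_proof(0)?;
    let _ = proof; // hand to a verifier together with the header's command merkle root
    Ok(())
}
```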
diff --git a/dan_layer/storage/src/consensus_models/block_header.rs b/dan_layer/storage/src/consensus_models/block_header.rs
index d4215500b..dade63289 100644
--- a/dan_layer/storage/src/consensus_models/block_header.rs
+++ b/dan_layer/storage/src/consensus_models/block_header.rs
@@ -11,7 +11,7 @@ use tari_common::configuration::Network;
 use tari_common_types::types::{FixedHash, PublicKey};
 use tari_crypto::tari_utilities::epoch_time::EpochTime;
 use tari_dan_common_types::{hashing, shard::Shard, Epoch, ExtraData, NodeHeight, NumPreshards, ShardGroup};
-use tari_state_tree::compute_merkle_root_for_hashes;
+use tari_state_tree::{compute_merkle_root_for_hashes, TreeHash};
 #[cfg(feature = "ts")]
 use ts_rs::TS;
@@ -102,11 +102,6 @@ impl BlockHeader {
         Ok(header)
     }
 
-    pub fn compute_command_merkle_root(commands: &BTreeSet<Command>) -> Result<FixedHash, BlockError> {
-        let hashes = commands.iter().map(|cmd| cmd.hash()).peekable();
-        compute_merkle_root_for_hashes(hashes).map_err(BlockError::StateTreeError)
-    }
-
     #[allow(clippy::too_many_arguments)]
     pub fn load(
         id: BlockId,
@@ -406,6 +401,15 @@ impl BlockHeader {
     pub fn extra_data(&self) -> &ExtraData {
         &self.extra_data
     }
+
+    pub fn compute_command_merkle_root(commands: &BTreeSet<Command>) -> Result<FixedHash, BlockError> {
+        let hashes = commands
+            .iter()
+            .map(|cmd| TreeHash::from(cmd.hash().into_array()))
+            .peekable();
+        let hash = compute_merkle_root_for_hashes(hashes).map_err(BlockError::StateTreeError)?;
+        Ok(FixedHash::from(hash.into_array()))
+    }
 }
 
 impl Display for BlockHeader {
diff --git a/dan_layer/storage/src/consensus_models/epoch_checkpoint.rs b/dan_layer/storage/src/consensus_models/epoch_checkpoint.rs
index 5c5f5e887..c07a844ea 100644
--- a/dan_layer/storage/src/consensus_models/epoch_checkpoint.rs
+++ b/dan_layer/storage/src/consensus_models/epoch_checkpoint.rs
@@ -5,7 +5,7 @@ use std::fmt::Display;
 
 use indexmap::IndexMap;
 use tari_dan_common_types::{shard::Shard, Epoch};
-use tari_state_tree::{compute_merkle_root_for_hashes, Hash, StateTreeError, SPARSE_MERKLE_PLACEHOLDER_HASH};
+use tari_state_tree::{compute_merkle_root_for_hashes, StateTreeError, TreeHash, SPARSE_MERKLE_PLACEHOLDER_HASH};
 
 use crate::{
     consensus_models::{Block, QuorumCertificate},
@@ -18,11 +18,11 @@ use crate::{
 pub struct EpochCheckpoint {
     block: Block,
     linked_qcs: Vec<QuorumCertificate>,
-    shard_roots: IndexMap<Shard, Hash>,
+    shard_roots: IndexMap<Shard, TreeHash>,
 }
 
 impl EpochCheckpoint {
-    pub fn new(block: Block, linked_qcs: Vec<QuorumCertificate>, shard_roots: IndexMap<Shard, Hash>) -> Self {
+    pub fn new(block: Block, linked_qcs: Vec<QuorumCertificate>, shard_roots: IndexMap<Shard, TreeHash>) -> Self {
         Self {
             block,
             linked_qcs,
@@ -38,18 +38,18 @@ impl EpochCheckpoint {
         &self.block
     }
 
-    pub fn shard_roots(&self) -> &IndexMap<Shard, Hash> {
+    pub fn shard_roots(&self) -> &IndexMap<Shard, TreeHash> {
         &self.shard_roots
     }
 
-    pub fn get_shard_root(&self, shard: Shard) -> Hash {
+    pub fn get_shard_root(&self, shard: Shard) -> TreeHash {
         self.shard_roots
             .get(&shard)
             .copied()
             .unwrap_or(SPARSE_MERKLE_PLACEHOLDER_HASH)
     }
 
-    pub fn compute_state_merkle_root(&self) -> Result<Hash, StateTreeError> {
+    pub fn compute_state_merkle_root(&self) -> Result<TreeHash, StateTreeError> {
         let shard_group = self.block().shard_group();
         let hashes = shard_group
             .shard_iter()
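Both helpers above bridge `FixedHash` (consensus types) and `TreeHash` (jellyfish) with a plain 32-byte array copy; the round-trip looks like this (conversions taken from the calls in this diff, assuming `into_array()` on both types as used with command hashes here):

```rust
use tari_common_types::types::FixedHash;
use tari_jellyfish::TreeHash;

// FixedHash -> TreeHash -> FixedHash is lossless: both wrap the same 32 bytes.
fn round_trip(hash: FixedHash) -> FixedHash {
    let tree_hash = TreeHash::new(hash.into_array());
    FixedHash::from(tree_hash.into_array())
}
```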
diff --git a/dan_layer/template_abi/src/types.rs b/dan_layer/template_abi/src/types.rs
index bd737b51d..64791dac2 100644
--- a/dan_layer/template_abi/src/types.rs
+++ b/dan_layer/template_abi/src/types.rs
@@ -21,13 +21,15 @@
 // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 use serde::{Deserialize, Serialize};
-#[cfg(feature = "ts")]
-use ts_rs::TS;
 
 use crate::rust::{boxed::Box, string::String, vec::Vec};
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
-#[cfg_attr(feature = "ts", derive(TS), ts(export, export_to = "../../bindings/src/types/"))]
+#[cfg_attr(
+    feature = "ts",
+    derive(ts_rs::TS),
+    ts(export, export_to = "../../bindings/src/types/")
+)]
 pub enum TemplateDef {
     V1(TemplateDefV1),
 }
@@ -59,7 +61,11 @@ impl TemplateDef {
 }
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
-#[cfg_attr(feature = "ts", derive(TS), ts(export, export_to = "../../bindings/src/types/"))]
+#[cfg_attr(
+    feature = "ts",
+    derive(ts_rs::TS),
+    ts(export, export_to = "../../bindings/src/types/")
+)]
 pub struct TemplateDefV1 {
     pub template_name: String,
     pub tari_version: String,
@@ -73,7 +79,11 @@ impl TemplateDefV1 {
 }
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
-#[cfg_attr(feature = "ts", derive(TS), ts(export, export_to = "../../bindings/src/types/"))]
+#[cfg_attr(
+    feature = "ts",
+    derive(ts_rs::TS),
+    ts(export, export_to = "../../bindings/src/types/")
+)]
 pub struct FunctionDef {
     pub name: String,
     pub arguments: Vec<ArgDef>,
@@ -82,14 +92,22 @@ pub struct FunctionDef {
 }
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
-#[cfg_attr(feature = "ts", derive(TS), ts(export, export_to = "../../bindings/src/types/"))]
+#[cfg_attr(
+    feature = "ts",
+    derive(ts_rs::TS),
+    ts(export, export_to = "../../bindings/src/types/")
+)]
 pub struct ArgDef {
     pub name: String,
     pub arg_type: Type,
 }
 
 #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
-#[cfg_attr(feature = "ts", derive(TS), ts(export, export_to = "../../bindings/src/types/"))]
+#[cfg_attr(
+    feature = "ts",
+    derive(ts_rs::TS),
+    ts(export, export_to = "../../bindings/src/types/")
+)]
 pub enum Type {
     #[default]
     Unit,
diff --git a/dan_layer/template_lib/Cargo.toml b/dan_layer/template_lib/Cargo.toml
index a02f3c5ce..5d6629b93 100644
--- a/dan_layer/template_lib/Cargo.toml
+++ b/dan_layer/template_lib/Cargo.toml
@@ -19,7 +19,7 @@ serde = { workspace = true, default-features = false, features = [
 ] }
 serde_with = { workspace = true }
 ts-rs = { workspace = true, optional = true }
-
+borsh = { workspace = true, optional = true }
 
 [dev-dependencies]
 serde_json = { workspace = true }
@@ -29,3 +29,4 @@ default = ["macro", "std"]
 macro = ["tari_template_macros"]
 std = ["serde/std", "tari_bor/std"]
 ts = ["ts-rs"]
+borsh = ["dep:borsh"]
diff --git a/dan_layer/template_lib/src/models/component.rs b/dan_layer/template_lib/src/models/component.rs
index 0abea1ebb..db46c7880 100644
--- a/dan_layer/template_lib/src/models/component.rs
+++ b/dan_layer/template_lib/src/models/component.rs
@@ -105,3 +105,10 @@ impl AsRef<[u8]> for ComponentAddress {
     }
 }
 
 newtype_struct_serde_impl!(ComponentAddress, BorTag);
+
+#[cfg(feature = "borsh")]
+impl borsh::BorshSerialize for ComponentAddress {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        self.as_object_key().array().serialize(writer)
+    }
+}
diff --git a/dan_layer/template_lib/src/models/entity_id.rs b/dan_layer/template_lib/src/models/entity_id.rs
index b9b11b059..b275a9145 100644
--- a/dan_layer/template_lib/src/models/entity_id.rs
+++ b/dan_layer/template_lib/src/models/entity_id.rs
@@ -159,6 +159,10 @@ impl ObjectKey {
         self.0
     }
 
+    pub fn array(&self) -> &[u8; Self::LENGTH] {
+        &self.0
+    }
+
     pub fn from_hex(s: &str) -> Result {
         from_hex(s).map(Self::from_array)
     }
diff --git a/dan_layer/template_lib/src/models/layer_one_commitment.rs b/dan_layer/template_lib/src/models/layer_one_commitment.rs
index a63310cab..8b7c83b3b 100644
--- a/dan_layer/template_lib/src/models/layer_one_commitment.rs
+++ b/dan_layer/template_lib/src/models/layer_one_commitment.rs
@@ -74,3 +74,9 @@ impl FromStr for UnclaimedConfidentialOutputAddress {
         Self::from_hex(s)
     }
 }
+#[cfg(feature = "borsh")]
+impl borsh::BorshSerialize for UnclaimedConfidentialOutputAddress {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        borsh::BorshSerialize::serialize(self.as_object_key().array(), writer)
+    }
+}
diff --git a/dan_layer/template_lib/src/models/non_fungible.rs b/dan_layer/template_lib/src/models/non_fungible.rs
index 962c0305b..dbe460aab 100644
--- a/dan_layer/template_lib/src/models/non_fungible.rs
+++ b/dan_layer/template_lib/src/models/non_fungible.rs
@@ -28,6 +28,7 @@ use crate::{
     derive(ts_rs::TS),
     ts(export, export_to = "../../bindings/src/types/")
 )]
+#[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize))]
 pub enum NonFungibleId {
     U256(#[serde_as(as = "serde_with::Bytes")] [u8; 32]),
     String(String),
@@ -210,6 +211,13 @@ const TAG: u64 = BinaryTag::NonFungibleAddress.as_u64();
 )]
 pub struct NonFungibleAddress(#[cfg_attr(feature = "ts", ts(type = "string"))] BorTag<TAG, NonFungibleAddressContents>);
 
+#[cfg(feature = "borsh")]
+impl borsh::BorshSerialize for NonFungibleAddress {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        borsh::BorshSerialize::serialize(self.0.inner(), writer)
+    }
+}
+
 /// Data used to build a `NonFungibleAddress`
 #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash, PartialOrd, Ord)]
 #[cfg_attr(
@@ -217,6 +225,7 @@ pub struct NonFungibleAddress(#[cfg_attr(feature = "ts", ts(type = "string"))] B
     derive(ts_rs::TS),
     ts(export, export_to = "../../bindings/src/types/")
 )]
+#[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize))]
 pub struct NonFungibleAddressContents {
     resource_address: ResourceAddress,
     id: NonFungibleId,
diff --git a/dan_layer/template_lib/src/models/non_fungible_index.rs b/dan_layer/template_lib/src/models/non_fungible_index.rs
index bd079d493..c440a4e75 100644
--- a/dan_layer/template_lib/src/models/non_fungible_index.rs
+++ b/dan_layer/template_lib/src/models/non_fungible_index.rs
@@ -34,6 +34,7 @@ use super::ResourceAddress;
     derive(ts_rs::TS),
     ts(export, export_to = "../../bindings/src/types/")
 )]
+#[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize))]
 pub struct NonFungibleIndexAddress {
     resource_address: ResourceAddress,
     #[cfg_attr(feature = "ts", ts(type = "number"))]
diff --git a/dan_layer/template_lib/src/models/resource.rs b/dan_layer/template_lib/src/models/resource.rs
index a3c229cbf..d3f15d197 100644
--- a/dan_layer/template_lib/src/models/resource.rs
+++ b/dan_layer/template_lib/src/models/resource.rs
@@ -104,6 +104,13 @@ impl TryFrom<&[u8]> for ResourceAddress {
 
 newtype_struct_serde_impl!(ResourceAddress, BorTag);
 
+#[cfg(feature = "borsh")]
+impl borsh::BorshSerialize for ResourceAddress {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        borsh::BorshSerialize::serialize(self.as_object_key().array(), writer)
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
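All of these manual `BorshSerialize` impls follow one pattern: write just the raw 32-byte object key (a Borsh fixed-size array, so no length prefix). A consumer sketch, assuming the `borsh` feature is enabled:

```rust
use tari_template_lib::models::ComponentAddress;

#[cfg(feature = "borsh")]
fn to_borsh_bytes(addr: &ComponentAddress) -> std::io::Result<Vec<u8>> {
    let mut buf = Vec::new();
    borsh::BorshSerialize::serialize(addr, &mut buf)?;
    Ok(buf) // 32 bytes: the raw ObjectKey
}
```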
diff --git a/dan_layer/template_lib/src/models/vault.rs b/dan_layer/template_lib/src/models/vault.rs
index 3884888b9..01dcfe940 100644
--- a/dan_layer/template_lib/src/models/vault.rs
+++ b/dan_layer/template_lib/src/models/vault.rs
@@ -117,6 +117,13 @@ impl TryFrom<&[u8]> for VaultId {
 
 newtype_struct_serde_impl!(VaultId, BorTag);
 
+#[cfg(feature = "borsh")]
+impl borsh::BorshSerialize for VaultId {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        borsh::BorshSerialize::serialize(self.as_object_key().array(), writer)
+    }
+}
+
 /// Encapsulates all the ways that a vault can be referenced
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub enum VaultRef {
diff --git a/integration_tests/tests/features/eviction.feature b/integration_tests/tests/features/eviction.feature
index 168cf6db5..e6ff5dd0a 100644
--- a/integration_tests/tests/features/eviction.feature
+++ b/integration_tests/tests/features/eviction.feature
@@ -5,7 +5,6 @@
 @eviction
 Feature: Eviction scenarios
 
-  @flaky
   Scenario: Offline validator gets evicted
     # Initialize a base node, wallet, miner and several VNs
     Given a base node BASE