diff --git a/src/accumulator/proof.rs b/src/accumulator/proof.rs
index 34e6525..05f85bd 100644
--- a/src/accumulator/proof.rs
+++ b/src/accumulator/proof.rs
@@ -1,6 +1,8 @@
 use crate::accumulator::{stump::Stump, types, util};
 use bitcoin_hashes::sha256;
+use super::{stump::UpdateData, util::get_proof_positions};
+
 #[derive(Debug, Default)]
 /// A proof is a collection of hashes and positions. Each target position
 /// points to a leaf to be proven. Hashes are all
@@ -23,7 +25,7 @@ pub struct Proof {
 /// would be [05] as you need 04 and 05 to hash to 06. 04 can be calculated
 /// by hashing 00 and 01.
 ///```!
-/// //              06 
+/// //              06
 /// //              |-------\
 /// //              04      05
 /// //              |---\   |---\
@@ -58,13 +60,13 @@ impl Proof {
     /// use bitcoin_hashes::{sha256, Hash, HashEngine};
     /// use rustreexo::accumulator::{proof::Proof};
     /// let targets = vec![0];
-    /// 
+    ///
    /// let mut proof_hashes = Vec::new();
     /// let targets = vec![0];
     /// // For proving 0, we need 01, 09 and 13's hashes. 00, 08, 12 and 14 can be calculated
     /// //Fill `proof_hashes` up with all hashes
     /// Proof::new(targets, proof_hashes);
-    /// ``` 
+    /// ```
     pub fn new(targets: Vec<u64>, hashes: Vec<sha256::Hash>) -> Self {
         Proof {
             targets: targets,
@@ -84,7 +86,7 @@ impl Proof {
     /// let test_values:Vec<u8> = vec![0, 1, 2, 3, 4, 5, 6, 7];
     /// // Targets are nodes witch we intend to prove
     /// let targets = vec![0];
-    /// 
+    ///
     /// let mut proof_hashes = Vec::new();
     /// // This tree will look like this
     /// // 14
@@ -148,7 +150,7 @@ impl Proof {
     /// those targets are deleted. In this context null means [sha256::Hash::default].
     ///
     /// It's the caller's responsibility to null out the targets if desired by
-    /// passing a `bitcoin_hashes::sha256::Hash::default()` instead of the actual hash.
+    /// passing a [Hash::default()](`bitcoin_hashes::sha256::Hash::default()`) instead of the actual hash.
     pub(crate) fn calculate_hashes(
         &self,
         del_hashes: &Vec<sha256::Hash>,
@@ -241,6 +243,293 @@ impl Proof {
         Ok((nodes, calculated_root_hashes))
     }
+    /// Uses the data passed in to update a proof, creating a valid proof for a given
+    /// set of targets after an update. This is useful for caching UTXOs: you grab a proof
+    /// for them once and then keep updating it every block, yielding an always-valid proof
+    /// over those UTXOs.
+    pub fn update(
+        self,
+        cached_hashes: Vec<sha256::Hash>,
+        add_hashes: Vec<sha256::Hash>,
+        block_targets: Vec<u64>,
+        remembers: Vec<u64>,
+        update_data: UpdateData,
+    ) -> Result<(Proof, Vec<sha256::Hash>), String> {
+        let (proof_after_deletion, cached_hashes) = self.update_proof_remove(
+            block_targets,
+            cached_hashes.clone(),
+            update_data.new_del,
+            update_data.prev_num_leaves,
+        )?;
+
+        let data_after_addition = proof_after_deletion.update_proof_add(
+            add_hashes,
+            cached_hashes.clone(),
+            remembers,
+            update_data.new_add,
+            update_data.prev_num_leaves,
+            update_data.to_destroy,
+        )?;
+
+        Ok(data_after_addition)
+    }
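The new public entry point is meant to be driven once per block, right after the accumulator itself is updated. A minimal usage sketch follows; it only relies on calls that appear in the tests further down (`Stump::modify`, `Proof::update`, `Proof::verify`), and the function name, parameter names and the `rustreexo::accumulator::stump::Stump` import path are illustrative assumptions rather than part of this patch.

```rust
use bitcoin_hashes::sha256;
use rustreexo::accumulator::{proof::Proof, stump::Stump};

/// Rolls a wallet's cached proof forward by one block (names are illustrative).
/// `utxos` are the block's new leaf hashes; `del_hashes`, `block_proof` and
/// `block_targets` describe the leaves this block spends; `remembers` indexes
/// into `utxos` for the outputs we want to keep proving.
fn roll_forward(
    stump: Stump,
    cached_proof: Proof,
    cached_hashes: Vec<sha256::Hash>,
    utxos: Vec<sha256::Hash>,
    del_hashes: Vec<sha256::Hash>,
    block_proof: Proof,
    block_targets: Vec<u64>,
    remembers: Vec<u64>,
) -> Result<(Stump, Proof, Vec<sha256::Hash>), String> {
    // Apply the block to the accumulator; `update_data` records every node
    // created, moved or destroyed by this modification.
    let (new_stump, update_data) = stump.modify(&utxos, &del_hashes, &block_proof)?;
    // Replay the same changes onto the cached proof so it keeps matching `new_stump`.
    let (new_proof, new_cached_hashes) =
        cached_proof.update(cached_hashes, utxos, block_targets, remembers, update_data)?;
    // The invariant the test vectors below check after every block:
    assert_eq!(new_proof.verify(&new_cached_hashes, &new_stump), Ok(true));
    Ok((new_stump, new_proof, new_cached_hashes))
}
```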
+    fn update_proof_add(
+        self,
+        adds: Vec<sha256::Hash>,
+        cached_del_hashes: Vec<sha256::Hash>,
+        remembers: Vec<u64>,
+        new_nodes: Vec<(u64, sha256::Hash)>,
+        before_num_leaves: u64,
+        to_destroy: Vec<u64>,
+    ) -> Result<(Proof, Vec<sha256::Hash>), String> {
+        // Combine the hashes with the targets.
+        let orig_targets_with_hash: Vec<(u64, sha256::Hash)> = self
+            .targets
+            .to_owned()
+            .into_iter()
+            .zip(cached_del_hashes.into_iter())
+            .collect();
+
+        // Attach positions to the proof.
+        let proof_pos = get_proof_positions(
+            &self.targets,
+            before_num_leaves,
+            util::tree_rows(before_num_leaves),
+        );
+        let proof_with_pos = proof_pos
+            .clone()
+            .into_iter()
+            .zip(self.hashes.clone())
+            .collect();
+
+        // Remap the positions if we moved up a row after the addition.
+        let targets_after_remap = Proof::maybe_remap(
+            before_num_leaves,
+            adds.len() as u64,
+            orig_targets_with_hash.clone(),
+        );
+        let mut final_targets = targets_after_remap.clone();
+        let mut new_nodes_iter = new_nodes.iter();
+        let proof_with_pos =
+            Proof::maybe_remap(before_num_leaves, adds.len() as u64, proof_with_pos);
+        // `remembers` holds the indexes of the newly created UTXOs that should be cached
+        for remember in remembers {
+            let remember_target: Option<&sha256::Hash> = adds.get(remember as usize);
+            if let Some(remember_target) = remember_target {
+                let node = new_nodes_iter.find(|(_, hash)| *hash == *remember_target);
+                if let Some((pos, hash)) = node {
+                    final_targets.push((*pos, *hash));
+                }
+            }
+        }
+
+        final_targets.sort();
+
+        let num_leaves = before_num_leaves + (adds.len() as u64);
+        let (mut new_target_pos, target_hashes): (Vec<_>, Vec<_>) =
+            final_targets.clone().into_iter().unzip();
+        let mut new_proof_pos = proof_pos.clone();
+        // Move up positions that need to be moved up due to the empty roots
+        // being written over.
+        for node in to_destroy {
+            new_target_pos =
+                Proof::calc_next_positions(&vec![node], &final_targets, num_leaves, true)?.0;
+            new_proof_pos =
+                Proof::calc_next_positions(&vec![node], &proof_with_pos, num_leaves, true)?.0;
+        }
+        // Grab all the new nodes after this add.
+        let mut needed_proof_positions =
+            util::get_proof_positions(&new_target_pos, num_leaves, util::tree_rows(num_leaves));
+        needed_proof_positions.sort();
+        // We'll use all elements from the old proof, as addition only creates new nodes
+        // in our proof (except for root destruction). But before using it, we have to
+        // compute the new positions, as adding new elements may move existing elements a few
+        // rows up.
+        let new_proof: Vec<_> = new_proof_pos.into_iter().zip(self.hashes).collect();
+        let mut new_proof = Self::maybe_remap(before_num_leaves, adds.len() as u64, new_proof);
+        // Iterate over the needed positions and grab them from new_nodes.
+        // All proof elements must come from the old proof or new_nodes. Old proof elements
+        // are already in new_proof, so every missing element must be in new_nodes.
+        for pos in needed_proof_positions {
+            for (new_node_pos, hash) in new_nodes.iter() {
+                if *new_node_pos == pos {
+                    new_proof.push((pos, *hash));
+                } else {
+                    // This node must be in either new_nodes or the old proof, otherwise we can't
+                    // update our proof
+                    if let None = new_proof.iter().find(|(proof_pos, _)| *proof_pos == pos) {
+                        return Err(format!("Missing position {}", pos));
+                    }
+                }
+            }
+        }
+        new_proof.sort();
+
+        let (_, hashes): (Vec<u64>, Vec<sha256::Hash>) = new_proof.into_iter().unzip();
+        Ok((
+            Proof {
+                hashes,
+                targets: new_target_pos,
+            },
+            target_hashes,
+        ))
+    }
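Both update paths lean on `util::get_proof_positions`, which maps a set of targets to the positions whose hashes a proof must supply. A small sketch against the 8-leaf tree drawn in the `Proof::new` docs above; the expected values restate what those docs already say, the result is sorted before comparing since no ordering is promised, and the test name and module path are assumptions.

```rust
#[test]
fn proof_positions_for_leaf_zero() {
    use crate::accumulator::util;

    // The 8-leaf forest from the docs: leaves 00..=07, row 1 is 08..=11,
    // row 2 is 12..=13 and the root is 14.
    let num_leaves = 8;
    let mut needed =
        util::get_proof_positions(&vec![0], num_leaves, util::tree_rows(num_leaves));
    needed.sort();
    // Proving 00 takes 01 (its sibling), 09 (08's sibling) and 13 (12's sibling):
    // 00+01 -> 08, 08+09 -> 12, 12+13 -> 14.
    assert_eq!(needed, vec![1, 9, 13]);
}
```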
+    /// maybe_remap remaps the passed-in positions and hashes if `tree_rows` increases after
+    /// adding the new nodes.
+    fn maybe_remap(
+        num_leaves: u64,
+        num_adds: u64,
+        positions: Vec<(u64, sha256::Hash)>,
+    ) -> Vec<(u64, sha256::Hash)> {
+        let new_forest_rows = util::tree_rows(num_leaves + num_adds);
+        let old_forest_rows = util::tree_rows(num_leaves);
+        let tree_rows = util::tree_rows(num_leaves);
+        let mut new_proofs = vec![];
+        if new_forest_rows > old_forest_rows {
+            for (pos, hash) in positions.iter() {
+                let row = util::detect_row(*pos, tree_rows);
+
+                let old_start_pos = util::start_position_at_row(row, old_forest_rows);
+                let new_start_pos = util::start_position_at_row(row, new_forest_rows);
+
+                let offset = pos - old_start_pos;
+                let new_pos = offset + new_start_pos;
+                new_proofs.push((new_pos, *hash));
+            }
+            return new_proofs;
+        }
+
+        positions
+    }
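The remap arithmetic is easiest to see with numbers. `start_position_at_row` (added to util.rs further down) gives each row's first position, and `maybe_remap` keeps a node's offset within its row while that base moves. A sketch, with values computed straight from the formula in this diff; the test name and module path are assumptions.

```rust
#[test]
fn row_bases_move_when_the_forest_grows() {
    use crate::accumulator::util;

    // With forest_rows = 3 (an 8-leaf forest) the rows start at 0, 8, 12 and 14.
    assert_eq!(util::start_position_at_row(0, 3), 0);
    assert_eq!(util::start_position_at_row(1, 3), 8);
    assert_eq!(util::start_position_at_row(2, 3), 12);
    assert_eq!(util::start_position_at_row(3, 3), 14);
    // Growing to forest_rows = 4 moves row 1's base from 8 to 16.
    assert_eq!(util::start_position_at_row(1, 4), 16);

    // maybe_remap keeps a node's offset inside its row while the base moves:
    // position 9 (row 1, offset 1) in the old forest becomes 16 + 1 = 17 in the new one.
    let old_pos: u64 = 9;
    let new_pos =
        util::start_position_at_row(1, 4) + (old_pos - util::start_position_at_row(1, 3));
    assert_eq!(new_pos, 17);
}
```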
+
+    /// update_proof_remove modifies the cached proof with the deletions that happen in the block proof.
+    /// It updates the necessary proof hashes and un-caches the targets that are being deleted.
+    fn update_proof_remove(
+        self,
+        block_targets: Vec<u64>,
+        cached_hashes: Vec<sha256::Hash>,
+        updated: Vec<(u64, sha256::Hash)>,
+        num_leaves: u64,
+    ) -> Result<(Proof, Vec<sha256::Hash>), String> {
+        let total_rows = util::tree_rows(num_leaves);
+
+        let targets_with_hash: Vec<(u64, bitcoin_hashes::sha256::Hash)> = self
+            .targets
+            .clone()
+            .into_iter()
+            .zip(cached_hashes.clone().into_iter())
+            .filter(|(pos, _)| !block_targets.contains(pos))
+            .collect();
+
+        let (targets, _): (Vec<_>, Vec<_>) = targets_with_hash.to_owned().into_iter().unzip();
+        let proof_positions =
+            util::get_proof_positions(&self.targets, num_leaves, util::tree_rows(num_leaves));
+
+        let old_proof: Vec<_> = proof_positions.iter().zip(self.hashes.iter()).collect();
+
+        let mut new_proof = vec![];
+        // Grab all the positions of the needed proof hashes.
+        let needed_pos = util::get_proof_positions(&targets, num_leaves, total_rows);
+
+        let old_proof_iter = old_proof.iter();
+        // Loop through the old proof and only add the needed proof hashes.
+        for (pos, hash) in old_proof_iter {
+            // Some positions might not be useful anymore, due to deleted targets
+            if needed_pos.contains(*pos) {
+                // Grab each position from the old proof; if it changed, take the new
+                // hash from `updated`
+                if let Some((_, updated_hash)) =
+                    updated.iter().find(|(updated_pos, _)| *pos == updated_pos)
+                {
+                    if *updated_hash != sha256::Hash::default() {
+                        new_proof.push((**pos, *updated_hash));
+                    }
+                } else {
+                    // If it didn't change, take the value from the old proof
+                    if **hash != sha256::Hash::default() {
+                        new_proof.push((**pos, **hash));
+                    }
+                }
+            }
+        }
+
+        let missing_positions = needed_pos
+            .into_iter()
+            .filter(|pos| !proof_positions.contains(pos) && !block_targets.contains(pos));
+
+        for missing in missing_positions {
+            if let Some((_, hash)) = updated
+                .iter()
+                .find(|(updated_pos, _)| missing == *updated_pos)
+            {
+                if *hash != sha256::Hash::default() {
+                    new_proof.push((missing, *hash));
+                }
+            }
+        }
+
+        // We need to remap all proof hashes and sort them, otherwise our hash will be wrong.
+        // This happens because deletion moves nodes upwards, and some of these nodes may be
+        // proof elements. If so, we move them to their new positions. After that the vector
+        // is probably unsorted, so we sort it.
+        let (pos, hashes): (Vec<u64>, Vec<sha256::Hash>) =
+            Proof::calc_next_positions(&block_targets, &new_proof, num_leaves, true)?;
+
+        let mut proof_elements: Vec<_> = pos.into_iter().zip(hashes.into_iter()).collect();
+
+        proof_elements.sort();
+        // Grab the hashes for the proof
+        let (_, hashes): (Vec<u64>, Vec<sha256::Hash>) = proof_elements.into_iter().unzip();
+        // Get all proof targets, but with their new positions after the deletion
+        let (targets, target_hashes) =
+            Proof::calc_next_positions(&block_targets, &targets_with_hash, num_leaves, true)?;
+
+        Ok((Proof { hashes, targets }, target_hashes))
+    }
+
+    fn calc_next_positions(
+        block_targets: &Vec<u64>,
+        old_positions: &Vec<(u64, sha256::Hash)>,
+        num_leaves: u64,
+        append_roots: bool,
+    ) -> Result<(Vec<u64>, Vec<sha256::Hash>), String> {
+        let total_rows = util::tree_rows(num_leaves);
+        let mut new_positions = vec![];
+
+        let block_targets = util::detwin(block_targets.to_owned(), total_rows);
+
+        for (position, hash) in old_positions {
+            if *hash == sha256::Hash::default() {
+                continue;
+            }
+            let mut next_pos = *position;
+            for target in block_targets.iter() {
+                if util::is_root_position(next_pos, num_leaves, total_rows) {
+                    break;
+                }
+                // If these positions are in different subtrees, continue.
+                let (sub_tree, _, _) = util::detect_offset(*target, num_leaves);
+                let (sub_tree1, _, _) = util::detect_offset(next_pos, num_leaves);
+                if sub_tree != sub_tree1 {
+                    continue;
+                }
+
+                if util::is_ancestor(util::parent(*target, total_rows), next_pos, total_rows)? {
+                    next_pos = util::calc_next_pos(next_pos, *target, total_rows)?;
+                }
+            }
+
+            if append_roots {
+                new_positions.push((next_pos, *hash));
+            } else {
+                if !util::is_root_position(next_pos, num_leaves, total_rows) {
+                    new_positions.push((next_pos, *hash));
+                }
+            }
+        }
+        new_positions.sort();
+        let (new_positions, new_hashes) = new_positions.into_iter().unzip();
+        Ok((new_positions, new_hashes))
+    }
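On the deletion side, `calc_next_positions` re-homes positions whose subtree changed because a sibling was deleted: the deleted node's sibling is promoted into the parent's slot, and everything tracked underneath follows it. A minimal sketch of the simplest case, in the style of `test_calc_next_positions` below; the test name is an assumption and the hash is only a payload (sha256 of the byte 0x00, the same leaf-0 digest used in the fixtures).

```rust
#[test]
fn deleting_a_leaf_moves_its_sibling_up() {
    use std::str::FromStr;

    use super::Proof;

    // Any non-default hash works here; this is sha256 of the single byte 0x00.
    let hash = bitcoin_hashes::sha256::Hash::from_str(
        "6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d",
    )
    .unwrap();

    // 8-leaf forest: deleting leaf 1 promotes its sibling into their parent's slot,
    // so whatever we were tracking at position 0 must now be looked up at position 8.
    let (new_positions, _) =
        Proof::calc_next_positions(&vec![1], &vec![(0, hash)], 8, true).unwrap();
    assert_eq!(new_positions, vec![8]);
}
```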
 
     fn sorted_push(
         nodes: &mut Vec<(u64, bitcoin_hashes::sha256::Hash)>,
@@ -255,7 +544,7 @@ impl Proof {
 mod tests {
     use std::str::FromStr;
 
-    use bitcoin_hashes::{sha256, Hash, HashEngine};
+    use bitcoin_hashes::{hex::FromHex, sha256, Hash, HashEngine};
     use serde::Deserialize;
 
     use super::Proof;
@@ -269,6 +558,275 @@ mod tests {
         proofhashes: Vec<String>,
         expected: bool,
     }
+    /// This test checks whether our update proof works for different scenarios. We start
+    /// with a (valid) cached proof, then we receive `blocks`, like we would in normal Bitcoin
+    /// operation, but for this test a block is just random data. For each block we update our
+    /// Stump and our proof as well; after that, our proof **must** still be valid for the
+    /// latest Stump.
+    ///
+    /// Fix-me: Using derive for Deserialize while also using sha256::Hash leads to an odd
+    /// error that can't be easily fixed; even bumping the version doesn't appear to help.
+    /// Deserializing into hashes directly would reduce the amount of boilerplate code and
+    /// make everything clearer, hence it would be preferable.
+    #[test]
+    fn test_update_proof() {
+        #[derive(Debug, Deserialize)]
+        struct JsonProof {
+            targets: Vec<u64>,
+            hashes: Vec<String>,
+        }
+        #[derive(Debug, Deserialize)]
+        struct UpdatedData {
+            /// The newly created utxos to be added to our accumulator
+            adds: Vec<u64>,
+            /// The proof for all destroyed utxos
+            proof: JsonProof,
+            /// The hashes of all destroyed utxos
+            del_hashes: Vec<String>,
+        }
+        #[derive(Debug, Deserialize)]
+        struct TestData {
+            /// Blocks contain new utxos and utxos that are being deleted
+            update: UpdatedData,
+            /// The proof we have for our wallet's utxos
+            cached_proof: JsonProof,
+            /// An initial set of roots, may be empty for starting with an empty stump
+            initial_roots: Vec<String>,
+            /// The number of leaves in the initial Stump
+            initial_leaves: u64,
+            /// The hashes of all the wallet's utxos
+            cached_hashes: Vec<String>,
+            /// The indexes of all the new utxos to cache
+            remembers: Vec<u64>,
+            /// After we update our stump, which roots do we expect?
+            expected_roots: Vec<String>,
+            /// After we update the proof, the proof's targets should be this
+            expected_targets: Vec<u64>,
+            /// After we update the proof, the cached hashes should be this
+            expected_cached_hashes: Vec<String>,
+        }
+        let contents = std::fs::read_to_string("test_values/cached_proof_tests.json")
+            .expect("Something went wrong reading the file");
+
+        let values: Vec<TestData> =
+            serde_json::from_str(contents.as_str()).expect("JSON deserialization error");
+        for case_values in values {
+            let proof_hashes = case_values
+                .cached_proof
+                .hashes
+                .iter()
+                .map(|val| sha256::Hash::from_str(val).unwrap())
+                .collect();
+            let cached_hashes: Vec<_> = case_values
+                .cached_hashes
+                .iter()
+                .map(|val| sha256::Hash::from_str(val).unwrap())
+                .collect();
+
+            let cached_proof = Proof::new(case_values.cached_proof.targets, proof_hashes);
+            let roots = case_values
+                .initial_roots
+                .into_iter()
+                .map(|hash| sha256::Hash::from_hex(&hash).unwrap())
+                .collect();
+
+            let stump = Stump {
+                leafs: case_values.initial_leaves,
+                roots,
+            };
+
+            let utxos = case_values
+                .update
+                .adds
+                .iter()
+                .map(|preimage| hash_from_u8(*preimage as u8))
+                .collect();
+            let del_hashes = case_values
+                .update
+                .del_hashes
+                .iter()
+                .map(|hash| sha256::Hash::from_hex(hash).unwrap())
+                .collect();
+            let block_proof_hashes: Vec<_> = case_values
+                .update
+                .proof
+                .hashes
+                .iter()
+                .map(|hash| sha256::Hash::from_hex(hash).unwrap())
+                .collect();
+
+            let block_proof =
+                Proof::new(case_values.update.proof.targets.clone(), block_proof_hashes);
+            let (stump, updated) = stump.modify(&utxos, &del_hashes, &block_proof).unwrap();
+
+            let (cached_proof, cached_hashes) = cached_proof
+                .update(
+                    cached_hashes.clone(),
+                    utxos,
+                    case_values.update.proof.targets,
+                    case_values.remembers,
+                    updated.clone(),
+                )
+                .unwrap();
+
+            let res = cached_proof.verify(&cached_hashes, &stump);
+
+            let expected_roots: Vec<_> = case_values
+                .expected_roots
+                .iter()
+                .map(|hash| sha256::Hash::from_hex(hash).unwrap())
+                .collect();
+
+            let expected_cached_hashes: Vec<_> = case_values
+                .expected_cached_hashes
+                .iter()
+                .map(|hash| sha256::Hash::from_hex(hash).unwrap())
+                .collect();
+            assert_eq!(res, Ok(true));
+            assert_eq!(cached_proof.targets, case_values.expected_targets);
+            assert_eq!(stump.roots, expected_roots);
+            assert_eq!(cached_hashes, expected_cached_hashes);
+        }
+    }
+    #[test]
+    fn test_calc_next_positions() {
+        use super::Proof;
+
+        #[derive(Clone)]
+        struct Test {
+            name: &'static str,
+            block_targets: Vec<u64>,
+            old_positions: Vec<(u64, sha256::Hash)>,
+            num_leaves: u64,
+            num_adds: u64,
+            append_roots: bool,
+
expected: (Vec, Vec), + } + + let tests = vec![Test { + name: "One empty root deleted", + block_targets: vec![26], + old_positions: vec![ + ( + 1, + bitcoin_hashes::sha256::Hash::from_str( + "4bf5122f344554c53bde2ebb8cd2b7e3d1600ad631c385a5d7cce23c7785459a", + ) + .unwrap(), + ), + ( + 13, + bitcoin_hashes::sha256::Hash::from_str( + "9d1e0e2d9459d06523ad13e28a4093c2316baafe7aec5b25f30eba2e113599c4", + ) + .unwrap(), + ), + ( + 17, + bitcoin_hashes::sha256::Hash::from_str( + "9576f4ade6e9bc3a6458b506ce3e4e890df29cb14cb5d3d887672aef55647a2b", + ) + .unwrap(), + ), + ( + 25, + bitcoin_hashes::sha256::Hash::from_str( + "29590a14c1b09384b94a2c0e94bf821ca75b62eacebc47893397ca88e3bbcbd7", + ) + .unwrap(), + ), + ], + num_leaves: 14, + num_adds: 2, + append_roots: false, + expected: ( + vec![1, 17, 21, 25], + vec![ + bitcoin_hashes::sha256::Hash::from_str( + "4bf5122f344554c53bde2ebb8cd2b7e3d1600ad631c385a5d7cce23c7785459a", + ) + .unwrap(), + bitcoin_hashes::sha256::Hash::from_str( + "9576f4ade6e9bc3a6458b506ce3e4e890df29cb14cb5d3d887672aef55647a2b", + ) + .unwrap(), + bitcoin_hashes::sha256::Hash::from_str( + "9d1e0e2d9459d06523ad13e28a4093c2316baafe7aec5b25f30eba2e113599c4", + ) + .unwrap(), + bitcoin_hashes::sha256::Hash::from_str( + "29590a14c1b09384b94a2c0e94bf821ca75b62eacebc47893397ca88e3bbcbd7", + ) + .unwrap(), + ], + ), + }]; + + for test in tests { + let res = Proof::calc_next_positions( + &test.block_targets, + &test.old_positions, + test.num_leaves + test.num_adds, + test.append_roots, + ) + .unwrap(); + + assert_eq!(res, test.expected, "testcase: \"{}\" fail", test.name); + } + } + #[test] + fn test_update_proof_delete() { + let preimages = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; + let hashes = preimages + .into_iter() + .map(|preimage| hash_from_u8(preimage)) + .collect(); + let (stump, _) = Stump::new() + .modify(&hashes, &vec![], &Proof::default()) + .unwrap(); + + let proof_hashes = vec![ + "6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d", + "084fed08b978af4d7d196a7446a86b58009e636b611db16211b65a9aadff29c5", + "ca358758f6d27e6cf45272937977a748fd88391db679ceda7dc7bf1f005ee879", + "9eec588c41d87b16b0ee226cb38da3864f9537632321d8be855a73d5616dcc73", + ]; + let proof_hashes = proof_hashes + .into_iter() + .map(|hash| sha256::Hash::from_str(hash).unwrap()) + .collect(); + + let cached_proof_hashes = [ + "67586e98fad27da0b9968bc039a1ef34c939b9b8e523a8bef89d478608c5ecf6", + "9576f4ade6e9bc3a6458b506ce3e4e890df29cb14cb5d3d887672aef55647a2b", + "9eec588c41d87b16b0ee226cb38da3864f9537632321d8be855a73d5616dcc73", + ]; + let cached_proof_hashes = cached_proof_hashes + .iter() + .map(|hash| sha256::Hash::from_str(hash).unwrap()) + .collect(); + let cached_proof = Proof::new(vec![0, 1, 7], cached_proof_hashes); + + let proof = Proof::new(vec![1, 2, 6], proof_hashes); + + let (stump, modified) = stump + .modify( + &vec![], + &vec![hash_from_u8(1), hash_from_u8(2), hash_from_u8(6)], + &proof, + ) + .unwrap(); + let (new_proof, _) = cached_proof + .update_proof_remove( + vec![1, 2, 6], + vec![hash_from_u8(0), hash_from_u8(1), hash_from_u8(7)], + modified.new_del, + 10, + ) + .unwrap(); + + let res = new_proof.verify(&vec![hash_from_u8(0), hash_from_u8(7)], &stump); + assert_eq!(res, Ok(true)); + } fn hash_from_u8(value: u8) -> bitcoin_hashes::sha256::Hash { let mut engine = bitcoin_hashes::sha256::Hash::engine(); @@ -390,7 +948,6 @@ mod tests { let res = p.verify(&del_hashes, &s); assert!(Ok(expected) == res); } - #[test] fn test_proof_verify() { let contents = 
std::fs::read_to_string("test_values/test_cases.json") diff --git a/src/accumulator/stump.rs b/src/accumulator/stump.rs index fc2cf71..8c11667 100644 --- a/src/accumulator/stump.rs +++ b/src/accumulator/stump.rs @@ -233,7 +233,7 @@ mod test { new_del_hashes: Vec, to_destroy: Vec, } - let contents = std::fs::read_to_string("test_values/cache_tests.json") + let contents = std::fs::read_to_string("test_values/update_data_tests.json") .expect("Something went wrong reading the file"); let tests = serde_json::from_str::>(contents.as_str()) diff --git a/src/accumulator/util.rs b/src/accumulator/util.rs index a497db0..c04aae5 100644 --- a/src/accumulator/util.rs +++ b/src/accumulator/util.rs @@ -68,7 +68,14 @@ pub fn detwin(nodes: Vec, forest_rows: u8) -> Vec { dels_after } - +// start_position_at_row returns the smallest position an accumulator can have for the +// requested row for the given numLeaves. +pub fn start_position_at_row(row: u8, forest_rows: u8) -> u64 { + // 2 << forest_rows is 2 more than the max position + // to get the correct offset for a given row, + // subtract (2 << `row complement of forest_rows`) from (2 << forest_rows) + (2 << forest_rows) - (2 << (forest_rows - row)) as u64 +} fn add_and_sort(mut vec: Vec, value: u64) -> Vec { vec.push(value); vec.sort(); diff --git a/test_values/cached_proof_tests.json b/test_values/cached_proof_tests.json new file mode 100644 index 0000000..297130e --- /dev/null +++ b/test_values/cached_proof_tests.json @@ -0,0 +1,339 @@ +[ + { + "update": { + "adds": [20, 21, 22], + "proof": { + "targets": [0, 1], + "hashes": [ + "9576f4ade6e9bc3a6458b506ce3e4e890df29cb14cb5d3d887672aef55647a2b", + "29590a14c1b09384b94a2c0e94bf821ca75b62eacebc47893397ca88e3bbcbd7" + ] + }, + "del_hashes": [ + "6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d", + "4bf5122f344554c53bde2ebb8cd2b7e3d1600ad631c385a5d7cce23c7785459a" + ] + }, + "remembers": [], + "cached_proof": { + "targets": [4, 6, 7], + "hashes": [ + "e77b9a9ae9e30b0dbdb6f510a264ef9de781501d7b6b92ae89eb059c5ab743db", + "df46b17be5f66f0750a4b3efa26d4679db170a72d41eb56c3e4ff75a58c65386" + ] + }, + "initial_stump": "Add 8 leaves [0, 1, 2, 3, 4, 5, 6, 7]", + "initial_roots": [ + "b151a956139bb821d4effa34ea95c17560e0135d1e4661fc23cedc3af49dac42" + ], + "initial_leaves": 8, + "cached_hashes": [ + "e52d9c508c502347344d8c07ad91cbd6068afc75ff6292f062a09ca381c89e71", + "67586e98fad27da0b9968bc039a1ef34c939b9b8e523a8bef89d478608c5ecf6", + "ca358758f6d27e6cf45272937977a748fd88391db679ceda7dc7bf1f005ee879" + ], + "expected_roots": [ + "97491b30a42410dc3267d17933cf5e1b55cfb92ebab2dcf1bcd098032dacee95", + "0d451c2d9366705a3557fd038c25a40a348cc0effa2dd00dfaaba65749cd0915", + "7cb7c4547cf2653590d7a9ace60cc623d25148adfbc88a89aeb0ef88da7839ba" + ], + "expected_targets": [4, 6, 7], + "expected_cached_hashes": [ + "e52d9c508c502347344d8c07ad91cbd6068afc75ff6292f062a09ca381c89e71", + "67586e98fad27da0b9968bc039a1ef34c939b9b8e523a8bef89d478608c5ecf6", + "ca358758f6d27e6cf45272937977a748fd88391db679ceda7dc7bf1f005ee879" + ] + }, + { + "update": { + "adds": [8], + "proof": { + "targets": [0, 1, 4], + "hashes": [ + "e77b9a9ae9e30b0dbdb6f510a264ef9de781501d7b6b92ae89eb059c5ab743db", + "9576f4ade6e9bc3a6458b506ce3e4e890df29cb14cb5d3d887672aef55647a2b", + "34028bbc87000c39476cdc60cf80ca32d579b3a0e2d3f80e0ad8c3739a01aa91" + ] + }, + "del_hashes": [ + "6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d", + "4bf5122f344554c53bde2ebb8cd2b7e3d1600ad631c385a5d7cce23c7785459a", + 
"e52d9c508c502347344d8c07ad91cbd6068afc75ff6292f062a09ca381c89e71" + ] + }, + "remembers": [0], + "cached_proof": { + "targets": [4, 6, 7], + "hashes": [ + "e77b9a9ae9e30b0dbdb6f510a264ef9de781501d7b6b92ae89eb059c5ab743db", + "df46b17be5f66f0750a4b3efa26d4679db170a72d41eb56c3e4ff75a58c65386" + ] + }, + "initial_stump": "Add 8 leaves [0, 1, 2, 3, 4, 5, 6, 7]", + "initial_roots": [ + "b151a956139bb821d4effa34ea95c17560e0135d1e4661fc23cedc3af49dac42" + ], + "initial_leaves": 8, + "cached_hashes": [ + "e52d9c508c502347344d8c07ad91cbd6068afc75ff6292f062a09ca381c89e71", + "67586e98fad27da0b9968bc039a1ef34c939b9b8e523a8bef89d478608c5ecf6", + "ca358758f6d27e6cf45272937977a748fd88391db679ceda7dc7bf1f005ee879" + ], + "expected_roots": [ + "7f6df6757821144278f5d535261ab4ffc55e2618f7bc62f1a19e7d289061b08b", + "beead77994cf573341ec17b58bbf7eb34d2711c993c1d976b128b3188dc1829a" + ], + "expected_targets": [6, 7, 8], + "expected_cached_hashes": [ + "67586e98fad27da0b9968bc039a1ef34c939b9b8e523a8bef89d478608c5ecf6", + "ca358758f6d27e6cf45272937977a748fd88391db679ceda7dc7bf1f005ee879", + "beead77994cf573341ec17b58bbf7eb34d2711c993c1d976b128b3188dc1829a" + ] + }, + { + "update": { + "adds": [8], + "proof": { + "targets": [4], + "hashes": [ + "e77b9a9ae9e30b0dbdb6f510a264ef9de781501d7b6b92ae89eb059c5ab743db", + "34028bbc87000c39476cdc60cf80ca32d579b3a0e2d3f80e0ad8c3739a01aa91", + "df46b17be5f66f0750a4b3efa26d4679db170a72d41eb56c3e4ff75a58c65386" + ] + }, + "del_hashes": [ + "e52d9c508c502347344d8c07ad91cbd6068afc75ff6292f062a09ca381c89e71" + ] + }, + "remembers": [0], + "cached_proof": { + "targets": [4, 5, 7], + "hashes": [ + "67586e98fad27da0b9968bc039a1ef34c939b9b8e523a8bef89d478608c5ecf6", + "df46b17be5f66f0750a4b3efa26d4679db170a72d41eb56c3e4ff75a58c65386" + ] + }, + "initial_stump": "Add 8 leaves [0, 1, 2, 3, 4, 5, 6, 7]", + "initial_roots": [ + "b151a956139bb821d4effa34ea95c17560e0135d1e4661fc23cedc3af49dac42" + ], + "initial_leaves": 8, + "cached_hashes": [ + "e52d9c508c502347344d8c07ad91cbd6068afc75ff6292f062a09ca381c89e71", + "e77b9a9ae9e30b0dbdb6f510a264ef9de781501d7b6b92ae89eb059c5ab743db", + "ca358758f6d27e6cf45272937977a748fd88391db679ceda7dc7bf1f005ee879" + ], + "expected_roots": [ + "f1e8f77fb6c03d3e62acc1824e6671da21bb643f362a9af5171167bfdfbbb9cd", + "beead77994cf573341ec17b58bbf7eb34d2711c993c1d976b128b3188dc1829a" + ], + "expected_targets": [7, 8, 18], + "expected_cached_hashes": [ + "ca358758f6d27e6cf45272937977a748fd88391db679ceda7dc7bf1f005ee879", + "beead77994cf573341ec17b58bbf7eb34d2711c993c1d976b128b3188dc1829a", + "e77b9a9ae9e30b0dbdb6f510a264ef9de781501d7b6b92ae89eb059c5ab743db" + ] + }, + { + "update": { + "adds": [], + "proof": { + "targets": [4, 5, 6, 7], + "hashes": [ + "df46b17be5f66f0750a4b3efa26d4679db170a72d41eb56c3e4ff75a58c65386" + ] + }, + "del_hashes": [ + "e52d9c508c502347344d8c07ad91cbd6068afc75ff6292f062a09ca381c89e71", + "e77b9a9ae9e30b0dbdb6f510a264ef9de781501d7b6b92ae89eb059c5ab743db", + "67586e98fad27da0b9968bc039a1ef34c939b9b8e523a8bef89d478608c5ecf6", + "ca358758f6d27e6cf45272937977a748fd88391db679ceda7dc7bf1f005ee879" + ] + }, + "remembers": [], + "cached_proof": { + "targets": [1, 8, 12], + "hashes": [ + "6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d", + "2b4c342f5433ebe591a1da77e013d1b72475562d48578dca8b84bac6651c3cb9", + "9d1e0e2d9459d06523ad13e28a4093c2316baafe7aec5b25f30eba2e113599c4", + "9576f4ade6e9bc3a6458b506ce3e4e890df29cb14cb5d3d887672aef55647a2b", + "c413035120e8c9b0ca3e40c93d06fe60a0d056866138300bb1f1dd172b4923c3", + 
"29590a14c1b09384b94a2c0e94bf821ca75b62eacebc47893397ca88e3bbcbd7" + ] + }, + "initial_stump": "Add 14 leaves [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]", + "initial_roots": [ + "b151a956139bb821d4effa34ea95c17560e0135d1e4661fc23cedc3af49dac42", + "9c053db406c1a077112189469a3aca0573d3481bef09fa3d2eda3304d7d44be8", + "55d0a0ef8f5c25a9da266b36c0c5f4b31008ece82df2512c8966bddcc27a66a0" + ], + "initial_leaves": 14, + "cached_hashes": [ + "4bf5122f344554c53bde2ebb8cd2b7e3d1600ad631c385a5d7cce23c7785459a", + "beead77994cf573341ec17b58bbf7eb34d2711c993c1d976b128b3188dc1829a", + "ef6cbd2161eaea7943ce8693b9824d23d1793ffb1c0fca05b600d3899b44c977" + ], + "expected_roots": [ + "df46b17be5f66f0750a4b3efa26d4679db170a72d41eb56c3e4ff75a58c65386", + "9c053db406c1a077112189469a3aca0573d3481bef09fa3d2eda3304d7d44be8", + "55d0a0ef8f5c25a9da266b36c0c5f4b31008ece82df2512c8966bddcc27a66a0" + ], + "expected_targets": [8, 12, 17], + "expected_cached_hashes": [ + "beead77994cf573341ec17b58bbf7eb34d2711c993c1d976b128b3188dc1829a", + "ef6cbd2161eaea7943ce8693b9824d23d1793ffb1c0fca05b600d3899b44c977", + "4bf5122f344554c53bde2ebb8cd2b7e3d1600ad631c385a5d7cce23c7785459a" + ] + }, + { + "update": { + "adds": [], + "proof": { + "targets": [0], + "hashes": [ + "4bf5122f344554c53bde2ebb8cd2b7e3d1600ad631c385a5d7cce23c7785459a", + "9576f4ade6e9bc3a6458b506ce3e4e890df29cb14cb5d3d887672aef55647a2b", + "29590a14c1b09384b94a2c0e94bf821ca75b62eacebc47893397ca88e3bbcbd7" + ] + }, + "del_hashes": [ + "6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d" + ] + }, + "remembers": [], + "cached_proof": { + "targets": [0, 7], + "hashes": [ + "4bf5122f344554c53bde2ebb8cd2b7e3d1600ad631c385a5d7cce23c7785459a", + "67586e98fad27da0b9968bc039a1ef34c939b9b8e523a8bef89d478608c5ecf6", + "9576f4ade6e9bc3a6458b506ce3e4e890df29cb14cb5d3d887672aef55647a2b", + "9eec588c41d87b16b0ee226cb38da3864f9537632321d8be855a73d5616dcc73" + ] + }, + "initial_stump": "Add 8 leaves [0, 1, 2, 3, 4, 5, 6, 7]", + "initial_roots": [ + "b151a956139bb821d4effa34ea95c17560e0135d1e4661fc23cedc3af49dac42" + ], + "initial_leaves": 8, + "cached_hashes": [ + "6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d", + "ca358758f6d27e6cf45272937977a748fd88391db679ceda7dc7bf1f005ee879" + ], + "expected_roots": [ + "726fdd3b432cc59e68487d126e70f0db74a236267f8daeae30b31839a4e7ebed" + ], + "expected_targets": [7], + "expected_cached_hashes": [ + "ca358758f6d27e6cf45272937977a748fd88391db679ceda7dc7bf1f005ee879" + ] + }, + + { + "update": { + "adds": [], + "proof": { + "targets": [3, 6], + "hashes": [ + "dbc1b4c900ffe48d575b5da5c638040125f65db0fe3e24494b76ea986457d986", + "ca358758f6d27e6cf45272937977a748fd88391db679ceda7dc7bf1f005ee879", + "6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d", + "e77b9a9ae9e30b0dbdb6f510a264ef9de781501d7b6b92ae89eb059c5ab743db", + "7a5db95c85dd117a20ca1b5b2ef50f61ee9529f40a957d97757c3a5b5ca7f5dd", + "2c358bbc9182d5eaf8ae15c50fbe0dcd45fc794bdc73eafffddedbb0f6bab327" + ] + }, + "del_hashes": [ + "084fed08b978af4d7d196a7446a86b58009e636b611db16211b65a9aadff29c5", + "67586e98fad27da0b9968bc039a1ef34c939b9b8e523a8bef89d478608c5ecf6" + ] + }, + "remembers": [], + "cached_proof": { + "targets": [2, 3, 7, 32, 34, 50, 52], + "hashes": [ + "67586e98fad27da0b9968bc039a1ef34c939b9b8e523a8bef89d478608c5ecf6", + "e799acb98a071c4884707e4bc8c093ba22571c8d84cc0223ab0c2c9327313a5b", + "ff4b5145903ab6a21824078a0f2bce2fb27739e9e80aa8195f2d3be70b12fc79", + "d1366f2a8c5a8fe1fc9b581a759ab6333e8d1683c74e09904d5acb50a9bffd5b" + ] + 
}, + "initial_stump": "Add 32 leaves [0, 1, 2, 3, 4, ... 29, 30, 31], then delete [1, 4, 8, 9, 10, 16, 17, 18]", + "initial_roots": [ + "0f434fd19b46d19ad44dd83b205c8e8c00f1b6d5701427f2c0524304dcbdf1d8" + ], + "initial_leaves": 32, + "cached_hashes": [ + "dbc1b4c900ffe48d575b5da5c638040125f65db0fe3e24494b76ea986457d986", + "084fed08b978af4d7d196a7446a86b58009e636b611db16211b65a9aadff29c5", + "ca358758f6d27e6cf45272937977a748fd88391db679ceda7dc7bf1f005ee879", + "6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d", + "e77b9a9ae9e30b0dbdb6f510a264ef9de781501d7b6b92ae89eb059c5ab743db", + "e7cf46a078fed4fafd0b5e3aff144802b853f8ae459a4f0c14add3314b7cc3a6", + "ab897fbdedfa502b2d839b6a56100887dccdc507555c282e59589e06300a62e2" + ], + "expected_roots": [ + "bbe46ee06d681bb5bad60c1b4860566e5e78b489b227cf927ecfbde599163dda" + ], + "expected_targets": [32, 33, 34, 35, 50, 52], + "expected_cached_hashes": [ + "6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d", + "dbc1b4c900ffe48d575b5da5c638040125f65db0fe3e24494b76ea986457d986", + "e77b9a9ae9e30b0dbdb6f510a264ef9de781501d7b6b92ae89eb059c5ab743db", + "ca358758f6d27e6cf45272937977a748fd88391db679ceda7dc7bf1f005ee879", + "e7cf46a078fed4fafd0b5e3aff144802b853f8ae459a4f0c14add3314b7cc3a6", + "ab897fbdedfa502b2d839b6a56100887dccdc507555c282e59589e06300a62e2" + ] + }, + { + "update": { + "adds": [], + "proof": { + "targets": [3, 6], + "hashes": [ + "dbc1b4c900ffe48d575b5da5c638040125f65db0fe3e24494b76ea986457d986", + "ca358758f6d27e6cf45272937977a748fd88391db679ceda7dc7bf1f005ee879", + "6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d", + "e77b9a9ae9e30b0dbdb6f510a264ef9de781501d7b6b92ae89eb059c5ab743db" + ] + }, + "del_hashes": [ + "084fed08b978af4d7d196a7446a86b58009e636b611db16211b65a9aadff29c5", + "67586e98fad27da0b9968bc039a1ef34c939b9b8e523a8bef89d478608c5ecf6" + ] + }, + "remembers": [], + "cached_proof": { + "targets": [2, 3, 7, 16, 18, 26], + "hashes": [ + "67586e98fad27da0b9968bc039a1ef34c939b9b8e523a8bef89d478608c5ecf6" + ] + }, + "initial_stump": "Add 12 leaves [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], then delete [1, 4, 8, 9, 10]", + "initial_roots": [ + "5093a5ae2bceeef66d9af3f4f1ed71acffe4932cd02afca5d2dc659dca5c418f", + "e7cf46a078fed4fafd0b5e3aff144802b853f8ae459a4f0c14add3314b7cc3a6" + ], + "initial_leaves": 12, + "cached_hashes": [ + "dbc1b4c900ffe48d575b5da5c638040125f65db0fe3e24494b76ea986457d986", + "084fed08b978af4d7d196a7446a86b58009e636b611db16211b65a9aadff29c5", + "ca358758f6d27e6cf45272937977a748fd88391db679ceda7dc7bf1f005ee879", + "6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d", + "e77b9a9ae9e30b0dbdb6f510a264ef9de781501d7b6b92ae89eb059c5ab743db", + "e7cf46a078fed4fafd0b5e3aff144802b853f8ae459a4f0c14add3314b7cc3a6" + ], + "expected_roots": [ + "2bfc2e9199263e7b2ef762afeb2fede84eb4fac37f3f28fd22d5761c8390b875", + "e7cf46a078fed4fafd0b5e3aff144802b853f8ae459a4f0c14add3314b7cc3a6" + ], + "expected_targets": [16, 17, 18, 19, 26], + "expected_cached_hashes": [ + "6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d", + "dbc1b4c900ffe48d575b5da5c638040125f65db0fe3e24494b76ea986457d986", + "e77b9a9ae9e30b0dbdb6f510a264ef9de781501d7b6b92ae89eb059c5ab743db", + "ca358758f6d27e6cf45272937977a748fd88391db679ceda7dc7bf1f005ee879", + "e7cf46a078fed4fafd0b5e3aff144802b853f8ae459a4f0c14add3314b7cc3a6" + ] + } +] \ No newline at end of file diff --git a/test_values/cache_tests.json b/test_values/update_data_tests.json similarity index 100% rename from 
test_values/cache_tests.json rename to test_values/update_data_tests.json
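A note for reading the new JSON fixtures: the `adds` arrays hold one-byte leaf preimages, while `del_hashes`, `initial_roots`, the proof hashes and every `expected_*` field hold hex digests of leaves or internal nodes. A hedged sketch of that correspondence follows; the helper mirrors `hash_from_u8` from proof.rs's test module, whose body is truncated in the hunk above, so the exact construction is an assumption.

```rust
use std::str::FromStr;

use bitcoin_hashes::{sha256, Hash, HashEngine};

/// A leaf is assumed to be the sha256 of its one-byte preimage,
/// mirroring the `hash_from_u8` helper in proof.rs's tests.
fn hash_from_u8(value: u8) -> sha256::Hash {
    let mut engine = sha256::Hash::engine();
    engine.input(&[value]);
    sha256::Hash::from_engine(engine)
}

fn main() {
    // The first fixture spends leaves 0 and 1 (`proof.targets: [0, 1]`), so its
    // `del_hashes` entries are exactly the digests of the preimages 0 and 1:
    let expected_0 = "6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d";
    let expected_1 = "4bf5122f344554c53bde2ebb8cd2b7e3d1600ad631c385a5d7cce23c7785459a";
    assert_eq!(hash_from_u8(0), sha256::Hash::from_str(expected_0).unwrap());
    assert_eq!(hash_from_u8(1), sha256::Hash::from_str(expected_1).unwrap());
}
```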