From c7660e037851c96db48016c645b4dc06349e9435 Mon Sep 17 00:00:00 2001 From: Neotamandua <107320179+Neotamandua@users.noreply.github.com> Date: Fri, 20 Dec 2024 14:02:00 +0200 Subject: [PATCH 1/3] node-data: rename hash to digest - Update comments --- node-data/src/ledger/faults.rs | 10 ++++++++-- node-data/src/ledger/transaction.rs | 8 +++++--- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/node-data/src/ledger/faults.rs b/node-data/src/ledger/faults.rs index f9948d39f..4d0faa65d 100644 --- a/node-data/src/ledger/faults.rs +++ b/node-data/src/ledger/faults.rs @@ -81,8 +81,14 @@ impl From for InvalidFault { } impl Fault { - /// Hash the serialized form - pub fn hash(&self) -> [u8; 32] { + // TODO: change to HEIGHT|TYPE|PROV_KEY once faults collection is + // implemented + pub fn id(&self) -> [u8; 32] { + self.digest() + } + + /// Digest the serialized form + pub fn digest(&self) -> [u8; 32] { let mut b = vec![]; self.write(&mut b).expect("Write to a vec shall not fail"); sha3::Sha3_256::digest(&b[..]).into() diff --git a/node-data/src/ledger/transaction.rs b/node-data/src/ledger/transaction.rs index e4d13aa49..c7b4901a7 100644 --- a/node-data/src/ledger/transaction.rs +++ b/node-data/src/ledger/transaction.rs @@ -55,14 +55,16 @@ pub struct SpentTransaction { } impl Transaction { - /// Computes the hash of the transaction. + /// Computes the hash digest of the entire transaction data. /// - /// This method returns the hash of the entire + /// This method returns the Sha3 256 digest of the entire /// transaction in its serialized form /// + /// The digest hash is currently only being used in the merkle tree. + /// /// ### Returns /// An array of 32 bytes representing the hash of the transaction. 
- pub fn hash(&self) -> [u8; 32] { + pub fn digest(&self) -> [u8; 32] { sha3::Sha3_256::digest(self.inner.to_var_bytes()).into() } From 8133760fb4966074054ce47fd7c36df3b7cb5992 Mon Sep 17 00:00:00 2001 From: Neotamandua <107320179+Neotamandua@users.noreply.github.com> Date: Thu, 5 Dec 2024 23:47:35 +0200 Subject: [PATCH 2/3] consensus: adjust hash to digest terminology --- consensus/src/proposal/block_generator.rs | 12 ++++++------ consensus/src/proposal/handler.rs | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/consensus/src/proposal/block_generator.rs b/consensus/src/proposal/block_generator.rs index 5ee57eddc..18f258328 100644 --- a/consensus/src/proposal/block_generator.rs +++ b/consensus/src/proposal/block_generator.rs @@ -113,15 +113,15 @@ impl Generator { // We always write the faults len in a u32 let mut faults_size = u32::SIZE; - let faults_hashes: Vec<_> = faults + let fault_digests: Vec<_> = faults .iter() .map(|f| { faults_size += f.size(); - f.hash() + f.digest() }) .collect(); - blk_header.faultroot = merkle_root(&faults_hashes); + blk_header.faultroot = merkle_root(&fault_digests); // We know for sure that this operation cannot underflow let max_txs_bytes = MAX_BLOCK_SIZE - header_size - faults_size; @@ -140,10 +140,10 @@ impl Generator { blk_header.state_hash = result.verification_output.state_root; blk_header.event_bloom = result.verification_output.event_bloom; - let tx_hashes: Vec<_> = - result.txs.iter().map(|t| t.inner.hash()).collect(); + let tx_digests: Vec<_> = + result.txs.iter().map(|t| t.inner.digest()).collect(); let txs: Vec<_> = result.txs.into_iter().map(|t| t.inner).collect(); - blk_header.txroot = merkle_root(&tx_hashes[..]); + blk_header.txroot = merkle_root(&tx_digests[..]); blk_header.timestamp = max( ru.timestamp() + *MINIMUM_BLOCK_TIME, diff --git a/consensus/src/proposal/handler.rs b/consensus/src/proposal/handler.rs index 248aeb2fc..d98d1d70e 100644 --- a/consensus/src/proposal/handler.rs +++ 
b/consensus/src/proposal/handler.rs @@ -173,9 +173,9 @@ fn verify_candidate_msg( } // Verify tx_root - let tx_hashes: Vec<_> = - p.candidate.txs().iter().map(|t| t.hash()).collect(); - let tx_root = merkle_root(&tx_hashes[..]); + let tx_digests: Vec<_> = + p.candidate.txs().iter().map(|t| t.digest()).collect(); + let tx_root = merkle_root(&tx_digests[..]); if tx_root != p.candidate.header().txroot { return Err(ConsensusError::InvalidBlock); } @@ -186,9 +186,9 @@ fn verify_candidate_msg( } // Verify fault_root - let fault_hashes: Vec<_> = - p.candidate.faults().iter().map(|t| t.hash()).collect(); - let fault_root = merkle_root(&fault_hashes[..]); + let fault_digests: Vec<_> = + p.candidate.faults().iter().map(|t| t.digest()).collect(); + let fault_root = merkle_root(&fault_digests[..]); if fault_root != p.candidate.header().faultroot { return Err(ConsensusError::InvalidBlock); } From b480d9a8297a28db4744200562c904c75f0595e5 Mon Sep 17 00:00:00 2001 From: Neotamandua <107320179+Neotamandua@users.noreply.github.com> Date: Fri, 20 Dec 2024 14:07:02 +0200 Subject: [PATCH 3/3] node: adjust hash to id function calls --- node/src/database/rocksdb.rs | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/node/src/database/rocksdb.rs b/node/src/database/rocksdb.rs index 10d3ffdc8..f43190a16 100644 --- a/node/src/database/rocksdb.rs +++ b/node/src/database/rocksdb.rs @@ -318,7 +318,7 @@ impl<'db, DB: DBAccess> Ledger for DBTransaction<'db, DB> { LightBlock { header: header.clone(), transactions_ids: txs.iter().map(|t| t.inner.id()).collect(), - faults_ids: faults.iter().map(|f| f.hash()).collect(), + faults_ids: faults.iter().map(|f| f.id()).collect(), } .write(&mut buf)?; @@ -349,7 +349,7 @@ impl<'db, DB: DBAccess> Ledger for DBTransaction<'db, DB> { for f in faults { let mut d = vec![]; f.write(&mut d)?; - self.put_cf(cf, f.hash(), d)?; + self.put_cf(cf, f.id(), d)?; } } self.store_block_label(header.height, &header.hash, label)?; @@ -410,7 +410,7 
@@ impl<'db, DB: DBAccess> Ledger for DBTransaction<'db, DB> { self.inner.delete_cf(self.ledger_txs_cf, tx.id())?; } for f in b.faults() { - self.inner.delete_cf(self.ledger_faults_cf, f.hash())?; + self.inner.delete_cf(self.ledger_faults_cf, f.id())?; } self.inner.delete_cf(self.ledger_cf, b.header().hash)?; @@ -1206,7 +1206,7 @@ impl node_data::Serializable for LightBlock { let len = self.faults_ids.len() as u32; w.write_all(&len.to_le_bytes())?; - // Write faults hashes + // Write faults ids for f_id in &self.faults_ids { w.write_all(f_id)?; } @@ -1236,7 +1236,7 @@ impl node_data::Serializable for LightBlock { // Read faults count let len = Self::read_u32_le(r)?; - // Read faults hashes + // Read faults ids let mut faults_ids = vec![]; for _ in 0..len { let mut f_id = [0u8; 32]; @@ -1299,10 +1299,7 @@ mod tests { // Assert all faults are fully fetched from ledger as // well. for pos in 0..b.faults().len() { - assert_eq!( - db_blk.faults()[pos].hash(), - b.faults()[pos].hash() - ); + assert_eq!(db_blk.faults()[pos].id(), b.faults()[pos].id()); } });