Merge pull request #3158 from dusk-network/neotamandua/tx_digest
node-data: change transaction hash method to digest
Neotamandua authored Dec 22, 2024
2 parents 438447e + b480d9a commit d21148b
Showing 5 changed files with 31 additions and 26 deletions.
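Before the diff, a quick orientation: `Transaction::hash()` becomes `Transaction::digest()`, a Sha3-256 over the serialized transaction, and `Fault` gains an `id()` alongside its renamed `digest()`. A minimal, self-contained sketch of what `digest()` computes, with a stand-in struct in place of node-data's real `Transaction`:

```rust
use sha3::{Digest, Sha3_256};

// Stand-in for node-data's Transaction; `bytes` plays the role of
// `self.inner.to_var_bytes()` in the real implementation.
struct Transaction {
    bytes: Vec<u8>,
}

impl Transaction {
    /// Sha3-256 digest of the serialized form, as in this commit.
    fn digest(&self) -> [u8; 32] {
        Sha3_256::digest(&self.bytes).into()
    }
}

fn main() {
    let tx = Transaction { bytes: vec![0xde, 0xad, 0xbe, 0xef] };
    let leaf = tx.digest();
    assert_eq!(leaf.len(), 32); // one 32-byte Merkle leaf per transaction
}
```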
12 changes: 6 additions & 6 deletions consensus/src/proposal/block_generator.rs
@@ -97,15 +97,15 @@ impl<T: Operations> Generator<T> {

         // We always write the faults len in a u32
         let mut faults_size = u32::SIZE;
-        let faults_hashes: Vec<_> = faults
+        let fault_digests: Vec<_> = faults
             .iter()
             .map(|f| {
                 faults_size += f.size();
-                f.hash()
+                f.digest()
             })
             .collect();

-        blk_header.faultroot = merkle_root(&faults_hashes);
+        blk_header.faultroot = merkle_root(&fault_digests);

         // We know for sure that this operation cannot underflow
         let max_txs_bytes = MAX_BLOCK_SIZE - header_size - faults_size;
@@ -126,10 +126,10 @@ impl<T: Operations> Generator<T> {
         blk_header.state_hash = result.verification_output.state_root;
         blk_header.event_bloom = result.verification_output.event_bloom;

-        let tx_hashes: Vec<_> =
-            result.txs.iter().map(|t| t.inner.hash()).collect();
+        let tx_digests: Vec<_> =
+            result.txs.iter().map(|t| t.inner.digest()).collect();
         let txs: Vec<_> = result.txs.into_iter().map(|t| t.inner).collect();
-        blk_header.txroot = merkle_root(&tx_hashes[..]);
+        blk_header.txroot = merkle_root(&tx_digests[..]);

         blk_header.timestamp = max(
             ru.timestamp() + *MINIMUM_BLOCK_TIME,
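`merkle_root` here comes from the Dusk codebase; its construction is not shown in this commit. For readers unfamiliar with the idea, a generic pairwise sketch over 32-byte leaves (duplicating the last node on odd levels is one common convention, not necessarily Dusk's):

```rust
use sha3::{Digest, Sha3_256};

// Generic pairwise Merkle root over 32-byte leaves. A concept sketch only:
// the real `merkle_root` may handle empty input and odd levels differently.
fn merkle_root(leaves: &[[u8; 32]]) -> [u8; 32] {
    if leaves.is_empty() {
        return [0u8; 32]; // assumed sentinel for an empty tree
    }
    let mut level: Vec<[u8; 32]> = leaves.to_vec();
    while level.len() > 1 {
        level = level
            .chunks(2)
            .map(|pair| {
                let mut hasher = Sha3_256::new();
                hasher.update(pair[0]);
                // Duplicate the last node when the level is odd.
                hasher.update(pair.get(1).unwrap_or(&pair[0]));
                hasher.finalize().into()
            })
            .collect();
    }
    level[0]
}
```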
12 changes: 6 additions & 6 deletions consensus/src/proposal/handler.rs
@@ -189,9 +189,9 @@ fn verify_candidate_msg(
     }

     // Verify tx_root
-    let tx_hashes: Vec<_> =
-        p.candidate.txs().iter().map(|t| t.hash()).collect();
-    let tx_root = merkle_root(&tx_hashes[..]);
+    let tx_digests: Vec<_> =
+        p.candidate.txs().iter().map(|t| t.digest()).collect();
+    let tx_root = merkle_root(&tx_digests[..]);
     if tx_root != p.candidate.header().txroot {
         return Err(ConsensusError::InvalidBlock);
     }
@@ -202,9 +202,9 @@ fn verify_candidate_msg(
     }

     // Verify fault_root
-    let fault_hashes: Vec<_> =
-        p.candidate.faults().iter().map(|t| t.hash()).collect();
-    let fault_root = merkle_root(&fault_hashes[..]);
+    let fault_digests: Vec<_> =
+        p.candidate.faults().iter().map(|t| t.digest()).collect();
+    let fault_root = merkle_root(&fault_digests[..]);
     if fault_root != p.candidate.header().faultroot {
         return Err(ConsensusError::InvalidBlock);
     }
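Verification simply mirrors generation: recompute the root from the candidate's digests and compare it with the header field. A condensed sketch of that pattern, reusing the `merkle_root` stand-in above (error type is illustrative, not the real `ConsensusError`):

```rust
// Recompute-and-compare: the shape of both root checks above.
fn verify_root(expected: &[u8; 32], digests: &[[u8; 32]]) -> Result<(), String> {
    if &merkle_root(digests) != expected {
        return Err("InvalidBlock".into());
    }
    Ok(())
}
```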
10 changes: 8 additions & 2 deletions node-data/src/ledger/faults.rs
@@ -81,8 +81,14 @@ impl From<BlsSigError> for InvalidFault {
 }

 impl Fault {
-    /// Hash the serialized form
-    pub fn hash(&self) -> [u8; 32] {
+    // TODO: change to HEIGHT|TYPE|PROV_KEY once faults collection is
+    // implemented
+    pub fn id(&self) -> [u8; 32] {
+        self.digest()
+    }
+
+    /// Digest the serialized form
+    pub fn digest(&self) -> [u8; 32] {
         let mut b = vec![];
         self.write(&mut b).expect("Write to a vec shall not fail");
         sha3::Sha3_256::digest(&b[..]).into()
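The new `id()` is a deliberate seam: today it forwards to `digest()`, but the TODO says it should eventually be derived from HEIGHT|TYPE|PROV_KEY once fault collection lands. A purely hypothetical reading of that TODO (field names and encoding are guesses, not the Dusk API):

```rust
use sha3::{Digest, Sha3_256};

// Hypothetical future Fault::id() per the TODO: identity from the fault's
// height, type tag, and provisioner key instead of its full serialization.
fn fault_id(height: u64, fault_type: u8, prov_key: &[u8]) -> [u8; 32] {
    let mut hasher = Sha3_256::new();
    hasher.update(height.to_le_bytes()); // HEIGHT
    hasher.update([fault_type]);         // TYPE
    hasher.update(prov_key);             // PROV_KEY
    hasher.finalize().into()
}
```

The point of splitting `id()` from `digest()` now is that storage keys stay stable even if the identity scheme changes later.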
8 changes: 5 additions & 3 deletions node-data/src/ledger/transaction.rs
@@ -55,14 +55,16 @@ pub struct SpentTransaction {
 }

 impl Transaction {
-    /// Computes the hash of the transaction.
+    /// Computes the hash digest of the entire transaction data.
     ///
-    /// This method returns the hash of the entire
+    /// This method returns the Sha3 256 digest of the entire
     /// transaction in its serialized form
     ///
+    /// The digest hash is currently only being used in the merkle tree.
+    ///
     /// ### Returns
     /// An array of 32 bytes representing the hash of the transaction.
-    pub fn hash(&self) -> [u8; 32] {
+    pub fn digest(&self) -> [u8; 32] {
         sha3::Sha3_256::digest(self.inner.to_var_bytes()).into()
     }

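With the rename, the split of roles becomes explicit: `id()` keys a transaction in storage (see rocksdb.rs below), while `digest()` exists to feed the Merkle tree, as the new doc comment notes. A usage sketch building a tx root, assuming the `Transaction` and `merkle_root` stand-ins from the earlier examples:

```rust
// Leaves come from digest(); merkle_root folds them into the header's txroot.
fn txroot(txs: &[Transaction]) -> [u8; 32] {
    let tx_digests: Vec<[u8; 32]> = txs.iter().map(|t| t.digest()).collect();
    merkle_root(&tx_digests[..])
}
```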
15 changes: 6 additions & 9 deletions node/src/database/rocksdb.rs
@@ -318,7 +318,7 @@ impl<'db, DB: DBAccess> Ledger for DBTransaction<'db, DB> {
         LightBlock {
             header: header.clone(),
             transactions_ids: txs.iter().map(|t| t.inner.id()).collect(),
-            faults_ids: faults.iter().map(|f| f.hash()).collect(),
+            faults_ids: faults.iter().map(|f| f.id()).collect(),
         }
         .write(&mut buf)?;

@@ -349,7 +349,7 @@ impl<'db, DB: DBAccess> Ledger for DBTransaction<'db, DB> {
             for f in faults {
                 let mut d = vec![];
                 f.write(&mut d)?;
-                self.put_cf(cf, f.hash(), d)?;
+                self.put_cf(cf, f.id(), d)?;
             }
         }
         self.store_block_label(header.height, &header.hash, label)?;
@@ -410,7 +410,7 @@ impl<'db, DB: DBAccess> Ledger for DBTransaction<'db, DB> {
             self.inner.delete_cf(self.ledger_txs_cf, tx.id())?;
         }
         for f in b.faults() {
-            self.inner.delete_cf(self.ledger_faults_cf, f.hash())?;
+            self.inner.delete_cf(self.ledger_faults_cf, f.id())?;
         }

         self.inner.delete_cf(self.ledger_cf, b.header().hash)?;
@@ -1215,7 +1215,7 @@ impl node_data::Serializable for LightBlock {
         let len = self.faults_ids.len() as u32;
         w.write_all(&len.to_le_bytes())?;

-        // Write faults hashes
+        // Write faults id
         for f_id in &self.faults_ids {
             w.write_all(f_id)?;
         }
@@ -1245,7 +1245,7 @@ impl node_data::Serializable for LightBlock {
         // Read faults count
         let len = Self::read_u32_le(r)?;

-        // Read faults hashes
+        // Read faults ids
         let mut faults_ids = vec![];
         for _ in 0..len {
             let mut f_id = [0u8; 32];
@@ -1308,10 +1308,7 @@ mod tests {
                 // Assert all faults are fully fetched from ledger as
                 // well.
                 for pos in 0..b.faults().len() {
-                    assert_eq!(
-                        db_blk.faults()[pos].hash(),
-                        b.faults()[pos].hash()
-                    );
+                    assert_eq!(db_blk.faults()[pos].id(), b.faults()[pos].id());
                 }
             });

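On the storage side, nothing about the record layout changes; only the key function does. Faults are now written and deleted under `Fault::id()`, so the database never needs to care whether the identity is a serialization digest (today) or a field-derived key (the TODO). A toy stand-in using a HashMap in place of the RocksDB column family:

```rust
use std::collections::HashMap;

// Toy model of the faults column family: records keyed by Fault::id().
struct FaultsCf {
    cf: HashMap<[u8; 32], Vec<u8>>,
}

impl FaultsCf {
    // Mirrors `put_cf(cf, f.id(), d)` above.
    fn put(&mut self, id: [u8; 32], serialized: Vec<u8>) {
        self.cf.insert(id, serialized);
    }

    // Mirrors `delete_cf(self.ledger_faults_cf, f.id())` above.
    fn delete(&mut self, id: &[u8; 32]) {
        self.cf.remove(id);
    }
}
```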
