From da1e3313dd4aed99a4da49a615f19d517d3633e3 Mon Sep 17 00:00:00 2001 From: Gustavo Inacio Date: Mon, 21 Oct 2024 23:29:40 +0200 Subject: [PATCH] refactor: header-accumulator Signed-off-by: Gustavo Inacio --- Cargo.lock | 1 + crates/flat-head/src/era_verifier.rs | 6 +- crates/header-accumulator/Cargo.toml | 1 + crates/header-accumulator/src/epoch.rs | 48 ++++ .../header-accumulator/src/era_validator.rs | 135 +++------- crates/header-accumulator/src/errors.rs | 137 ++-------- crates/header-accumulator/src/lib.rs | 1 - crates/header-accumulator/src/main.rs | 233 ------------------ crates/header-accumulator/src/sync.rs | 208 ---------------- .../header-accumulator/tests/era_validator.rs | 174 ++++++------- 10 files changed, 184 insertions(+), 760 deletions(-) delete mode 100644 crates/header-accumulator/src/main.rs delete mode 100644 crates/header-accumulator/src/sync.rs diff --git a/Cargo.lock b/Cargo.lock index 933706cd..36d326ac 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2619,6 +2619,7 @@ dependencies = [ "serde_json", "sf-protos", "tempfile", + "thiserror", "tree_hash 0.8.0", "trin-validation", ] diff --git a/crates/flat-head/src/era_verifier.rs b/crates/flat-head/src/era_verifier.rs index 81031a26..59ddb37b 100644 --- a/crates/flat-head/src/era_verifier.rs +++ b/crates/flat-head/src/era_verifier.rs @@ -30,7 +30,7 @@ pub async fn verify_eras( for epoch in start_epoch..=end_epoch.unwrap_or(start_epoch + 1) { let tx = tx.clone(); - let macc = macc.clone(); + let era_validator: EraValidator = macc.clone().into(); let store = blocks_store.clone(); task::spawn(async move { @@ -51,8 +51,8 @@ pub async fn verify_eras( (succ, errs) }); - let valid_epochs = macc - .era_validate(successful_headers, epoch, Some(epoch + 1), true) + let valid_epochs = era_validator + .validate_era(successful_headers, epoch, Some(epoch + 1), true) .unwrap(); let _ = tx.send(valid_epochs).await; diff --git a/crates/header-accumulator/Cargo.toml b/crates/header-accumulator/Cargo.toml index d5c2b53b..07e884fd 100644 --- a/crates/header-accumulator/Cargo.toml +++ b/crates/header-accumulator/Cargo.toml @@ -20,6 +20,7 @@ serde_json.workspace = true sf-protos = { path = "../sf-protos" } tree_hash = "0.8.0" trin-validation.workspace = true +thiserror.workspace = true [dev-dependencies] decoder = { path = "../flat-files-decoder" } diff --git a/crates/header-accumulator/src/epoch.rs b/crates/header-accumulator/src/epoch.rs index 16bde8d9..b13d1614 100644 --- a/crates/header-accumulator/src/epoch.rs +++ b/crates/header-accumulator/src/epoch.rs @@ -1,3 +1,7 @@ +use std::array::IntoIter; + +use crate::{errors::EraValidateError, types::ExtHeaderRecord}; + /// The maximum number of slots per epoch in Ethereum. /// In the context of Proof of Stake (PoS) consensus, an epoch is a collection of slots /// during which validators propose and attest to blocks. The maximum size of an epoch @@ -13,3 +17,47 @@ pub const FINAL_EPOCH: usize = 1896; /// "The Merge" took place at block 15537394, when the Ethereum network fully switched /// from Proof of Work (PoW) to Proof of Stake (PoS). pub const MERGE_BLOCK: u64 = 15537394; + +/// Epoch containing 8192 blocks +pub struct Epoch(Box<[ExtHeaderRecord; MAX_EPOCH_SIZE]>); + +impl TryFrom> for Epoch { + type Error = EraValidateError; + + fn try_from(value: Vec) -> Result { + let len = value.len(); + println!("length: {len}"); + let value: Vec = value.into_iter().take(MAX_EPOCH_SIZE).collect(); + let value: Box<[ExtHeaderRecord; MAX_EPOCH_SIZE]> = value + .try_into() + .map_err(|_| EraValidateError::InvalidEpochLength(len))?; + let epoch_number = value[0].block_number / MAX_EPOCH_SIZE as u64; + if value + .iter() + .map(|block| block.block_number / MAX_EPOCH_SIZE as u64) + .all(|epoch| epoch == epoch_number) + { + Ok(Self(value)) + } else { + Err(EraValidateError::InvalidBlockInEpoch) + } + } +} + +impl Epoch { + pub fn number(&self) -> usize { + (self.0[0].block_number / MAX_EPOCH_SIZE as u64) as usize + } + pub fn iter(&self) -> std::slice::Iter<'_, ExtHeaderRecord> { + self.0.iter() + } +} + +impl IntoIterator for Epoch { + type Item = ExtHeaderRecord; + type IntoIter = IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} diff --git a/crates/header-accumulator/src/era_validator.rs b/crates/header-accumulator/src/era_validator.rs index 7efc2d4e..5d2ef550 100644 --- a/crates/header-accumulator/src/era_validator.rs +++ b/crates/header-accumulator/src/era_validator.rs @@ -1,19 +1,27 @@ -use std::path::Path; - use ethportal_api::types::execution::accumulator::{EpochAccumulator, HeaderRecord}; use tree_hash::TreeHash; -use trin_validation::accumulator::PreMergeAccumulator; +use trin_validation::accumulator::{HistoricalEpochRoots, PreMergeAccumulator}; use crate::{ - epoch::{FINAL_EPOCH, MAX_EPOCH_SIZE, MERGE_BLOCK}, - errors::{EraValidateError, HeaderAccumulatorError}, - sync::{Lock, LockEntry}, - types::ExtHeaderRecord, + epoch::{Epoch, FINAL_EPOCH}, + errors::EraValidateError, }; -pub trait EraValidator { - type Error; +pub struct EraValidator { + historical_epochs: HistoricalEpochRoots, +} + +impl From for EraValidator { + fn from(value: PreMergeAccumulator) -> Self { + Self { + historical_epochs: value.historical_epochs, + } + } +} + +pub type RootHash = [u8; 32]; +impl EraValidator { /// Validates many headers against a header accumulator /// /// It also keeps a record in `lockfile.json` of the validated epochs to skip them @@ -25,13 +33,15 @@ pub trait EraValidator { /// * `start_epoch` - The epoch number that all the first 8192 blocks are set located /// * `end_epoch` - The epoch number that all the last 8192 blocks are located /// * `use_lock` - when set to true, uses the lockfile to store already processed blocks. True by default - fn era_validate( - &self, - headers: Vec, - start_epoch: usize, - end_epoch: Option, - use_lock: bool, - ) -> Result, Self::Error>; + pub fn validate_eras(&self, epochs: &[&Epoch]) -> Result, EraValidateError> { + let mut validated_epochs = Vec::new(); + for epoch in epochs { + let root = self.validate_era(epoch)?; + validated_epochs.push(root); + } + + Ok(validated_epochs) + } /// takes 8192 block headers and checks if they consist in a valid epoch. /// @@ -44,103 +54,20 @@ pub trait EraValidator { /// /// For block post merge, the sync-committee should be used to validate block headers /// in the canonical blockchain. So this function is not useful for those. - fn process_headers( - &self, - headers: Vec, - epoch: usize, - ) -> Result<[u8; 32], Self::Error>; -} - -impl EraValidator for PreMergeAccumulator { - type Error = HeaderAccumulatorError; - - fn era_validate( - &self, - mut headers: Vec, - start_epoch: usize, - end_epoch: Option, - use_lock: bool, - ) -> Result, Self::Error> { - let end_epoch = end_epoch.unwrap_or(start_epoch + 1); - - // Ensure start epoch is less than end epoch - if start_epoch >= end_epoch { - Err(EraValidateError::EndEpochLessThanStartEpoch)?; - } - - let mut validated_epochs = Vec::new(); - for epoch in start_epoch..end_epoch { - // checks if epoch was already synced form lockfile. - if use_lock { - let file_path = Path::new("./lockfile.json"); - let lock_file = Lock::from_file(file_path)?; - - match lock_file.check_sync_state(file_path, epoch, self.historical_epochs[epoch].0) - { - Ok(true) => { - log::info!("Skipping, epoch already synced: {}", epoch); - continue; - } - Ok(false) => { - log::info!("syncing new epoch: {}", epoch); - } - Err(e) => { - return { - log::error!("error: {}", e); - Err(EraValidateError::EpochAccumulatorError.into()) - } - } - } - } - let epoch_headers: Vec = headers.drain(0..MAX_EPOCH_SIZE).collect(); - let root = self.process_headers(epoch_headers, epoch)?; - validated_epochs.push(epoch); - - // stores the validated epoch into lockfile to avoid validating again and keeping a concise state - if use_lock { - let path = Path::new("./lockfile.json"); - let mut lock_file = Lock::from_file(path)?; - lock_file.update(LockEntry::new(&epoch, root)); - - match lock_file.store_last_state(path) { - Ok(_) => {} - Err(e) => { - log::error!("error: {}", e); - return Err(EraValidateError::EpochAccumulatorError.into()); - } - } - } - } - - Ok(validated_epochs) - } - - fn process_headers( - &self, - mut headers: Vec, - epoch: usize, - ) -> Result<[u8; 32], Self::Error> { - if headers.len() != MAX_EPOCH_SIZE { - Err(EraValidateError::InvalidEpochLength)?; - } - - if headers[0].block_number % MAX_EPOCH_SIZE as u64 != 0 { - Err(EraValidateError::InvalidEpochStart)?; - } - - if epoch > FINAL_EPOCH { + pub fn validate_era(&self, epoch: &Epoch) -> Result { + if epoch.number() > FINAL_EPOCH { log::warn!( "the blocks from this epoch are not being validated since they are post merge. For post merge blocks, use the sync-committee subprotocol" ); - headers.retain(|header: &ExtHeaderRecord| header.block_number < MERGE_BLOCK); + // TODO return error } - let header_records: Vec<_> = headers.into_iter().map(HeaderRecord::from).collect(); + let header_records: Vec<_> = epoch.iter().map(HeaderRecord::from).collect(); let epoch_accumulator = EpochAccumulator::from(header_records); let root: [u8; 32] = epoch_accumulator.tree_hash_root().0; - let valid_root = self.historical_epochs[epoch].0; + let valid_root = self.historical_epochs[epoch.number()].0; if root != valid_root { log::error!( diff --git a/crates/header-accumulator/src/errors.rs b/crates/header-accumulator/src/errors.rs index 8bf3e46a..73209221 100644 --- a/crates/header-accumulator/src/errors.rs +++ b/crates/header-accumulator/src/errors.rs @@ -1,139 +1,48 @@ -use std::fmt; - use sf_protos::error::ProtosError; -#[derive(Debug)] -pub enum HeaderAccumulatorError { - EraValidateError(EraValidateError), - SyncError(SyncError), -} - -impl std::error::Error for HeaderAccumulatorError {} - -impl fmt::Display for HeaderAccumulatorError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - HeaderAccumulatorError::EraValidateError(ref e) => write!(f, "{}", e), - HeaderAccumulatorError::SyncError(ref e) => write!(f, "{}", e), - } - } -} - -impl From for HeaderAccumulatorError { - fn from(error: EraValidateError) -> Self { - HeaderAccumulatorError::EraValidateError(error) - } -} - -impl From for HeaderAccumulatorError { - fn from(error: SyncError) -> Self { - HeaderAccumulatorError::SyncError(error) - } -} - -#[derive(Debug)] +#[derive(thiserror::Error, Debug)] pub enum EraValidateError { + #[error("Too many header records")] TooManyHeaderRecords, + #[error("Invalid pre-merge accumulator file")] InvalidPreMergeAccumulatorFile, + #[error("Error decoding header from flat files")] HeaderDecodeError, + #[error("Error decoding flat files")] FlatFileDecodeError, + #[error("Era accumulator mismatch")] EraAccumulatorMismatch, + #[error("Error creating epoch accumulator")] EpochAccumulatorError, + #[error("Error generating inclusion proof")] ProofGenerationFailure, + #[error("Error validating inclusion proof")] ProofValidationFailure, + #[error("Error reading from stdin")] IoError, + #[error("Start epoch block not found")] StartEpochBlockNotFound, + #[error("Start epoch must be less than end epoch")] EndEpochLessThanStartEpoch, + #[error("Merge block not found")] MergeBlockNotFound, + #[error("Error reading json from stdin")] JsonError, + #[error("Error decoding total difficulty")] TotalDifficultyDecodeError, + #[error("blocks in epoch must respect the range of blocks numbers")] InvalidEpochStart, - InvalidEpochLength, + #[error("blocks in epoch must be exactly 8192 units, found {0}")] + InvalidEpochLength(usize), + + #[error("not all blocks are in the same epoch")] + InvalidBlockInEpoch, + #[error("Error converting ExtHeaderRecord to header")] ExtHeaderRecordError, + #[error("Invalid block range: {0} - {1}")] InvalidBlockRange(u64, u64), } -#[derive(Debug)] -pub enum SyncError { - LockfileIoError(std::io::Error), - LockfileReadError, -} - -impl std::error::Error for EraValidateError {} -impl std::error::Error for SyncError {} - -impl fmt::Display for EraValidateError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - use EraValidateError::*; - match *self { - EraValidateError::TooManyHeaderRecords => write!(f, "Too many header records"), - EraValidateError::InvalidPreMergeAccumulatorFile => { - write!(f, "Invalid pre-merge accumulator file") - } - HeaderDecodeError => { - write!(f, "Error decoding header from flat files") - } - FlatFileDecodeError => write!(f, "Error decoding flat files"), - EraAccumulatorMismatch => write!(f, "Era accumulator mismatch"), - EpochAccumulatorError => { - write!(f, "Error creating epoch accumulator") - } - ProofGenerationFailure => { - write!(f, "Error generating inclusion proof") - } - ProofValidationFailure => { - write!(f, "Error validating inclusion proof") - } - IoError => write!(f, "Error reading from stdin"), - StartEpochBlockNotFound => { - write!(f, "Start epoch block not found") - } - EndEpochLessThanStartEpoch => { - write!(f, "Start epoch must be less than end epoch") - } - MergeBlockNotFound => { - write!(f, "Merge block not found") - } - JsonError => { - write!(f, "Error reading json from stdin") - } - TotalDifficultyDecodeError => { - write!(f, "Error decoding total difficulty") - } - InvalidEpochLength => { - write!(f, "blocks in epoch must be exactly 8192 units") - } - InvalidEpochStart => { - write!( - f, - "blocks in epoch must respect the range of blocks numbers" - ) - } - ExtHeaderRecordError => { - write!(f, "Error converting ExtHeaderRecord to header") - } - InvalidBlockRange(start, end) => { - write!(f, "Invalid block range: {} - {}", start, end) - } - } - } -} - -impl fmt::Display for SyncError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Self::LockfileIoError(e) => write!(f, "Error reading lockfile: {e}"), - Self::LockfileReadError => write!(f, "Epoch not found"), - } - } -} - -impl From for SyncError { - fn from(error: std::io::Error) -> Self { - SyncError::LockfileIoError(error) - } -} - impl From for EraValidateError { fn from(error: ProtosError) -> Self { match error { diff --git a/crates/header-accumulator/src/lib.rs b/crates/header-accumulator/src/lib.rs index 46ca87a0..02968767 100644 --- a/crates/header-accumulator/src/lib.rs +++ b/crates/header-accumulator/src/lib.rs @@ -2,5 +2,4 @@ pub mod epoch; pub mod era_validator; pub mod errors; pub mod inclusion_proof; -pub mod sync; pub mod types; diff --git a/crates/header-accumulator/src/main.rs b/crates/header-accumulator/src/main.rs deleted file mode 100644 index e9c202d8..00000000 --- a/crates/header-accumulator/src/main.rs +++ /dev/null @@ -1,233 +0,0 @@ -use clap::{Arg, Command, Parser, Subcommand}; -use header_accumulator::errors::EraValidateError; -use std::{io::BufReader, process}; -use trin_validation::accumulator::PreMergeAccumulator; - -#[derive(Parser, Debug)] -#[clap(author, version, about, long_about = None)] -struct Cli { - #[clap(subcommand)] - command: Commands, -} - -#[derive(Subcommand, Debug)] -enum Commands { - /// Stream data continuously - Stream { - #[clap(short, long, default_value = "false")] - decompress: bool, - #[clap(short, long)] - end_block: Option, - }, - /// Decode files from input to output - Decode { - #[clap(short, long)] - input: String, - #[clap(long)] - headers_dir: Option, - }, -} - -fn main() { - env_logger::init(); - let matches = Command::new("header_accumulator") - .version("0") - .author("Semiotic Labs") - .about("Validates flat files against Header Accumulators") - .arg_required_else_help(true) - .subcommand( - Command::new("era_validate") - .about("Validates entire ERAs of flat files against Header Accumulators") - .arg( - Arg::new("directory") - .help("Directory where the flat files are stored") - .required(false) - .index(1), - ) - .arg( - Arg::new("start_epoch") - .help("Start epoch to check") - .required(false) - .short('s') - .long("start_epoch"), - ) - .arg( - Arg::new("end_epoch") - .help("End epoch to check") - .required(false) - .short('e') - .long("end_epoch"), - ) - .arg( - Arg::new("pre_merge_accumulator_file") - .help("pre-merge accumulator file (optional)") - .required(false) - .short('m') - .long("pre_merge_accumulator_file"), - ) - .subcommand( - Command::new("stream") - .about("Validates streams ERAs of flat files against Header Accumulators") - .arg( - Arg::new("pre_merge_accumulator_file") - .help("pre-merge accumulator file (optional)") - .required(false) - .short('m') - .long("pre_merge_accumulator_file"), - ), - ), - ) - .subcommand( - Command::new("generate_inclusion_proof") - .about("Generates inclusion proofs for a range of blocks") - .arg( - Arg::new("directory") - .help("Directory where the flat files are stored") - .required(true) - .index(1), - ) - .arg( - Arg::new("start_block") - .help("Start block to generate inclusion proof for") - .required(true) - .index(2), - ) - .arg( - Arg::new("end_block") - .help("End block to generate inclusion proof for") - .required(true) - .index(3), - ) - .arg( - Arg::new("output_file") - .help("Output file for the inclusion proof") - .required(false) - .short('o') - .long("output_file"), - ), - ) - .subcommand( - Command::new("verify_inclusion_proof") - .about("Verifies inclusion proofs for a range of blocks") - .arg( - Arg::new("directory") - .help("Directory where the flat files are stored") - .required(true) - .index(1), - ) - .arg( - Arg::new("start_block") - .help("Start block to verify inclusion proof for") - .required(true) - .index(2), - ) - .arg( - Arg::new("end_block") - .help("End block to verify inclusion proof for") - .required(true) - .index(3), - ) - .arg( - Arg::new("inclusion_proof_file") - .help("Inclusion proof to verify") - .required(true) - .index(4), - ), - ) - .get_matches(); - - match matches.subcommand() { - // TODO: move this functionality to flat_head - Some(("era_validate", era_validate_matches)) => { - if let Some(("stream", stream_matches)) = era_validate_matches.subcommand() { - let pre_merge_accumulator_file = - stream_matches.get_one::("pre_merge_accumulator_file"); - let _pre_merge_accumulator = match pre_merge_accumulator_file { - Some(pre_merge_accumulator_file) => { - PreMergeAccumulator::try_from_file(pre_merge_accumulator_file.into()) - .map_err(|_| EraValidateError::InvalidPreMergeAccumulatorFile) - .expect("Invalid pre-merge accumulator file") - } - None => PreMergeAccumulator::default(), - }; - let _reader = BufReader::with_capacity(1 << 32, std::io::stdin().lock()); - let _writer = std::io::stdout(); - process::exit(0); - } - } - //TODO: move this functionality to flat_head - // Some(("generate_inclusion_proof", generate_inclusion_proof_matches)) => { - // let directory = generate_inclusion_proof_matches - // .get_one::("directory") - // .expect("Directory is required."); - // let start_block = generate_inclusion_proof_matches - // .get_one::("start_block") - // .expect("Start block is required."); - // let end_block = generate_inclusion_proof_matches - // .get_one::("end_block") - // .expect("End block is required."); - - // let inclusion_proof = inclusion_proof::generate_inclusion_proof( - // &directory, - // start_block.parse::().unwrap(), - // end_block.parse::().unwrap(), - // ) - // .expect("Error generating inclusion proof"); - - // let inclusion_proof_serialized = serde_json::to_string(&inclusion_proof).unwrap(); - // // write the proof to a file - // // if output_file is not provided, write to inclusion_proof.json - // let output_file = generate_inclusion_proof_matches.get_one::("output_file"); - // match output_file { - // Some(output_file) => { - // std::fs::write(output_file.to_owned() + ".json", inclusion_proof_serialized) - // .expect("Unable to write file"); - // } - // None => { - // std::fs::write("inclusion_proof.json", inclusion_proof_serialized) - // .expect("Unable to write file"); - // } - // } - // process::exit(0); - // } - // Some(("verify_inclusion_proof", verify_inclusion_proof_matches)) => { - // let directory = verify_inclusion_proof_matches - // .get_one::("directory") - // .expect("Directory is required."); - // let start_block = verify_inclusion_proof_matches - // .get_one::("start_block") - // .expect("Start block is required."); - // let end_block = verify_inclusion_proof_matches - // .get_one::("end_block") - // .expect("End block is required."); - // let inclusion_proof_file = verify_inclusion_proof_matches - // .get_one::("inclusion_proof_file") - // .expect("Inclusion proof is required."); - - // // Load inclusion proof - // let inclusion_proof = std::fs::read_to_string(inclusion_proof_file) - // .expect("Error reading inclusion proof file"); - // let inclusion_proof: Vec<[H256; 15]> = - // serde_json::from_str(&inclusion_proof).expect("Error parsing inclusion proof"); - - // let result = inclusion_proof::verify_inclusion_proof( - // &directory, - // None, - // start_block.parse::().unwrap(), - // end_block.parse::().unwrap(), - // inclusion_proof, - // ); - - // if result.is_ok() { - // println!("Inclusion proof verified!"); - // process::exit(0); - // } else { - // println!("Inclusion proof failed to verify"); - // process::exit(1); - // } - // } - _ => { - println!("No subcommand was used"); - } - } -} diff --git a/crates/header-accumulator/src/sync.rs b/crates/header-accumulator/src/sync.rs deleted file mode 100644 index ac1fdc30..00000000 --- a/crates/header-accumulator/src/sync.rs +++ /dev/null @@ -1,208 +0,0 @@ -use base64::prelude::*; -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; -use std::error::Error; -use std::fs::{metadata, OpenOptions}; -use std::io::{Read, Write}; -use std::path::Path; - -use crate::errors::{EraValidateError, HeaderAccumulatorError, SyncError}; - -pub struct LockEntry { - epoch: String, - root: String, -} - -impl LockEntry { - pub fn new(epoch: &usize, root: [u8; 32]) -> Self { - LockEntry { - epoch: epoch.to_string(), - root: BASE64_STANDARD.encode(root), - } - } -} - -#[derive(Default, Serialize, Deserialize)] -pub struct Lock { - entries: HashMap, -} - -impl Lock { - pub fn new() -> Self { - Lock::default() - } - - pub fn check_sync_state( - &self, - file_path: &Path, - epoch: usize, - premerge_accumulator_hash: [u8; 32], - ) -> Result { - if metadata(file_path).is_err() { - log::info!("The lockfile did not exist and was created"); - } - - let epoch = epoch.to_string(); - - if !self.entries.contains_key(&epoch) { - return Ok(false); - } - - let stored_hash = self - .entries - .get(&epoch) - .ok_or(SyncError::LockfileReadError)?; - - let stored_hash = BASE64_STANDARD - .decode(stored_hash) - .expect("Failed to decode Base64"); - - // this ensures the decoded bytes fit into a `[u8; 32]` array, which is the hash type - let stored_hash: [u8; 32] = match stored_hash.try_into() { - Ok(b) => b, - Err(_) => panic!("Decoded hash does not fit into a 32-byte array"), - }; - - if premerge_accumulator_hash != stored_hash { - log::error!( - "the valid hash is: {:?} and the provided hash was: {:?}", - premerge_accumulator_hash, - stored_hash - ); - return Err(EraValidateError::EraAccumulatorMismatch.into()); - } - - Ok(true) - } - - pub fn from_file(file_path: &Path) -> Result { - let mut file = OpenOptions::new() - .read(true) - .write(true) - .create(true) - .truncate(false) - .open(file_path)?; - - let mut contents = String::new(); - - file.read_to_string(&mut contents)?; - - Ok(if contents.trim().is_empty() { - Lock::new() - } else { - serde_json::from_str(&contents).unwrap_or_default() - }) - } - - pub fn store_last_state(&self, file_path: &Path) -> Result<(), Box> { - let json_string = self.to_json()?; - - let mut file = OpenOptions::new() - .write(true) - .truncate(true) - .open(file_path)?; - - file.write_all(json_string.as_bytes())?; - - Ok(()) - } - - fn to_json(&self) -> Result> { - Ok(serde_json::to_string_pretty(self)?) - } - - pub fn update(&mut self, entry: LockEntry) { - self.entries.insert(entry.epoch, entry.root); - } -} - -#[cfg(test)] -mod tests { - use super::*; - use std::fs::File; - use std::io::Write; - use tempfile::tempdir; - use trin_validation::accumulator::PreMergeAccumulator; - - #[test] - fn test_store_last_state() -> Result<(), Box> { - let dir = tempdir()?; - let file_path = dir.path().join("test_lock.json"); - - let entry = LockEntry { - epoch: "0".into(), - root: "XsH/uMOxRvQmBsdM7Zc9wW7FoQfANFhYw0P8lHgLQhg=".into(), - }; - - let mut lock_file = Lock::from_file(&file_path).unwrap(); - lock_file.update(entry); - lock_file.store_last_state(&file_path)?; - - let mut file = File::open(file_path)?; - let mut contents = String::new(); - file.read_to_string(&mut contents)?; - - let lock: Lock = serde_json::from_str(&contents)?; - - // test if the entry was correctly added - assert!(lock.entries.contains_key("0")); - assert_eq!( - lock.entries.get("0"), - Some(&"XsH/uMOxRvQmBsdM7Zc9wW7FoQfANFhYw0P8lHgLQhg=".into()) - ); - - Ok(()) - } - - #[test] - fn test_check_sync_state() -> Result<(), Box> { - let dir = tempdir()?; - let file_path = dir.path().join("lockfile.json"); - - let mock_json = r#"{ - "entries": { - "1": "pTZOmpvFE8RgHw1i5rRtve3zIAu/rlTWNQ9G8segGTg=", - "0": "XsH/uMOxRvQmBsdM7Zc9wW7FoQfANFhYw0P8lHgLQhg=" - } - }"#; - - let mut file = File::create(&file_path)?; - writeln!(file, "{}", mock_json)?; - - let mac_file: PreMergeAccumulator = PreMergeAccumulator::default(); - - let json_lock = Lock::from_file(&file_path)?; - - // Test case where epoch exists and hashes match - let epoch = 0; - assert!(json_lock - .check_sync_state(&file_path, epoch, mac_file.historical_epochs[0].0) - .unwrap(),); - - // Test case where epoch does not exist - let epoch = 2; - let result = json_lock - .check_sync_state(&file_path, epoch, mac_file.historical_epochs[2].0) - .unwrap(); - assert!(!result); - - // // test when hashes differ but lock is present - let epoch = 0; - let result = json_lock - .check_sync_state(&file_path, epoch, mac_file.historical_epochs[1].0) - .map_err(|error| error.to_string()); - assert_eq!( - result.unwrap_err(), - EraValidateError::EraAccumulatorMismatch.to_string() - ); - - // test case for another epoch hash - let epoch = 1; - let result = json_lock - .check_sync_state(&file_path, epoch, mac_file.historical_epochs[1].0) - .map_err(|error| error.to_string()); - assert!(result.unwrap()); - - Ok(()) - } -} diff --git a/crates/header-accumulator/tests/era_validator.rs b/crates/header-accumulator/tests/era_validator.rs index dd3bc95d..53c4f21b 100644 --- a/crates/header-accumulator/tests/era_validator.rs +++ b/crates/header-accumulator/tests/era_validator.rs @@ -1,18 +1,11 @@ -use std::fs; - use decoder::{decode_flat_files, Decompression}; use header_accumulator::{ - era_validator::EraValidator, errors::HeaderAccumulatorError, types::ExtHeaderRecord, + epoch::Epoch, era_validator::EraValidator, errors::EraValidateError, types::ExtHeaderRecord, }; use trin_validation::accumulator::PreMergeAccumulator; #[test] -fn test_era_validate() -> Result<(), HeaderAccumulatorError> { - // clean up before tests - if let Err(e) = fs::remove_file("lockfile.json") { - eprintln!("Error deleting lockfile.json: {}", e); - } - +fn test_era_validate() -> Result<(), EraValidateError> { let mut headers: Vec = Vec::new(); for number in (0..=8200).step_by(100) { let file_name = format!("tests/ethereum_firehose_first_8200/{:010}.dbin", number); @@ -44,98 +37,85 @@ fn test_era_validate() -> Result<(), HeaderAccumulatorError> { } assert_eq!(headers.len(), 8300); assert_eq!(headers[0].block_number, 0); - let premerge_accumulator = PreMergeAccumulator::default(); - - let result = premerge_accumulator.era_validate(headers.clone(), 0, None, false)?; - println!("result 1: {:?}", result); - - assert!(result.contains(&0), "The vector does not contain 0"); - - // Test with creating a lockfile - let result = premerge_accumulator.era_validate(headers.clone(), 0, None, true)?; - println!("result 2: {:?}", result); - - assert!(result.contains(&0), "The vector does not contain 0"); - // test with the lockfile created before. - let result = premerge_accumulator.era_validate(headers.clone(), 0, None, true)?; + let premerge_accumulator: EraValidator = PreMergeAccumulator::default().into(); + let epoch: Epoch = headers.try_into().unwrap(); - // already validated epochs are not included in the array. - assert_eq!(result.len(), 0); + let result = premerge_accumulator.validate_eras(&[&epoch])?; - // clean up after tests - if let Err(e) = fs::remove_file("lockfile.json") { - eprintln!("Error deleting lockfile.json: {}", e); - } + let expected = [ + 94, 193, 255, 184, 195, 177, 70, 244, 38, 6, 199, 76, 237, 151, 61, 193, 110, 197, 161, 7, + 192, 52, 88, 88, 195, 67, 252, 148, 120, 11, 66, 24, + ]; + assert_eq!(result.first(), Some(&expected)); Ok(()) } -#[test] - -fn test_era_validate_compressed() -> Result<(), HeaderAccumulatorError> { - // clean up before tests - if let Err(e) = fs::remove_file("lockfile.json") { - eprintln!("Error deleting lockfile.json: {}", e); - } - - let mut headers: Vec = Vec::new(); - for number in (0..=8200).step_by(100) { - let file_name = format!("tests/compressed/{:010}.dbin.zst", number); - match decode_flat_files(file_name, None, None, Decompression::Zstd) { - Ok(blocks) => { - let (successful_headers, _): (Vec<_>, Vec<_>) = blocks - .iter() - .cloned() - .map(|block| ExtHeaderRecord::try_from(&block)) - .fold((Vec::new(), Vec::new()), |(mut succ, mut errs), res| { - match res { - Ok(header) => succ.push(header), - Err(e) => { - // Log the error or handle it as needed - eprintln!("Error converting block: {:?}", e); - errs.push(e); - } - }; - (succ, errs) - }); - - headers.extend(successful_headers); - } - Err(e) => { - eprintln!("error: {:?}", e); - break; - } - } - } - - assert_eq!(headers.len(), 8300); - assert_eq!(headers[0].block_number, 0); - - let premerge_accumulator = PreMergeAccumulator::default(); - - let result = premerge_accumulator.era_validate(headers.clone(), 0, None, false)?; - println!("result 1: {:?}", result); - - assert!(result.contains(&0), "The vector does not contain 0"); - - // Test with creating a lockfile - let result = premerge_accumulator.era_validate(headers.clone(), 0, None, true)?; - println!("result 2: {:?}", result); - - assert!(result.contains(&0), "The vector does not contain 0"); - - // test with the lockfile created before. - - let result = premerge_accumulator.era_validate(headers.clone(), 0, None, true)?; - - // already validated epochs are not included in the array. - assert_eq!(result.len(), 0); - - // clean up after tests - if let Err(e) = fs::remove_file("lockfile.json") { - eprintln!("Error deleting lockfile.json: {}", e); - } - - Ok(()) -} +// #[test] +// fn test_era_validate_compressed() -> Result<(), HeaderAccumulatorError> { +// // clean up before tests +// if let Err(e) = fs::remove_file("lockfile.json") { +// eprintln!("Error deleting lockfile.json: {}", e); +// } +// +// let mut headers: Vec = Vec::new(); +// for number in (0..=8200).step_by(100) { +// let file_name = format!("tests/compressed/{:010}.dbin.zst", number); +// match decode_flat_files(file_name, None, None, Decompression::Zstd) { +// Ok(blocks) => { +// let (successful_headers, _): (Vec<_>, Vec<_>) = blocks +// .iter() +// .cloned() +// .map(|block| ExtHeaderRecord::try_from(&block)) +// .fold((Vec::new(), Vec::new()), |(mut succ, mut errs), res| { +// match res { +// Ok(header) => succ.push(header), +// Err(e) => { +// // Log the error or handle it as needed +// eprintln!("Error converting block: {:?}", e); +// errs.push(e); +// } +// }; +// (succ, errs) +// }); +// +// headers.extend(successful_headers); +// } +// Err(e) => { +// eprintln!("error: {:?}", e); +// break; +// } +// } +// } +// +// assert_eq!(headers.len(), 8300); +// assert_eq!(headers[0].block_number, 0); +// +// let premerge_accumulator = PreMergeAccumulator::default(); +// +// let result = premerge_accumulator.era_validate(headers.clone(), 0, None, false)?; +// println!("result 1: {:?}", result); +// +// assert!(result.contains(&0), "The vector does not contain 0"); +// +// // Test with creating a lockfile +// let result = premerge_accumulator.era_validate(headers.clone(), 0, None, true)?; +// println!("result 2: {:?}", result); +// +// assert!(result.contains(&0), "The vector does not contain 0"); +// +// // test with the lockfile created before. +// +// let result = premerge_accumulator.era_validate(headers.clone(), 0, None, true)?; +// +// // already validated epochs are not included in the array. +// assert_eq!(result.len(), 0); +// +// // clean up after tests +// if let Err(e) = fs::remove_file("lockfile.json") { +// eprintln!("Error deleting lockfile.json: {}", e); +// } +// +// Ok(()) +// }