Skip to content

Commit

Permalink
refactor: header-accumulator
Browse files Browse the repository at this point in the history
Signed-off-by: Gustavo Inacio <[email protected]>
  • Loading branch information
gusinacio committed Oct 21, 2024
1 parent 47c29bb commit da1e331
Show file tree
Hide file tree
Showing 10 changed files with 184 additions and 760 deletions.
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

6 changes: 3 additions & 3 deletions crates/flat-head/src/era_verifier.rs
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ pub async fn verify_eras(

for epoch in start_epoch..=end_epoch.unwrap_or(start_epoch + 1) {
let tx = tx.clone();
let macc = macc.clone();
let era_validator: EraValidator = macc.clone().into();
let store = blocks_store.clone();

task::spawn(async move {
Expand All @@ -51,8 +51,8 @@ pub async fn verify_eras(
(succ, errs)
});

let valid_epochs = macc
.era_validate(successful_headers, epoch, Some(epoch + 1), true)
let valid_epochs = era_validator
.validate_era(successful_headers, epoch, Some(epoch + 1), true)
.unwrap();

let _ = tx.send(valid_epochs).await;
Expand Down
1 change: 1 addition & 0 deletions crates/header-accumulator/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ serde_json.workspace = true
sf-protos = { path = "../sf-protos" }
tree_hash = "0.8.0"
trin-validation.workspace = true
thiserror.workspace = true

[dev-dependencies]
decoder = { path = "../flat-files-decoder" }
Expand Down
48 changes: 48 additions & 0 deletions crates/header-accumulator/src/epoch.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,7 @@
use std::array::IntoIter;

use crate::{errors::EraValidateError, types::ExtHeaderRecord};

/// The maximum number of slots per epoch in Ethereum.
/// In the context of Proof of Stake (PoS) consensus, an epoch is a collection of slots
/// during which validators propose and attest to blocks. The maximum size of an epoch
Expand All @@ -13,3 +17,47 @@ pub const FINAL_EPOCH: usize = 1896;
/// "The Merge" took place at block 15537394, when the Ethereum network fully switched
/// from Proof of Work (PoW) to Proof of Stake (PoS).
pub const MERGE_BLOCK: u64 = 15537394;

/// An epoch of exactly `MAX_EPOCH_SIZE` (8192) consecutive block headers.
///
/// Construct via `TryFrom<Vec<ExtHeaderRecord>>`, which enforces the size
/// and that every header belongs to the same epoch.
pub struct Epoch(Box<[ExtHeaderRecord; MAX_EPOCH_SIZE]>);

impl TryFrom<Vec<ExtHeaderRecord>> for Epoch {
    type Error = EraValidateError;

    /// Builds an `Epoch` from a vector of header records.
    ///
    /// Takes the first `MAX_EPOCH_SIZE` headers; any extras are silently
    /// dropped. Returns `InvalidEpochLength` when fewer than
    /// `MAX_EPOCH_SIZE` headers are supplied, and `InvalidBlockInEpoch`
    /// when the headers do not all fall within the same epoch.
    fn try_from(value: Vec<ExtHeaderRecord>) -> Result<Self, Self::Error> {
        // Capture the original length for the error message before truncating.
        let len = value.len();
        let value: Vec<ExtHeaderRecord> = value.into_iter().take(MAX_EPOCH_SIZE).collect();
        let value: Box<[ExtHeaderRecord; MAX_EPOCH_SIZE]> = value
            .try_into()
            .map_err(|_| EraValidateError::InvalidEpochLength(len))?;
        // All blocks must map to the same epoch number as the first block.
        let epoch_number = value[0].block_number / MAX_EPOCH_SIZE as u64;
        if value
            .iter()
            .map(|block| block.block_number / MAX_EPOCH_SIZE as u64)
            .all(|epoch| epoch == epoch_number)
        {
            Ok(Self(value))
        } else {
            Err(EraValidateError::InvalidBlockInEpoch)
        }
    }
}

impl Epoch {
    /// The epoch number, derived from the block number of the first header.
    pub fn number(&self) -> usize {
        let first_block_number = self.0[0].block_number;
        (first_block_number / MAX_EPOCH_SIZE as u64) as usize
    }

    /// Borrowing iterator over the contained header records.
    pub fn iter(&self) -> std::slice::Iter<'_, ExtHeaderRecord> {
        self.0.iter()
    }
}

/// Consuming iteration over the epoch's `MAX_EPOCH_SIZE` header records.
impl IntoIterator for Epoch {
    type Item = ExtHeaderRecord;
    type IntoIter = IntoIter<ExtHeaderRecord, MAX_EPOCH_SIZE>;

    fn into_iter(self) -> Self::IntoIter {
        // Moves the boxed array out and yields owned `ExtHeaderRecord`s.
        self.0.into_iter()
    }
}
135 changes: 31 additions & 104 deletions crates/header-accumulator/src/era_validator.rs
Original file line number Diff line number Diff line change
@@ -1,19 +1,27 @@
use std::path::Path;

use ethportal_api::types::execution::accumulator::{EpochAccumulator, HeaderRecord};
use tree_hash::TreeHash;
use trin_validation::accumulator::PreMergeAccumulator;
use trin_validation::accumulator::{HistoricalEpochRoots, PreMergeAccumulator};

use crate::{
epoch::{FINAL_EPOCH, MAX_EPOCH_SIZE, MERGE_BLOCK},
errors::{EraValidateError, HeaderAccumulatorError},
sync::{Lock, LockEntry},
types::ExtHeaderRecord,
epoch::{Epoch, FINAL_EPOCH},
errors::EraValidateError,
};

pub trait EraValidator {
type Error;
/// Validates pre-merge epochs of block headers against the known
/// historical epoch accumulator roots.
pub struct EraValidator {
    // Tree-hash roots of the pre-merge epoch accumulators, indexed by
    // epoch number.
    historical_epochs: HistoricalEpochRoots,
}

impl From<PreMergeAccumulator> for EraValidator {
    /// Builds a validator from the Portal pre-merge accumulator,
    /// keeping only its historical epoch roots.
    fn from(value: PreMergeAccumulator) -> Self {
        Self {
            historical_epochs: value.historical_epochs,
        }
    }
}

pub type RootHash = [u8; 32];

impl EraValidator {
/// Validates many headers against a header accumulator
///
/// It also keeps a record in `lockfile.json` of the validated epochs to skip them
Expand All @@ -25,13 +33,15 @@ pub trait EraValidator {
/// * `start_epoch` - The epoch number where the first 8192 blocks are located
/// * `end_epoch` - The epoch number where the last 8192 blocks are located
/// * `use_lock` - when set to true, uses the lockfile to store already processed blocks. True by default
fn era_validate(
&self,
headers: Vec<ExtHeaderRecord>,
start_epoch: usize,
end_epoch: Option<usize>,
use_lock: bool,
) -> Result<Vec<usize>, Self::Error>;
/// Validates each epoch in turn, returning the accumulator root of every one.
///
/// Stops at the first epoch that fails validation and propagates its error.
pub fn validate_eras(&self, epochs: &[&Epoch]) -> Result<Vec<RootHash>, EraValidateError> {
    epochs
        .iter()
        .map(|epoch| self.validate_era(epoch))
        .collect()
}

/// takes 8192 block headers and checks if they consist in a valid epoch.
///
Expand All @@ -44,103 +54,20 @@ pub trait EraValidator {
///
/// For block post merge, the sync-committee should be used to validate block headers
/// in the canonical blockchain. So this function is not useful for those.
fn process_headers(
&self,
headers: Vec<ExtHeaderRecord>,
epoch: usize,
) -> Result<[u8; 32], Self::Error>;
}

impl EraValidator for PreMergeAccumulator {
type Error = HeaderAccumulatorError;

fn era_validate(
&self,
mut headers: Vec<ExtHeaderRecord>,
start_epoch: usize,
end_epoch: Option<usize>,
use_lock: bool,
) -> Result<Vec<usize>, Self::Error> {
let end_epoch = end_epoch.unwrap_or(start_epoch + 1);

// Ensure start epoch is less than end epoch
if start_epoch >= end_epoch {
Err(EraValidateError::EndEpochLessThanStartEpoch)?;
}

let mut validated_epochs = Vec::new();
for epoch in start_epoch..end_epoch {
// checks if epoch was already synced from lockfile.
if use_lock {
let file_path = Path::new("./lockfile.json");
let lock_file = Lock::from_file(file_path)?;

match lock_file.check_sync_state(file_path, epoch, self.historical_epochs[epoch].0)
{
Ok(true) => {
log::info!("Skipping, epoch already synced: {}", epoch);
continue;
}
Ok(false) => {
log::info!("syncing new epoch: {}", epoch);
}
Err(e) => {
return {
log::error!("error: {}", e);
Err(EraValidateError::EpochAccumulatorError.into())
}
}
}
}
let epoch_headers: Vec<ExtHeaderRecord> = headers.drain(0..MAX_EPOCH_SIZE).collect();
let root = self.process_headers(epoch_headers, epoch)?;
validated_epochs.push(epoch);

// stores the validated epoch into lockfile to avoid validating again and keeping a concise state
if use_lock {
let path = Path::new("./lockfile.json");
let mut lock_file = Lock::from_file(path)?;
lock_file.update(LockEntry::new(&epoch, root));

match lock_file.store_last_state(path) {
Ok(_) => {}
Err(e) => {
log::error!("error: {}", e);
return Err(EraValidateError::EpochAccumulatorError.into());
}
}
}
}

Ok(validated_epochs)
}

fn process_headers(
&self,
mut headers: Vec<ExtHeaderRecord>,
epoch: usize,
) -> Result<[u8; 32], Self::Error> {
if headers.len() != MAX_EPOCH_SIZE {
Err(EraValidateError::InvalidEpochLength)?;
}

if headers[0].block_number % MAX_EPOCH_SIZE as u64 != 0 {
Err(EraValidateError::InvalidEpochStart)?;
}

if epoch > FINAL_EPOCH {
pub fn validate_era(&self, epoch: &Epoch) -> Result<RootHash, EraValidateError> {
if epoch.number() > FINAL_EPOCH {
log::warn!(
"the blocks from this epoch are not being validated since they are post merge.
For post merge blocks, use the sync-committee subprotocol"
);
headers.retain(|header: &ExtHeaderRecord| header.block_number < MERGE_BLOCK);
// TODO return error
}

let header_records: Vec<_> = headers.into_iter().map(HeaderRecord::from).collect();
let header_records: Vec<_> = epoch.iter().map(HeaderRecord::from).collect();
let epoch_accumulator = EpochAccumulator::from(header_records);

let root: [u8; 32] = epoch_accumulator.tree_hash_root().0;
let valid_root = self.historical_epochs[epoch].0;
let valid_root = self.historical_epochs[epoch.number()].0;

if root != valid_root {
log::error!(
Expand Down
Loading

0 comments on commit da1e331

Please sign in to comment.