feat(types)!: add blob reconstruction from shares (#450)
Co-authored-by: Mikołaj Florkiewicz <[email protected]>
Signed-off-by: Maciej Zwoliński <[email protected]>
zvolin and fl0rek authored Oct 25, 2024
1 parent 956b861 commit 78d9fdb
Showing 14 changed files with 511 additions and 51 deletions.
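
The headline addition is Blob::reconstruct_all, which parses a sequence of shares back into the blobs they encode. Its exact definition is not part of this excerpt; the round-trip sketch below is inferred from the call sites in rpc/tests/share.rs further down, and Blob::to_shares plus the hard-coded namespace bytes are assumptions for illustration, not code from this commit.

    use celestia_types::nmt::Namespace;
    use celestia_types::Blob;

    fn main() {
        // Hypothetical v0 namespace and payload, purely for illustration.
        let namespace = Namespace::new_v0(&[0xDE, 0xAD, 0xBE, 0xEF]).unwrap();
        let blob = Blob::new(namespace, b"hello world".to_vec()).unwrap();

        // Split the blob into shares, then rebuild it with the new API.
        let shares = blob.to_shares().unwrap();
        let reconstructed = Blob::reconstruct_all(shares.iter()).unwrap();

        assert_eq!(reconstructed, vec![blob]);
    }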
8 changes: 4 additions & 4 deletions node/src/daser.rs
@@ -481,7 +481,7 @@ mod tests {
     use bytes::BytesMut;
     use celestia_proto::bitswap::Block;
     use celestia_types::sample::{Sample, SampleId};
-    use celestia_types::test_utils::{generate_eds, ExtendedHeaderGenerator};
+    use celestia_types::test_utils::{generate_dummy_eds, ExtendedHeaderGenerator};
     use celestia_types::{AxisType, DataAvailabilityHeader, ExtendedDataSquare};
     use cid::Cid;
     use prost::Message;
@@ -570,7 +570,7 @@ mod tests {
         let mut headers = Vec::new();
 
         for _ in 0..20 {
-            let eds = generate_eds(2);
+            let eds = generate_dummy_eds(2);
             let dah = DataAvailabilityHeader::from_eds(&eds);
             let header = gen.next_with_dah(dah);
 
@@ -644,7 +644,7 @@ mod tests {
         handle.expect_no_cmd().await;
 
         // Push block 21 in the store
-        let eds = generate_eds(2);
+        let eds = generate_dummy_eds(2);
         let dah = DataAvailabilityHeader::from_eds(&eds);
         let header = gen.next_with_dah(dah);
         store.insert(header).await.unwrap();
@@ -663,7 +663,7 @@
         square_width: usize,
         simulate_invalid_sampling: bool,
     ) {
-        let eds = generate_eds(square_width);
+        let eds = generate_dummy_eds(square_width);
         let dah = DataAvailabilityHeader::from_eds(&eds);
         let header = gen.next_with_dah(dah);
         let height = header.height().value();
4 changes: 2 additions & 2 deletions node/src/p2p/shwap.rs
@@ -112,14 +112,14 @@ mod tests {
     use crate::store::InMemoryStore;
     use crate::test_utils::async_test;
     use bytes::BytesMut;
-    use celestia_types::test_utils::{generate_eds, ExtendedHeaderGenerator};
+    use celestia_types::test_utils::{generate_dummy_eds, ExtendedHeaderGenerator};
     use celestia_types::{AxisType, DataAvailabilityHeader};
 
     #[async_test]
     async fn hash() {
         let store = Arc::new(InMemoryStore::new());
 
-        let eds = generate_eds(4);
+        let eds = generate_dummy_eds(4);
         let dah = DataAvailabilityHeader::from_eds(&eds);
 
         let mut gen = ExtendedHeaderGenerator::new();
4 changes: 2 additions & 2 deletions node/tests/node.rs
@@ -6,7 +6,7 @@ use celestia_tendermint_proto::Protobuf;
 use celestia_types::consts::HASH_SIZE;
 use celestia_types::fraud_proof::BadEncodingFraudProof;
 use celestia_types::hash::Hash;
-use celestia_types::test_utils::{corrupt_eds, generate_eds, ExtendedHeaderGenerator};
+use celestia_types::test_utils::{corrupt_eds, generate_dummy_eds, ExtendedHeaderGenerator};
 use futures::StreamExt;
 use libp2p::swarm::NetworkBehaviour;
 use libp2p::{gossipsub, identity, noise, ping, tcp, yamux, Multiaddr, SwarmBuilder};
@@ -182,7 +182,7 @@ async fn stops_services_when_network_is_compromised() {
     store.insert(gen.next_many_verified(64)).await.unwrap();
 
     // create a corrupted block and insert it
-    let mut eds = generate_eds(8);
+    let mut eds = generate_dummy_eds(8);
     let (header, befp) = corrupt_eds(&mut gen, &mut eds);
 
     store.insert(header).await.unwrap();
27 changes: 10 additions & 17 deletions rpc/tests/share.rs
@@ -29,10 +29,14 @@ async fn get_share() {
 async fn get_shares_by_namespace() {
     let client = new_test_client(AuthLevel::Write).await.unwrap();
     let namespace = random_ns();
-    let data = random_bytes(1024);
-    let blob = Blob::new(namespace, data.clone()).unwrap();
+    let blobs: Vec<_> = (0..4)
+        .map(|_| {
+            let data = random_bytes(1024);
+            Blob::new(namespace, data.clone()).unwrap()
+        })
+        .collect();
 
-    let submitted_height = blob_submit(&client, &[blob]).await.unwrap();
+    let submitted_height = blob_submit(&client, &blobs).await.unwrap();
 
     let header = client.header_get_by_height(submitted_height).await.unwrap();
 
@@ -41,21 +45,10 @@
         .await
         .unwrap();
 
-    let seq_len = ns_shares.rows[0].shares[0]
-        .sequence_length()
-        .expect("not parity");
-    assert_eq!(seq_len as usize, data.len());
-
-    let reconstructed_data = ns_shares
-        .rows
-        .into_iter()
-        .flat_map(|row| row.shares.into_iter())
-        .fold(vec![], |mut acc, share| {
-            acc.extend_from_slice(share.payload().expect("not parity"));
-            acc
-        });
+    let reconstructed =
+        Blob::reconstruct_all(ns_shares.rows.iter().flat_map(|row| row.shares.iter())).unwrap();
 
-    assert_eq!(&reconstructed_data[..seq_len as usize], &data[..]);
+    assert_eq!(reconstructed, blobs);
 }
 
 #[tokio::test]
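
Taken together, the change to this test reads as a recipe for the new feature: submit blobs, fetch the namespace's shares at the inclusion height, and hand them to Blob::reconstruct_all. Below is a condensed sketch that mirrors the test above rather than defining any new API; it reuses this repository's test helpers (new_test_client, random_ns, random_bytes, blob_submit) and assumes they are in scope.

    use celestia_types::Blob;

    async fn submit_and_reconstruct() {
        let client = new_test_client(AuthLevel::Write).await.unwrap();
        let namespace = random_ns();

        // Four 1 KiB blobs under the same namespace.
        let blobs: Vec<_> = (0..4)
            .map(|_| Blob::new(namespace, random_bytes(1024)).unwrap())
            .collect();

        // Submit, then fetch the header of the block that included them.
        let height = blob_submit(&client, &blobs).await.unwrap();
        let header = client.header_get_by_height(height).await.unwrap();

        // All shares belonging to the namespace at that height.
        let ns_shares = client
            .share_get_shares_by_namespace(&header, namespace)
            .await
            .unwrap();

        // The new API: parse the ordered shares back into whole blobs.
        let reconstructed =
            Blob::reconstruct_all(ns_shares.rows.iter().flat_map(|row| row.shares.iter()))
                .unwrap();

        assert_eq!(reconstructed, blobs);
    }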