Skip to content

Commit

Permalink
clippy fixes
Browse files Browse the repository at this point in the history
  • Loading branch information
ogabrielides committed Apr 30, 2024
1 parent 599553f commit 4c63644
Show file tree
Hide file tree
Showing 9 changed files with 61 additions and 71 deletions.
2 changes: 1 addition & 1 deletion grovedb/src/batch/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -560,7 +560,7 @@ impl GroveDbOp {
}

/// Verify consistency of operations
pub fn verify_consistency_of_operations(ops: &Vec<GroveDbOp>) -> GroveDbOpConsistencyResults {
pub fn verify_consistency_of_operations(ops: &[GroveDbOp]) -> GroveDbOpConsistencyResults {
let ops_len = ops.len();
// operations should not have any duplicates
let mut repeated_ops = vec![];
Expand Down
75 changes: 37 additions & 38 deletions grovedb/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -171,14 +171,12 @@ use std::collections::{BTreeMap, BTreeSet};
#[cfg(feature = "full")]
use std::{collections::HashMap, fmt, option::Option::None, path::Path};

use blake3;
#[cfg(any(feature = "full", feature = "verify"))]
use element::helpers;
#[cfg(any(feature = "full", feature = "verify"))]
pub use element::Element;
#[cfg(feature = "full")]
pub use element::ElementFlags;
use grovedb_costs::storage_cost::key_value_cost::KeyValueStorageCost;
#[cfg(feature = "full")]
use grovedb_costs::{
cost_return_on_error, cost_return_on_error_no_add, CostResult, CostsExt, OperationCost,
Expand Down Expand Up @@ -235,8 +233,6 @@ type Hash = [u8; 32];
pub struct GroveDb {
#[cfg(feature = "full")]
db: RocksDbStorage,

version: i32,
}

// Struct governing state sync
Expand Down Expand Up @@ -272,12 +268,18 @@ impl SubtreesMetadata {
}
}

impl Default for SubtreesMetadata {
fn default() -> Self {
Self::new()
}
}

impl fmt::Debug for SubtreesMetadata {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for (prefix, metadata) in self.data.iter() {
let metadata_path = &metadata.0;
let metadata_path_str = util_path_to_string(&metadata_path);
write!(
let metadata_path_str = util_path_to_string(metadata_path);
writeln!(
f,
" prefix:{:?} -> path:{:?}\n",
hex::encode(prefix),
Expand Down Expand Up @@ -312,7 +314,7 @@ impl GroveDb {
/// Opens a given path
pub fn open<P: AsRef<Path>>(path: P) -> Result<Self, Error> {
let db = RocksDbStorage::default_rocksdb_with_path(path)?;
Ok(GroveDb { db, version: 1 })
Ok(GroveDb { db })
}

/// Uses raw iter to delete GroveDB key values pairs from rocksdb
Expand Down Expand Up @@ -1092,7 +1094,7 @@ impl GroveDb {
let current_path = SubtreePath::from(path);

let parent_path_opt = current_path.derive_parent();
if (parent_path_opt.is_some()) {
if parent_path_opt.is_some() {
let parent_path = parent_path_opt.unwrap().0;
let parent_merk = self
.open_transactional_merk_at_path(parent_path, tx, None)
Expand Down Expand Up @@ -1143,20 +1145,20 @@ impl GroveDb {
global_chunk_id: &[u8],
tx: &'db Transaction,
) -> Result<Vec<Op>, Error> {
let CHUNK_PREFIX_LENGTH: usize = 32;
if (global_chunk_id.len() < CHUNK_PREFIX_LENGTH) {
let chunk_prefix_length: usize = 32;
if global_chunk_id.len() < chunk_prefix_length {
return Err(Error::CorruptedData(
"expected global chunk id of at least 32 length".to_string(),
));
}

let (chunk_prefix, chunk_id) = global_chunk_id.split_at(CHUNK_PREFIX_LENGTH);
let (chunk_prefix, chunk_id) = global_chunk_id.split_at(chunk_prefix_length);

let mut array = [0u8; 32];
array.copy_from_slice(chunk_prefix);
let chunk_prefix_key: SubtreePrefix = array;

let subtrees_metadata = self.get_subtrees_metadata(&tx)?;
let subtrees_metadata = self.get_subtrees_metadata(tx)?;

match subtrees_metadata.data.get(&chunk_prefix_key) {
Some(path_data) => {
Expand All @@ -1165,36 +1167,36 @@ impl GroveDb {
let path: &[&[u8]] = &subtree_path;

let merk = self
.open_transactional_merk_at_path(path.into(), &tx, None)
.open_transactional_merk_at_path(path.into(), tx, None)
.value?;

if (merk.is_empty_tree().unwrap()) {
if merk.is_empty_tree().unwrap() {
return Ok(vec![]);
}

let mut chunk_producer_res = ChunkProducer::new(&merk);
let chunk_producer_res = ChunkProducer::new(&merk);
match chunk_producer_res {
Ok(mut chunk_producer) => {
let chunk_res = chunk_producer
.chunk(String::from_utf8(chunk_id.to_vec()).unwrap().as_str());
match chunk_res {
Ok((chunk, _)) => Ok(chunk),
Err(_) => {
return Err(Error::CorruptedData(
Err(Error::CorruptedData(
"Unable to create to load chunk".to_string(),
));
))
}
}
}
Err(_) => {
return Err(Error::CorruptedData(
Err(Error::CorruptedData(
"Unable to create Chunk producer".to_string(),
));
))
}
}
}
None => {
return Err(Error::CorruptedData("Prefix not found".to_string()));
Err(Error::CorruptedData("Prefix not found".to_string()))
}
}
}
Expand Down Expand Up @@ -1262,23 +1264,23 @@ impl GroveDb {
let mut res = vec![];

let (global_chunk_id, chunk_data) = chunk;
let (chunk_prefix, chunk_id) = util_split_global_chunk_id(&global_chunk_id)?;
let (chunk_prefix, chunk_id) = util_split_global_chunk_id(global_chunk_id)?;

match (
&mut state_sync_info.restorer,
&state_sync_info.current_prefix,
) {
(Some(restorer), Some(ref current_prefix)) => {
if (*current_prefix != chunk_prefix) {
if *current_prefix != chunk_prefix {
return Err(Error::InternalError("Invalid incoming prefix"));
}
if (!state_sync_info.pending_chunks.contains(global_chunk_id)) {
if !state_sync_info.pending_chunks.contains(global_chunk_id) {
return Err(Error::InternalError(
"Incoming global_chunk_id not expected",
));
}
state_sync_info.pending_chunks.remove(global_chunk_id);
if (!chunk_data.is_empty()) {
if !chunk_data.is_empty() {
match restorer.process_chunk(chunk_id.to_string(), chunk_data) {
Ok(next_chunk_ids) => {
state_sync_info.num_processed_chunks += 1;
Expand All @@ -1302,25 +1304,22 @@ impl GroveDb {
}
}

if (res.is_empty()) {
if (!state_sync_info.pending_chunks.is_empty()) {
if res.is_empty() {
if !state_sync_info.pending_chunks.is_empty() {
return Ok((res, state_sync_info));
}
match (
state_sync_info.restorer.take(),
state_sync_info.current_prefix.take(),
) {
(Some(restorer), Some(current_prefix)) => {
if (state_sync_info.num_processed_chunks > 0) {
if (!restorer.finalize().is_ok()) {
return Err(Error::InternalError("Unable to finalize merk"));
}
if (state_sync_info.num_processed_chunks > 0) && (restorer.finalize().is_err()) {
return Err(Error::InternalError("Unable to finalize merk"));
}
state_sync_info.processed_prefixes.insert(current_prefix);

let subtrees_metadata = self.get_subtrees_metadata(tx)?;
if let Some(value) = subtrees_metadata.data.get(&current_prefix) {
let v_path = &value.0;
println!(" path:{:?} done", util_path_to_string(&value.0));
}

Expand Down Expand Up @@ -1360,10 +1359,10 @@ impl GroveDb {
}

// Converts a path into a human-readable string (for debugging)
pub fn util_path_to_string(path: &Vec<Vec<u8>>) -> Vec<String> {
pub fn util_path_to_string(path: &[Vec<u8>]) -> Vec<String> {
let mut subtree_path_str: Vec<String> = vec![];
for subtree in path.to_vec() {
let string = std::str::from_utf8(&subtree).unwrap();
for subtree in path {
let string = std::str::from_utf8(subtree).unwrap();
subtree_path_str.push(string.parse().unwrap());
}
subtree_path_str
Expand All @@ -1373,22 +1372,22 @@ pub fn util_path_to_string(path: &Vec<Vec<u8>>) -> Vec<String> {
pub fn util_split_global_chunk_id(
global_chunk_id: &[u8],
) -> Result<(SubtreePrefix, String), Error> {
let CHUNK_PREFIX_LENGTH: usize = 32;
if global_chunk_id.len() < CHUNK_PREFIX_LENGTH {
let chunk_prefix_length: usize = 32;
if global_chunk_id.len() < chunk_prefix_length {
return Err(Error::CorruptedData(
"expected global chunk id of at least 32 length".to_string(),
));
}

let (chunk_prefix, chunk_id) = global_chunk_id.split_at(CHUNK_PREFIX_LENGTH);
let (chunk_prefix, chunk_id) = global_chunk_id.split_at(chunk_prefix_length);
let mut array = [0u8; 32];
array.copy_from_slice(chunk_prefix);
let chunk_prefix_key: SubtreePrefix = array;
let str_chunk_id = String::from_utf8(chunk_id.to_vec());
match str_chunk_id {
Ok(s) => Ok((chunk_prefix_key, s)),
Err(_) => {
return Err(Error::CorruptedData(
Err(Error::CorruptedData(
"unable to convert chunk id to string".to_string(),
))
}
Expand Down
1 change: 0 additions & 1 deletion grovedb/src/operations/auxiliary.rs
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,6 @@ use grovedb_costs::{
cost_return_on_error, cost_return_on_error_no_add,
storage_cost::key_value_cost::KeyValueStorageCost, CostResult, CostsExt, OperationCost,
};
use grovedb_merk::{proofs::Query, KVIterator};
use grovedb_path::SubtreePath;
#[cfg(feature = "full")]
use grovedb_storage::StorageContext;
Expand Down
2 changes: 1 addition & 1 deletion grovedb/src/operations/delete/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ use grovedb_storage::{
#[cfg(feature = "full")]
use crate::{
batch::{GroveDbOp, Op},
util::{storage_context_optional_tx, storage_context_with_parent_optional_tx},
util::{storage_context_with_parent_optional_tx},
Element, ElementFlags, Error, GroveDb, Transaction, TransactionArg,
};
use crate::{raw_decode, util::merk_optional_tx_path_not_empty};
Expand Down
12 changes: 4 additions & 8 deletions merk/src/merk/chunks.rs
Original file line number Diff line number Diff line change
Expand Up @@ -364,6 +364,10 @@ where
number_of_chunks(self.height)
}

/// Returns `true` when this producer has no chunks to emit, i.e. the
/// chunk count derived from the tree height is zero.
pub fn is_empty(&self) -> bool {
    let total_chunks = number_of_chunks(self.height);
    total_chunks == 0
}

/// Gets the next chunk based on the `ChunkProducer`'s internal index state.
/// This is mostly useful for letting `ChunkIter` yield the chunks in order,
/// optimizing throughput compared to random access.
Expand All @@ -387,14 +391,6 @@ where
}),
)
}

// TODO: test this logic out
fn get_chunk_encoding_length(chunk: &[Op]) -> usize {
// TODO: deal with error
chunk
.iter()
.fold(0, |sum, op| sum + op.encoding_length().unwrap())
}
}

/// Iterate over each chunk, returning `None` after last chunk
Expand Down
13 changes: 5 additions & 8 deletions merk/src/merk/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,6 @@ use crate::{
tree::{
kv::ValueDefinedCostType, AuxMerkBatch, CryptoHash, Op, RefWalker, TreeNode, NULL_HASH,
},
BatchEntry,
Error::{CostsError, EdError, StorageError},
Link,
MerkType::{BaseMerk, LayeredMerk, StandaloneMerk},
Expand Down Expand Up @@ -662,19 +661,17 @@ where
}

let node = node.unwrap();
if &node.hash().unwrap() != &hash {
if node.hash().unwrap() != hash {
bad_link_map.insert(instruction_id.clone(), hash);
parent_keys.insert(instruction_id, parent_key.to_vec());
return;
}

// Need to skip this when restoring a sum tree
if !skip_sum_checks {
if node.sum().unwrap() != sum {
bad_link_map.insert(instruction_id.clone(), hash);
parent_keys.insert(instruction_id, parent_key.to_vec());
return;
}
if !skip_sum_checks && node.sum().unwrap() != sum {
bad_link_map.insert(instruction_id.clone(), hash);
parent_keys.insert(instruction_id, parent_key.to_vec());
return;
}

// TODO: check child heights
Expand Down
21 changes: 10 additions & 11 deletions merk/src/merk/restore.rs
Original file line number Diff line number Diff line change
Expand Up @@ -31,12 +31,11 @@
use std::collections::BTreeMap;

use grovedb_costs::cost_return_on_error;
use grovedb_storage::{Batch, StorageContext};

use crate::{
merk,
merk::{committer::MerkCommitter, MerkSource},
merk::{MerkSource},
proofs::{
chunk::{
chunk::{LEFT, RIGHT},
Expand All @@ -47,7 +46,7 @@ use crate::{
tree::{execute, Child, Tree as ProofTree},
Node, Op,
},
tree::{combine_hash, kv::ValueDefinedCostType, NoopCommit, RefWalker, TreeNode},
tree::{combine_hash, kv::ValueDefinedCostType, RefWalker, TreeNode},
CryptoHash, Error,
Error::{CostsError, StorageError},
Link, Merk,
Expand Down Expand Up @@ -96,15 +95,15 @@ impl<'db, S: StorageContext<'db>> Restorer<S> {
.ok_or(Error::ChunkRestoringError(ChunkError::UnexpectedChunk))?;

let mut parent_key_value_hash: Option<CryptoHash> = None;
if (chunk_id.len() == 0) {
parent_key_value_hash = self.parent_key_value_hash.clone();
if chunk_id.is_empty() {
parent_key_value_hash = self.parent_key_value_hash;
}
let chunk_tree = Self::verify_chunk(chunk, expected_root_hash, &parent_key_value_hash)?;

let mut root_traversal_instruction = string_as_traversal_instruction(&chunk_id)?;

if root_traversal_instruction.is_empty() {
self.merk.set_base_root_key(Some(chunk_tree.key().to_vec()));
let _ = self.merk.set_base_root_key(Some(chunk_tree.key().to_vec()));
} else {
// every non root chunk has some associated parent with an placeholder link
// here we update the placeholder link to represent the true data
Expand Down Expand Up @@ -185,9 +184,9 @@ impl<'db, S: StorageContext<'db>> Restorer<S> {
debug_assert_eq!(chunk_len, ((kv_count + hash_count) * 2) - 1);

// chunk structure verified, next verify root hash
let parent_key_value_hash = match parent_key_value_hash_opt {
match parent_key_value_hash_opt {
Some(val_hash) => {
let combined_hash = combine_hash(&val_hash, &tree.hash().unwrap()).unwrap();
let combined_hash = combine_hash(val_hash, &tree.hash().unwrap()).unwrap();
if &combined_hash != expected_root_hash {
return Err(Error::ChunkRestoringError(ChunkError::InvalidChunkProof(
"chunk doesn't match expected root hash",
Expand Down Expand Up @@ -408,14 +407,14 @@ impl<'db, S: StorageContext<'db>> Restorer<S> {
}

// get the latest version of the root node
self.merk
let _ = self.merk
.load_base_root(None::<&fn(&[u8]) -> Option<ValueDefinedCostType>>);

// if height values are wrong, rewrite height
if self.verify_height().is_err() {
self.rewrite_heights();
let _ = self.rewrite_heights();
// update the root node after height rewrite
self.merk
let _ = self.merk
.load_base_root(None::<&fn(&[u8]) -> Option<ValueDefinedCostType>>);
}

Expand Down
Loading

0 comments on commit 4c63644

Please sign in to comment.