update seek count and loaded bytes sizes
fominok committed Aug 29, 2024
1 parent 214c401 commit 0866faf
Showing 11 changed files with 56 additions and 48 deletions.
10 changes: 5 additions & 5 deletions costs/src/lib.rs
@@ -83,11 +83,11 @@ pub type ChildrenSizesWithIsSumTree = Option<(
#[derive(Debug, Default, Eq, PartialEq, Clone)]
pub struct OperationCost {
/// How many storage_cost seeks were done.
pub seek_count: u16,
pub seek_count: u32,
/// Storage cost of the operation.
pub storage_cost: StorageCost,
/// How many bytes were loaded from hard drive.
pub storage_loaded_bytes: u32,
pub storage_loaded_bytes: u64,
/// How many times node hashing was done (for merkelized tree).
pub hash_node_calls: u32,
}
@@ -100,7 +100,7 @@ impl OperationCost {

/// Helper function to build default `OperationCost` with different
/// `seek_count`.
pub fn with_seek_count(seek_count: u16) -> Self {
pub fn with_seek_count(seek_count: u32) -> Self {
OperationCost {
seek_count,
..Default::default()
@@ -121,7 +121,7 @@ impl OperationCost {

/// Helper function to build default `OperationCost` with different
/// `storage_loaded_bytes`.
pub fn with_storage_loaded_bytes(storage_loaded_bytes: u32) -> Self {
pub fn with_storage_loaded_bytes(storage_loaded_bytes: u64) -> Self {
OperationCost {
storage_loaded_bytes,
..Default::default()
@@ -527,7 +527,7 @@ mod tests {
let loaded_value = b"ayylmao";
let costs_ctx = loaded_value.wrap_fn_cost(|x| OperationCost {
seek_count: 1,
storage_loaded_bytes: x.len() as u32,
storage_loaded_bytes: x.len() as u64,
..Default::default()
});
assert_eq!(
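The widened fields give the cost counters real headroom: a u16 seek counter tops out at 65,535 seeks and a u32 byte counter at roughly 4 GiB, both reachable when a large traversal accumulates costs. A minimal standalone sketch of the effect (not part of the commit; the loop and per-seek size are assumed for illustration, only the field types follow the OperationCost diff above):

    // Standalone sketch: with the old u16/u32 types this accumulation would
    // overflow; with the widened u32/u64 types it fits comfortably.
    fn main() {
        let seeks: u64 = 100_000;            // more seeks than u16::MAX (65_535)
        let bytes_per_seek: u64 = 64 * 1024; // assumed 64 KiB loaded per seek

        let total_loaded = seeks * bytes_per_seek;  // 6_553_600_000 bytes
        assert!(seeks > u16::MAX as u64);           // old seek_count: u16 would wrap
        assert!(total_loaded > u32::MAX as u64);    // old storage_loaded_bytes: u32 would wrap

        // The widened counters, as in the updated struct:
        let mut seek_count: u32 = 0;
        let mut storage_loaded_bytes: u64 = 0;
        for _ in 0..seeks {
            seek_count += 1;
            storage_loaded_bytes += bytes_per_seek;
        }
        assert_eq!(storage_loaded_bytes, total_loaded);
    }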
6 changes: 3 additions & 3 deletions grovedb/src/element/get.rs
@@ -163,7 +163,7 @@ impl Element {
key_ref.len() as u32,
value.as_ref().unwrap().len() as u32,
false,
)
) as u64
}
Some(Element::SumItem(_, flags)) => {
let cost_size = SUM_ITEM_COST_SIZE;
@@ -173,7 +173,7 @@
});
let value_len = cost_size + flags_len;
cost.storage_loaded_bytes =
KV::node_value_byte_cost_size(key_ref.len() as u32, value_len, false)
KV::node_value_byte_cost_size(key_ref.len() as u32, value_len, false) as u64
}
Some(Element::Tree(_, flags)) | Some(Element::SumTree(_, _, flags)) => {
let tree_cost_size = if element.as_ref().unwrap().is_sum_tree() {
@@ -191,7 +191,7 @@
key_ref.len() as u32,
value_len,
false,
)
) as u64
}
None => {}
}
12 changes: 8 additions & 4 deletions grovedb/src/estimated_costs/average_case_costs.rs
@@ -57,7 +57,7 @@ impl GroveDb {
key.max_length() as u32,
HASH_LENGTH as u32,
is_sum_tree,
);
) as u64;
}
}
*cost += S::get_storage_context_cost(path.as_vec());
@@ -434,7 +434,7 @@ impl GroveDb {
in_parent_tree_using_sums,
);
cost.seek_count += 1;
cost.storage_loaded_bytes += value_size;
cost.storage_loaded_bytes += value_size as u64;
*cost += S::get_storage_context_cost(path.as_vec());
Ok(())
}
@@ -561,8 +561,12 @@ impl GroveDb {
estimated_element_size,
in_parent_tree_using_sums,
);
cost.seek_count += 1 + estimated_references_sizes.len() as u16;
cost.storage_loaded_bytes += value_size + estimated_references_sizes.iter().sum::<u32>();
cost.seek_count += 1 + estimated_references_sizes.len() as u32;
cost.storage_loaded_bytes += value_size as u64
+ estimated_references_sizes
.iter()
.map(|x| *x as u64)
.sum::<u64>();
*cost += S::get_storage_context_cost(path.as_vec());
Ok(())
}
9 changes: 5 additions & 4 deletions grovedb/src/estimated_costs/worst_case_costs.rs
@@ -56,7 +56,7 @@ impl GroveDb {
key.max_length() as u32,
HASH_LENGTH as u32,
is_sum_tree,
);
) as u64;
}
}
*cost += S::get_storage_context_cost(path.as_vec());
@@ -407,7 +407,7 @@ impl GroveDb {
in_parent_tree_using_sums,
);
cost.seek_count += 1;
cost.storage_loaded_bytes += value_size;
cost.storage_loaded_bytes += value_size as u64;
*cost += S::get_storage_context_cost(path.as_vec());
Ok(())
}
@@ -498,8 +498,9 @@ impl GroveDb {
max_element_size,
in_parent_tree_using_sums,
);
cost.seek_count += 1 + max_references_sizes.len() as u16;
cost.storage_loaded_bytes += value_size + max_references_sizes.iter().sum::<u32>();
cost.seek_count += 1 + max_references_sizes.len() as u32;
cost.storage_loaded_bytes +=
value_size as u64 + max_references_sizes.iter().map(|x| *x as u64).sum::<u64>();
*cost += S::get_storage_context_cost(path.as_vec());
Ok(())
}
8 changes: 4 additions & 4 deletions merk/src/estimated_costs/average_case_costs.rs
@@ -319,7 +319,7 @@ pub fn add_average_case_get_merk_node(
not_prefixed_key_len,
approximate_element_size,
is_sum_tree,
);
) as u64;
Ok(())
}

@@ -331,7 +331,7 @@ pub fn add_average_case_merk_has_value(
estimated_element_size: u32,
) {
cost.seek_count += 1;
cost.storage_loaded_bytes += not_prefixed_key_len + estimated_element_size;
cost.storage_loaded_bytes += (not_prefixed_key_len + estimated_element_size) as u64;
}

#[cfg(feature = "full")]
@@ -420,7 +420,7 @@ pub fn add_average_case_merk_propagate(
// we can get about 1 rotation, if there are more than 2 levels
nodes_updated += 1;
}
cost.seek_count += nodes_updated as u16;
cost.seek_count += nodes_updated as u32;

Check warning (GitHub Actions / clippy) on line 423 in merk/src/estimated_costs/average_case_costs.rs: casting to the same type is unnecessary (`u32` -> `u32`); `cost.seek_count += nodes_updated as u32;` can drop the cast (help: try `nodes_updated`). `#[warn(clippy::unnecessary_cast)]` on by default; see https://rust-lang.github.io/rust-clippy/master/index.html#unnecessary_cast

cost.hash_node_calls += nodes_updated * 2;

@@ -664,6 +664,6 @@ pub fn add_average_case_merk_propagate(
total_loaded_bytes as u32
}
}
};
} as u64;
Ok(())
}
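The clippy annotation above flags a cast made redundant by this change: nodes_updated is already a u32, so adding it to the now-u32 seek_count needs no cast. A possible follow-up, sketched as a free function outside the commit (the function and parameter names are hypothetical stand-ins for the propagate logic):

    // Hypothetical stand-in: with seek_count widened to u32, the cast that
    // clippy flags (`nodes_updated as u32`) can simply be dropped.
    fn bump_counts(seek_count: &mut u32, hash_node_calls: &mut u32, nodes_updated: u32) {
        *seek_count += nodes_updated;       // no cast needed any more
        *hash_node_calls += nodes_updated * 2;
    }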
19 changes: 11 additions & 8 deletions merk/src/estimated_costs/worst_case_costs.rs
@@ -82,7 +82,8 @@ pub fn add_worst_case_get_merk_node(
// To write a node to disk, the left link, right link and kv nodes are encoded.
// worst case, the node has both the left and right link present.
cost.storage_loaded_bytes +=
TreeNode::worst_case_encoded_tree_size(not_prefixed_key_len, max_element_size, is_sum_node);
TreeNode::worst_case_encoded_tree_size(not_prefixed_key_len, max_element_size, is_sum_node)
as u64;
Ok(())
}

@@ -94,7 +95,7 @@ pub fn add_worst_case_merk_has_value(
max_element_size: u32,
) {
cost.seek_count += 1;
cost.storage_loaded_bytes += not_prefixed_key_len + max_element_size;
cost.storage_loaded_bytes += not_prefixed_key_len as u64 + max_element_size as u64;
}

#[cfg(feature = "full")]
@@ -208,8 +209,9 @@ pub fn add_worst_case_merk_propagate(

// todo: verify these numbers
cost.storage_cost.replaced_bytes += nodes_updated * MERK_BIGGEST_VALUE_SIZE;
cost.storage_loaded_bytes += nodes_updated * (MERK_BIGGEST_VALUE_SIZE + MERK_BIGGEST_KEY_SIZE);
cost.seek_count += nodes_updated as u16;
cost.storage_loaded_bytes +=
(nodes_updated * (MERK_BIGGEST_VALUE_SIZE + MERK_BIGGEST_KEY_SIZE)) as u64;
cost.seek_count += nodes_updated as u32;

Check warning (GitHub Actions / clippy) on line 214 in merk/src/estimated_costs/worst_case_costs.rs: casting to the same type is unnecessary (`u32` -> `u32`); `cost.seek_count += nodes_updated as u32;` can drop the cast (help: try `nodes_updated`). See https://rust-lang.github.io/rust-clippy/master/index.html#unnecessary_cast
cost.hash_node_calls += nodes_updated * 2;
Ok(())
}
@@ -220,8 +222,8 @@ pub fn add_worst_case_cost_for_is_empty_tree_except(
cost: &mut OperationCost,
except_keys_count: u16,
) {
cost.seek_count += except_keys_count + 1;
cost.storage_loaded_bytes += MAX_PREFIXED_KEY_SIZE * (except_keys_count as u32 + 1);
cost.seek_count += except_keys_count as u32 + 1;
cost.storage_loaded_bytes += MAX_PREFIXED_KEY_SIZE * (except_keys_count as u64 + 1);
}

/// Add average case cost for is_empty_tree_except
@@ -231,6 +233,7 @@ pub fn add_average_case_cost_for_is_empty_tree_except(
except_keys_count: u16,
estimated_prefixed_key_size: u32,
) {
cost.seek_count += except_keys_count + 1;
cost.storage_loaded_bytes += estimated_prefixed_key_size * (except_keys_count as u32 + 1);
cost.seek_count += except_keys_count as u32 + 1;
cost.storage_loaded_bytes +=
estimated_prefixed_key_size as u64 * (except_keys_count as u64 + 1);
}
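Two widening patterns appear in this file: casting the finished product, as in (nodes_updated * (MERK_BIGGEST_VALUE_SIZE + MERK_BIGGEST_KEY_SIZE)) as u64, and widening an operand, as in MAX_PREFIXED_KEY_SIZE * (except_keys_count as u64 + 1), where MAX_PREFIXED_KEY_SIZE itself becomes u64 in merk/src/merk/defaults.rs below so the multiplication runs in u64. The distinction only matters if the u32 product could wrap before the cast; a standalone sketch with made-up sizes (none of the values are taken from the merk constants):

    // Standalone sketch: casting the product widens too late when the u32
    // multiplication has already wrapped, while casting an operand first
    // performs the whole multiplication in u64.
    fn main() {
        let nodes_updated: u32 = 200_000;
        let per_node: u32 = 32_768; // assumed combined key + value size

        let widened_operand = nodes_updated as u64 * per_node as u64; // 6_553_600_000
        assert!(widened_operand > u32::MAX as u64);

        // wrapping_mul stands in for what a u32 overflow would do in release
        // builds; the later cast cannot recover the lost high bits.
        let widened_product = nodes_updated.wrapping_mul(per_node) as u64;
        assert_ne!(widened_product, widened_operand);
    }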
2 changes: 1 addition & 1 deletion merk/src/merk/defaults.rs
@@ -34,4 +34,4 @@ pub const ROOT_KEY_KEY: &[u8] = b"r";
#[cfg(feature = "full")]
pub const MAX_UPDATE_VALUE_BASED_ON_COSTS_TIMES: u8 = 8;
#[cfg(feature = "full")]
pub const MAX_PREFIXED_KEY_SIZE: u32 = 288;
pub const MAX_PREFIXED_KEY_SIZE: u64 = 288;
8 changes: 4 additions & 4 deletions storage/src/rocksdb_storage/storage.rs
@@ -280,7 +280,7 @@ impl RocksDbStorage {
)
.map(|x| x.len() as u32)
.unwrap_or(0);
cost.storage_loaded_bytes += value_len;
cost.storage_loaded_bytes += value_len as u64;
let key_len = key.len() as u32;
// todo: improve deletion
pending_costs.storage_cost.removed_bytes += BasicStorageRemoval(
@@ -307,7 +307,7 @@ impl RocksDbStorage {
)
.map(|x| x.len() as u32)
.unwrap_or(0);
cost.storage_loaded_bytes += value_len;
cost.storage_loaded_bytes += value_len as u64;

let key_len = key.len() as u32;
// todo: improve deletion
@@ -337,7 +337,7 @@ impl RocksDbStorage {
)
.map(|x| x.len() as u32)
.unwrap_or(0);
cost.storage_loaded_bytes += value_len;
cost.storage_loaded_bytes += value_len as u64;

let key_len = key.len() as u32;
// todo: improve deletion
@@ -367,7 +367,7 @@ impl RocksDbStorage {
)
.map(|x| x.len() as u32)
.unwrap_or(0);
cost.storage_loaded_bytes += value_len;
cost.storage_loaded_bytes += value_len as u64;

let key_len = key.len() as u32;
// todo: improve deletion
8 changes: 4 additions & 4 deletions storage/src/rocksdb_storage/storage_context/context_no_tx.rs
@@ -209,7 +209,7 @@ impl<'db> StorageContext<'db> for PrefixedRocksDbStorageContext<'db> {
.as_ref()
.ok()
.and_then(Option::as_ref)
.map(|x| x.len() as u32)
.map(|x| x.len() as u64)
.unwrap_or(0),
..Default::default()
})
@@ -225,7 +225,7 @@ impl<'db> StorageContext<'db> for PrefixedRocksDbStorageContext<'db> {
.as_ref()
.ok()
.and_then(Option::as_ref)
.map(|x| x.len() as u32)
.map(|x| x.len() as u64)
.unwrap_or(0),
..Default::default()
})
@@ -241,7 +241,7 @@ impl<'db> StorageContext<'db> for PrefixedRocksDbStorageContext<'db> {
.as_ref()
.ok()
.and_then(Option::as_ref)
.map(|x| x.len() as u32)
.map(|x| x.len() as u64)
.unwrap_or(0),
..Default::default()
})
@@ -257,7 +257,7 @@ impl<'db> StorageContext<'db> for PrefixedRocksDbStorageContext<'db> {
.as_ref()
.ok()
.and_then(Option::as_ref)
.map(|x| x.len() as u32)
.map(|x| x.len() as u64)
.unwrap_or(0),
..Default::default()
})
8 changes: 4 additions & 4 deletions storage/src/rocksdb_storage/storage_context/context_tx.rs
@@ -240,7 +240,7 @@ impl<'db> StorageContext<'db> for PrefixedRocksDbTransactionContext<'db> {
.as_ref()
.ok()
.and_then(Option::as_ref)
.map(|x| x.len() as u32)
.map(|x| x.len() as u64)
.unwrap_or(0),
..Default::default()
})
@@ -256,7 +256,7 @@ impl<'db> StorageContext<'db> for PrefixedRocksDbTransactionContext<'db> {
.as_ref()
.ok()
.and_then(Option::as_ref)
.map(|x| x.len() as u32)
.map(|x| x.len() as u64)
.unwrap_or(0),
..Default::default()
})
@@ -272,7 +272,7 @@ impl<'db> StorageContext<'db> for PrefixedRocksDbTransactionContext<'db> {
.as_ref()
.ok()
.and_then(Option::as_ref)
.map(|x| x.len() as u32)
.map(|x| x.len() as u64)
.unwrap_or(0),
..Default::default()
})
@@ -288,7 +288,7 @@ impl<'db> StorageContext<'db> for PrefixedRocksDbTransactionContext<'db> {
.as_ref()
.ok()
.and_then(Option::as_ref)
.map(|x| x.len() as u32)
.map(|x| x.len() as u64)
.unwrap_or(0),
..Default::default()
})
14 changes: 7 additions & 7 deletions storage/src/rocksdb_storage/storage_context/raw_iterator.rs
@@ -38,7 +38,7 @@ use crate::{
};

/// 256 bytes for the key and 32 bytes for the prefix
const MAX_PREFIXED_KEY_LENGTH: u32 = 256 + 32;
const MAX_PREFIXED_KEY_LENGTH: u64 = 256 + 32;

/// Raw iterator over prefixed storage_cost.
pub struct PrefixedRocksDbRawIterator<I> {
@@ -91,7 +91,7 @@ impl<'a> RawIterator for PrefixedRocksDbRawIterator<DBRawIteratorWithThreadMode<

let value = if self.valid().unwrap_add_cost(&mut cost) {
self.raw_iterator.value().map(|v| {
cost.storage_loaded_bytes += v.len() as u32;
cost.storage_loaded_bytes += v.len() as u64;
v
})
} else {
@@ -109,7 +109,7 @@ impl<'a> RawIterator for PrefixedRocksDbRawIterator<DBRawIteratorWithThreadMode<
// Even if we truncate prefix, loaded cost should be maximum for the whole
// function
if k.starts_with(&self.prefix) {
cost.storage_loaded_bytes += k.len() as u32;
cost.storage_loaded_bytes += k.len() as u64;
Some(k.split_at(self.prefix.len()).1)
} else {
// we can think of the underlying storage layer as stacked blocks
@@ -142,7 +142,7 @@ impl<'a> RawIterator for PrefixedRocksDbRawIterator<DBRawIteratorWithThreadMode<
.key()
.map(|k| {
if k.starts_with(&self.prefix) {
cost.storage_loaded_bytes += k.len() as u32;
cost.storage_loaded_bytes += k.len() as u64;
true
} else {
// we can think of the underlying storage layer as stacked blocks
@@ -212,7 +212,7 @@ impl<'a> RawIterator for PrefixedRocksDbRawIterator<DBRawIteratorWithThreadMode<

let value = if self.valid().unwrap_add_cost(&mut cost) {
self.raw_iterator.value().map(|v| {
cost.storage_loaded_bytes += v.len() as u32;
cost.storage_loaded_bytes += v.len() as u64;
v
})
} else {
@@ -230,7 +230,7 @@ impl<'a> RawIterator for PrefixedRocksDbRawIterator<DBRawIteratorWithThreadMode<
// Even if we truncate prefix, loaded cost should be maximum for the whole
// function
if k.starts_with(&self.prefix) {
cost.storage_loaded_bytes += k.len() as u32;
cost.storage_loaded_bytes += k.len() as u64;
Some(k.split_at(self.prefix.len()).1)
} else {
// we can think of the underlying storage layer as stacked blocks
@@ -263,7 +263,7 @@ impl<'a> RawIterator for PrefixedRocksDbRawIterator<DBRawIteratorWithThreadMode<
.key()
.map(|k| {
if k.starts_with(&self.prefix) {
cost.storage_loaded_bytes += k.len() as u32;
cost.storage_loaded_bytes += k.len() as u64;
true
} else {
// we can think of the underlying storage layer as stacked blocks
