diff --git a/costs/src/context.rs b/costs/src/context.rs
index 8c0faffd..374466ef 100644
--- a/costs/src/context.rs
+++ b/costs/src/context.rs
@@ -116,6 +116,15 @@ impl<T, E> CostResult<T, E> {
     pub fn cost_as_result(self) -> Result<OperationCost, E> {
         self.value.map(|_| self.cost)
     }
+
+    /// Call the provided function on success without altering result or cost.
+    pub fn for_ok(self, f: impl FnOnce(&T)) -> CostResult<T, E> {
+        if let Ok(x) = &self.value {
+            f(x)
+        }
+
+        self
+    }
 }
 
 impl<T, E> CostResult<CostResult<T, E>, E> {
diff --git a/grovedb/src/merk_cache.rs b/grovedb/src/merk_cache.rs
index b691fbd0..6053a833 100644
--- a/grovedb/src/merk_cache.rs
+++ b/grovedb/src/merk_cache.rs
@@ -2,18 +2,18 @@
 //! after usage automatically.
 
 use std::{
-    collections::{hash_map::Entry, HashMap, HashSet},
+    collections::{btree_map::Entry, BTreeMap, HashSet},
     mem::{self, MaybeUninit},
     ops::Deref,
 };
 
 use grovedb_costs::{cost_return_on_error, CostResult, CostsExt};
-use grovedb_merk::Merk;
+use grovedb_merk::{Merk, MerkOptions};
 use grovedb_path::SubtreePath;
 use grovedb_storage::{rocksdb_storage::PrefixedRocksDbTransactionContext, StorageBatch};
 use grovedb_version::version::GroveVersion;
 
-use crate::{Error, GroveDb, Transaction};
+use crate::{Element, Error, GroveDb, Transaction};
 
 type TxMerk<'db> = Merk<PrefixedRocksDbTransactionContext<'db>>;
 
@@ -29,7 +29,7 @@ pub(crate) struct MerkCache<'db, 'b, B> {
     tx: &'db Transaction<'db>,
     batch: &'db StorageBatch,
     version: &'db GroveVersion,
-    inner: HashMap<SubtreePath<'b, B>, TxMerk<'db>>,
+    merks: BTreeMap<SubtreePath<'b, B>, TxMerk<'db>>,
 }
 
 impl<'db, 'b, B: AsRef<[u8]>> MerkCache<'db, 'b, B> {
@@ -44,7 +44,7 @@ impl<'db, 'b, B: AsRef<[u8]>> MerkCache<'db, 'b, B> {
             tx,
             batch,
             version,
-            inner: Default::default(),
+            merks: Default::default(),
         }
     }
 
@@ -57,7 +57,7 @@ impl<'db, 'b, B: AsRef<[u8]>> MerkCache<'db, 'b, B> {
     ) -> CostResult<&'s mut TxMerk<'db>, Error> {
         let mut cost = Default::default();
 
-        match self.inner.entry(path) {
+        match self.merks.entry(path) {
             Entry::Occupied(e) => Ok(e.into_mut()).wrap_with_cost(cost),
             Entry::Vacant(e) => {
                 let merk = cost_return_on_error!(
@@ -100,12 +100,13 @@ impl<'db, 'b, B: AsRef<[u8]>> MerkCache<'db, 'b, B> {
             // mutable references. The mandatory keys' uniqueness check above makes
             // sure no overlapping memory will be referenced.
             let merk_ref = unsafe {
-                MerkHandle(
-                    (cost_return_on_error!(&mut cost, self.get_merk_mut_internal(path))
+                MerkHandle {
+                    merk: (cost_return_on_error!(&mut cost, self.get_merk_mut_internal(path))
                         as *mut TxMerk<'db>)
                         .as_mut::<'s>()
                         .expect("not a null pointer"),
-                )
+                    version: self.version,
+                }
             };
             result_uninit[i].write(merk_ref);
         }
@@ -123,7 +124,10 @@ impl<'db, 'b, B: AsRef<[u8]>> MerkCache<'db, 'b, B> {
 }
 
 /// Handle to a cached Merk.
-pub(crate) struct MerkHandle<'db, 'c>(&'c mut TxMerk<'db>);
+pub(crate) struct MerkHandle<'db, 'c> {
+    merk: &'c mut TxMerk<'db>,
+    version: &'db GroveVersion,
+}
 
 /// It is allowed to dereference `MerkHandle` to regular Merks but in a
 /// non-mutable way since we want to track what have been done to those Merks.
@@ -131,13 +135,20 @@ impl<'db, 'c> Deref for MerkHandle<'db, 'c> {
     type Target = TxMerk<'db>;
 
     fn deref(&self) -> &Self::Target {
-        &self.0
+        &self.merk
     }
 }
 
 impl<'db, 'c> MerkHandle<'db, 'c> {
-    pub(crate) fn insert(&mut self) {
-        todo!()
+    pub(crate) fn insert(
+        &mut self,
+        key: impl AsRef<[u8]>,
+        element: Element,
+        options: Option<MerkOptions>,
+    ) -> CostResult<(), Error> {
+        element
+            .insert(self.merk, key, options, self.version)
+            .for_ok(|_| todo!())
     }
 }
 
diff --git a/path/src/subtree_path.rs b/path/src/subtree_path.rs
index 437f911a..b6132531 100644
--- a/path/src/subtree_path.rs
+++ b/path/src/subtree_path.rs
@@ -34,7 +34,10 @@
 //! combined with it's various `From` implementations it can cover slices, owned
 //! subtree paths and other path references if use as generic [Into].
 
-use std::hash::{Hash, Hasher};
+use std::{
+    cmp,
+    hash::{Hash, Hasher},
+};
 
 use crate::{
     subtree_path_builder::{SubtreePathBuilder, SubtreePathRelative},
@@ -78,6 +81,48 @@ where
     }
 }
 
+/// First and foremost, the order of subtree paths is dictated by their
+/// lengths, so subtrees closer to the root come first. Beyond that, the only
+/// guarantee is the absence of false equality; seemingly unrelated subtrees
+/// can follow one another if they share the same length, a side effect of the
+/// cheap length-first comparison used for performance reasons.
+impl<'bl, 'br, BL, BR> PartialOrd<SubtreePath<'br, BR>> for SubtreePath<'bl, BL>
+where
+    BL: AsRef<[u8]>,
+    BR: AsRef<[u8]>,
+{
+    fn partial_cmp(&self, other: &SubtreePath<'br, BR>) -> Option<cmp::Ordering> {
+        let iter_a = self.clone().into_reverse_iter();
+        let iter_b = other.clone().into_reverse_iter();
+
+        Some(
+            iter_a
+                .len()
+                .cmp(&iter_b.len())
+                .then_with(|| iter_a.cmp(iter_b)),
+        )
+    }
+}
+
+impl<'bl, 'br, BL, BR> PartialOrd<SubtreePathBuilder<'br, BR>> for SubtreePath<'bl, BL>
+where
+    BL: AsRef<[u8]>,
+    BR: AsRef<[u8]>,
+{
+    fn partial_cmp(&self, other: &SubtreePathBuilder<'br, BR>) -> Option<cmp::Ordering> {
+        self.partial_cmp(&SubtreePath::from(other))
+    }
+}
+
+impl<'bl, BL> Ord for SubtreePath<'bl, BL>
+where
+    BL: AsRef<[u8]>,
+{
+    fn cmp(&self, other: &Self) -> cmp::Ordering {
+        self.partial_cmp(other).expect("order is totally defined")
+    }
+}
+
 impl<'b, B: AsRef<[u8]>> Eq for SubtreePath<'b, B> {}
 
 impl<'b, B> From<SubtreePathBuilder<'b, B>> for SubtreePath<'b, B> {
@@ -274,4 +319,29 @@ mod tests {
         assert_eq!(as_vec, reference_vec);
         assert_eq!(parent.len(), reference_vec.len());
     }
+
+    #[test]
+    fn ordering() {
+        let path_a: SubtreePath<_> = (&[b"one" as &[u8], b"two", b"three"]).into();
+        let path_b = path_a.derive_owned_with_child(b"four");
+        let path_c = path_a.derive_owned_with_child(b"notfour");
+        let (path_d_parent, _) = path_a.derive_parent().unwrap();
+        let path_d = path_d_parent.derive_owned_with_child(b"three");
+
+        // Same lengths for different paths don't make them equal:
+        assert!(!matches!(
+            SubtreePath::from(&path_b).cmp(&SubtreePath::from(&path_c)),
+            cmp::Ordering::Equal
+        ));
+
+        // Equal paths built in different ways are still equal:
+        assert!(matches!(
+            path_a.cmp(&SubtreePath::from(&path_d)),
+            cmp::Ordering::Equal
+        ));
+
+        // Longer paths are "greater than":
+        assert!(path_a < path_b);
+        assert!(path_a < path_c);
+    }
 }
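Reviewer note: the `HashMap` → `BTreeMap` switch in `MerkCache` is what motivates the new `Ord` for `SubtreePath`. Because lengths are compared first, iterating the map yields subtrees closer to the root before their descendants. Below is a minimal sketch of that property, using only the `grovedb-path` API already visible in this diff (`From<&[B; N]>`, `derive_owned_with_child`, `From<&SubtreePathBuilder>`); the `main` harness and the string labels are illustrative assumptions, not part of the change:

```rust
use std::collections::BTreeMap;

use grovedb_path::SubtreePath;

fn main() {
    // A root subtree and two nested descendants, built with the same
    // helpers the new `ordering` test uses.
    let root: SubtreePath<_> = (&[b"root" as &[u8]]).into();
    let child = root.derive_owned_with_child(b"child");
    let child_path = SubtreePath::from(&child);
    let grandchild = child_path.derive_owned_with_child(b"grandchild");

    // Insert in scrambled order; `Ord` compares lengths first, so the
    // `BTreeMap` keeps paths closer to the root in front.
    let mut merks: BTreeMap<SubtreePath<_>, &str> = BTreeMap::new();
    merks.insert(SubtreePath::from(&grandchild), "grandchild");
    merks.insert(SubtreePath::from(&child), "child");
    merks.insert(root.clone(), "root");

    // Iteration goes from the root downwards.
    let order: Vec<_> = merks.values().copied().collect();
    assert_eq!(order, ["root", "child", "grandchild"]);
}
```

Relatedly, the new `CostResult::for_ok` combinator gives `MerkHandle::insert` a hook to react to a successful `Element::insert` (the reaction itself is still the `todo!()` in this PR) without disturbing either the returned value or the accumulated cost.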